def handle(self, *args, **options):
    """Management-command entry point.

    Empties DOCUMENT_ROOT on disk, then drops every CouchDB debug
    ('_test') database listed in settings.COUCHDB_DATABASES.

    Options:
        quiet: suppress all progress output.
        prod-indexes: parsed but not used in this handler (kept for
            interface compatibility with the command's option parser).
    """
    quiet = options.get('quiet', False)
    prod_indexes = options.get('prod-indexes', False)  # unused here
    docs_root = os.path.normpath(settings.DOCUMENT_ROOT)
    if not quiet:
        self.stdout.write(
            'Cleaning UP stored files in DOCUMENT_ROOT: %s \n' % docs_root)
    # Recreate an empty document root.
    shutil.rmtree(docs_root)
    os.makedirs(docs_root)
    if not quiet:
        self.stdout.write('done\n')
    if not quiet:
        self.stdout.write('Deleting CouchDB debug mode databases.\n')
    databases = settings.COUCHDB_DATABASES
    server = Server()
    try:
        for database in databases:
            # Debug databases are named after the configured db plus '_test'.
            dbname = database[0] + '_test'
            if not quiet:
                self.stdout.write('Deleting DB: %s\n' % dbname)
            server.delete_db(dbname)
        if not quiet:
            self.stdout.write('done\n')
    except Exception as e:  # was `except Exception, e` — removed in Python 3
        # Best-effort cleanup: report the failure and carry on.
        if not quiet:
            self.stdout.write(
                'Failed to delete debug databases in CouchDB: %s ' % e)
def handle(self, *args, **options):
    # Management-command entry point: empties DOCUMENT_ROOT on disk and then
    # drops every CouchDB debug ('_test') database listed in settings.
    quiet = options.get('quiet', False)
    prod_indexes = options.get('prod-indexes', False)  # parsed but unused here
    docs_root = os.path.normpath(settings.DOCUMENT_ROOT)
    if not quiet:
        self.stdout.write('Cleaning UP stored files in DOCUMENT_ROOT: %s \n' % docs_root)
    # Recreate an empty document root.
    shutil.rmtree(docs_root)
    os.makedirs(docs_root)
    if not quiet:
        self.stdout.write('done\n')
    if not quiet:
        self.stdout.write('Deleting CouchDB debug mode databases.\n')
    databases = settings.COUCHDB_DATABASES
    server = Server()
    try:
        for database in databases:
            # Debug databases are named after the configured db plus '_test'.
            dbname = database[0] + '_test'
            if not quiet:
                self.stdout.write('Deleting DB: %s\n' % dbname)
            server.delete_db(dbname)
        if not quiet:
            self.stdout.write('done\n')
    except Exception, e:
        # Best-effort cleanup: report the failure and carry on.
        if not quiet:
            self.stdout.write('Failed to delete debug databases in CouchDB: %s ' % e)
        pass
class CouchDBServer(object):
    """Thin wrapper around a couchdbkit Server configured from the app config.

    Resolves the server URI, sets up HTTP basic auth when credentials are
    configured, and exposes a small workspace-oriented database API.
    """

    def __init__(self):
        self.__get_server_uri()
        self.__authenticate()
        self.__connect()

    def __get_server_uri(self):
        # Pick the plain or SSL port depending on the configured protocol.
        if config.couchdb.protocol == 'http':
            couchdb_port = config.couchdb.port
        else:
            couchdb_port = config.couchdb.ssl_port
        self.__couchdb_uri = "%s://%s:%s" % (
            config.couchdb.protocol, config.couchdb.host, couchdb_port)

    def __authenticate(self):
        # Only attach a BasicAuth filter when both credentials are present.
        user = config.couchdb.user
        passwd = config.couchdb.password
        if user and passwd:
            basic_auth = restkit.BasicAuth(user, passwd)
            self.__auth_resource = CouchdbResource(filters=[basic_auth])
        else:
            self.__auth_resource = None

    def __connect(self):
        self.__server = Server(uri=self.__couchdb_uri,
                               resource_instance=self.__auth_resource)

    def list_workspaces(self):
        # Keep only database names that qualify as usable workspaces.
        return filter(is_usable_workspace, self.__server.all_dbs())

    def get_workspace_handler(self, ws_name):
        return self.__server.get_db(ws_name)

    def get_or_create_db(self, ws_name):
        return self.__server.get_or_create_db(ws_name)

    def create_db(self, ws_name):
        return self.__server.create_db(ws_name)

    def delete_db(self, ws_name):
        return self.__server.delete_db(ws_name)
def setUp(self):
    """Create the test CouchDB database and load the fixture documents
    (design doc, worker record, DNS template and IP map) via CouchdbUploader.
    """
    self.db_conf = {
        "couchdb_uri": "http://*****:*****@cdb:5984",
        "couchdb_db": "desk_tester",
    }
    self.conf = {
        "powerdns_backend": "sqlite",
        "powerdns_db": "/var/services/data/powerdns/pdns_dnsa.test.sqlite3",
        "powerdns_name": "dnsa.test",
        "powerdns_primary": "dnsa.test",
        "worker_is_foreman": True,
    }
    self.conf.update(self.db_conf)
    s = Server(self.db_conf["couchdb_uri"])
    self.s = s
    s.create_db(self.db_conf['couchdb_db'])
    self.db = self.s.get_db(self.db_conf["couchdb_db"])
    self.couch_up = CouchdbUploader(
        path=os.path.dirname(__file__),
        auth=('admin', 'admin'),
        **self.db_conf
    )
    status_code = self.couch_up.put(
        data="@fixtures/couchdb-design.json",
        doc_id="_design/{couchdb_db}"
    )
    # was `if not status_code == 201` — `!=` is the idiomatic comparison
    if status_code != 201:
        # Design doc upload failed: drop the database so reruns start clean.
        s.delete_db(self.db_conf["couchdb_db"])
    worker_id = "worker-foreman"
    d = {
        "_id": worker_id,
        "type": "worker",
        "hostname": "foreman",
        "provides": {
            "domain": [{"backend": "powerdns", "name": "dnsa.test"}]
        }
    }
    # assertEqual gives a useful "X != 201" message on failure, unlike
    # the original assertTrue(... == 201) which only reports False.
    self.assertEqual(
        self.couch_up.put(data=json.dumps(d), doc_id=worker_id), 201
    )
    self.assertEqual(
        self.couch_up.put(data="@fixtures/couchdb-template-dns.json",
                          doc_id="template-email"), 201
    )
    self.assertEqual(
        self.couch_up.put(data="@fixtures/couchdb-map-ips.json",
                          doc_id="map-ips"), 201
    )
class Couch(object):
    """Helper around a throwaway 'test' database on the local CouchDB server.

    Note: `class Couch():` was an old-style class under Python 2; inheriting
    from object makes it new-style without changing the public interface.
    """

    def __init__(self):
        self.server = Server()
        # NOTE(review): delete_db raises if 'test' does not exist — presumably
        # the database always survives between runs; confirm, or guard this
        # call against the server's not-found error.
        self.server.delete_db('test')
        self.db = self.server.get_or_create_db('test')

    def populate(self):
        """Insert the five fixture documents in one bulk save."""
        things = [
            {"name": "Vishnu"},
            {"name": "Lakshmi"},
            {"name": "Ganesha"},
            {"name": "Krishna"},
            {"name": "Murugan"},
        ]
        self.db.save_docs(things)

    def count(self):
        """Return the number of documents currently in the database."""
        return self.db.all_docs().count()
class Couch():
    """Wraps a scratch 'test' database on the local CouchDB server."""

    def __init__(self):
        self.server = Server()
        self.server.delete_db('test')
        self.db = self.server.get_or_create_db('test')

    def populate(self):
        """Store a fixed set of sample documents in one bulk save."""
        names = ["Vishnu", "Lakshmi", "Ganesha", "Krishna", "Murugan"]
        docs = [{"name": deity} for deity in names]
        self.db.save_docs(docs)

    def count(self):
        """Number of documents currently stored."""
        return self.db.all_docs().count()
class DbConnectorCouchTestSuite(unittest.TestCase):
    """Integration tests for CouchDbConnector against a live CouchDB server."""

    def setUp(self):
        # Fresh, randomly named workspace database per test.
        self.couch_srv = Server(uri=CONF.getCouchURI())
        self.db_name = new_random_workspace_name()
        self.db = self.couch_srv.create_db(self.db_name)

    def tearDown(self):
        self.couch_srv.delete_db(self.db_name)
        # Give CouchDB a moment to finish the delete before the next test.
        time.sleep(3)

    def test_save_Document(self):
        couchConnector = CouchDbConnector(self.db)
        doc = {'_id': '123', 'data': 'some data'}
        couchConnector.saveDocument(doc)
        # Read back directly through the db handle, bypassing the connector.
        doc_from_db = self.db.get('123')
        self.assertNotEquals(doc_from_db, None,
                             "Document should be retrieved")
        self.assertEquals(doc_from_db.get('data'), 'some data',
                          "Data retrieved should be the same as data saved")

    def test_get_Document(self):
        couchConnector = CouchDbConnector(self.db)
        doc = {'_id': '123', 'data': 'some data'}
        couchConnector.saveDocument(doc)
        doc_retrieved = couchConnector.getDocument('123')
        self.assertNotEquals(doc_retrieved, None,
                             "Document should be retrieved")
        self.assertEquals(doc_retrieved.get('data'), 'some data',
                          "Data retrieved should be the same as data saved")

    def test_remove_Document(self):
        couchConnector = CouchDbConnector(self.db)
        doc = {'_id': '123', 'data': 'some data'}
        couchConnector.saveDocument(doc)
        couchConnector.remove('123')
        # After removal the direct get should fail with ResourceNotFound.
        try:
            doc_from_db = self.db.get('123')
        except ResourceNotFound:
            doc_from_db = None
        self.assertEquals(doc_from_db, None, "Document should be None")

    def test_get_by_parent_and_type(self):
        couchConnector = CouchDbConnector(self.db)
        doc = {
            '_id': '123',
            'type': 'father',
            'parent': None,
        }
        couchConnector.saveDocument(doc)
        doc = {
            '_id': '456',
            'type': 'child',
            'parent': '123',
        }
        couchConnector.saveDocument(doc)
        doc = {
            '_id': '789',
            'type': 'child',
            'parent': '123',
        }
        couchConnector.saveDocument(doc)
        ids = couchConnector.getDocsByFilter(parentId='123', type='child')
        self.assertEquals(len(ids), 2,
                          "There should be two 'childs' with parent '123'")
        self.assertIn('456', ids,
                      "Child '456' should be in the list of childs")
        self.assertIn('789', ids,
                      "Child '789' should be in the list of childs")
        # Filters that match nothing must return an empty result.
        ids = couchConnector.getDocsByFilter(parentId='123', type='son')
        self.assertEquals(len(ids), 0,
                          "There shouldn't be any 'son' with parent '123'")
        ids = couchConnector.getDocsByFilter(parentId='456', type='child')
        self.assertEquals(len(ids), 0,
                          "There shouldn't be any 'child' with parent '456'")
class CouchdbManager(PersistenceManager):
    """ This is a couchdb manager for the workspace, it will load from the couchdb databases"""

    def __init__(self, uri):
        # Sequence number of the last change acknowledged from CouchDB.
        self._last_seq_ack = 0
        model.api.log("Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self.__dbs = {}        # workspace name -> cached database handle
        self.__seq_nums = {}   # workspace name -> last seen update_seq
        self.__serv = NoConectionServer()  # no-op stub until connected
        self.mutex = threading.Lock()      # guards __seq_nums
        self._available = False
        try:
            self.testCouchUrl(uri)
            url = urlparse(uri)
            print("Setting user,pass %s %s" % (url.username, url.password))
            self.__serv = Server(uri=uri)
            #print dir(self.__serv)
            self.__serv.resource_class.credentials = (url.username, url.password)
            self._available = True
        except:
            model.api.log("No route to couchdb server on: %s" % uri)
            print(traceback.format_exc())

    def isAvailable(self):
        # True once a connection to the server has been established.
        return self._available

    def lostConnectionResolv(self):
        # Drop cached handles and fall back to the no-op server stub.
        self._lostConnection = True
        self.__dbs.clear()
        self.__serv = NoConectionServer()

    def reconnect(self):
        """Try to re-establish the server connection; return True on success."""
        ret_val = False
        ur = self.__uri
        if CouchdbManager.testCouch(ur):
            self.__serv = Server(uri=ur)
            self.__dbs.clear()
            self._lostConnection = False
            ret_val = True
        return ret_val

    @staticmethod
    def testCouch(uri):
        # Reachability probe: raw TCP connect to the host/port from the URI.
        host, port = None, None
        try:
            import socket
            url = urlparse(uri)
            proto = url.scheme
            host = url.hostname
            port = url.port
            # Fall back to the protocol's default port when none is given.
            port = port if port else socket.getservbyname(proto)
            s = socket.socket()
            s.settimeout(1)
            s.connect((host, int(port)))
        except:
            return False
        model.api.log("Connecting Couch to: %s:%s" % (host, port))
        return True

    def testCouchUrl(self, uri):
        # Raises on connection failure (caller catches in __init__).
        url = urlparse(uri)
        proto = url.scheme
        host = url.hostname
        port = url.port
        self.test(host, int(port))

    def test(self, address, port):
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))

    @trap_timeout
    def getWorkspacesNames(self):
        # Internal CouchDB databases start with "_" and are not workspaces.
        return filter(lambda x: not x.startswith("_"), self.__serv.all_dbs())

    def workspaceExists(self, name):
        return name in self.getWorkspacesNames()

    @trap_timeout
    def addWorkspace(self, aWorkspace):
        # CouchDB database names must be lowercase.
        self.__serv.create_db(aWorkspace.lower())
        return self.__getDb(aWorkspace)

    @trap_timeout
    def addDocument(self, aWorkspaceName, documentId, aDocument):
        self.incrementSeqNumber(aWorkspaceName)
        self.__getDb(aWorkspaceName)[documentId] = aDocument

    @trap_timeout
    def saveDocument(self, aWorkspaceName, aDocument):
        self.incrementSeqNumber(aWorkspaceName)
        model.api.log("Saving document in remote workspace %s" % aWorkspaceName)
        self.__getDb(aWorkspaceName).save_doc(aDocument, use_uuids=True,
                                              force_update=True)

    @trap_timeout
    def __getDb(self, aWorkspaceName):
        # Return the cached db handle, fetching and caching it (plus its
        # current update_seq) on first use.
        aWorkspaceName = aWorkspaceName.lower()
        model.api.log("Getting workspace [%s]" % aWorkspaceName)
        workspacedb = self.__dbs.get(aWorkspaceName,
                                     self.__serv.get_db(aWorkspaceName))
        if not self.__dbs.has_key(aWorkspaceName):
            model.api.log("Asking couchdb for workspace [%s]" % aWorkspaceName)
            self.__dbs[aWorkspaceName] = workspacedb
            self.__seq_nums[aWorkspaceName] = workspacedb.info()['update_seq']
        return workspacedb

    @trap_timeout
    def getDocument(self, aWorkspaceName, documentId):
        model.api.log("Getting document for workspace [%s]" % aWorkspaceName)
        return self.__getDb(aWorkspaceName).get(documentId)

    @trap_timeout
    def checkDocument(self, aWorkspaceName, documentName):
        return self.__getDb(aWorkspaceName).doc_exist(documentName)

    @trap_timeout
    def replicate(self, workspace, *targets_dbs, **kwargs):
        # Replicate the workspace db to each target (and back, if mutual).
        model.api.log("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                model.api.devlog(
                    "workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s"
                    % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path, **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                model.api.devlog(e)
                raise

    def __peerReplication(self, workspace, src, dst, **kwargs):
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)
        self.__serv.replicate(workspace, dst, mutual=mutual,
                              continuous=continuous, create_target=ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous=continuous, **kwargs)

    def getLastChangeSeq(self, workspaceName):
        self.mutex.acquire()
        seq = self.__seq_nums[workspaceName]
        self.mutex.release()
        return seq

    def setLastChangeSeq(self, workspaceName, seq_num):
        self.mutex.acquire()
        self.__seq_nums[workspaceName] = seq_num
        self.mutex.release()

    @trap_timeout
    def waitForDBChange(self, db_name, since=0, timeout=15000):
        """ Be warned this will return after the database has a change, if
        there was one before call it will return immediatly with the changes
        done"""
        changes = []
        last_seq = max(self.getLastChangeSeq(db_name), since)
        db = self.__getDb(db_name)
        with ChangesStream(db, feed="longpoll", since=last_seq,
                           timeout=timeout) as stream:
            for change in stream:
                # Keep only changes newer than what we have acknowledged.
                if change['seq'] > self.getLastChangeSeq(db_name):
                    changes.append(change)
            # Advance the stored sequence to the newest change seen.
            last_seq = reduce(lambda x, y: max(y['seq'], x), changes,
                              self.getLastChangeSeq(db_name))
            self.setLastChangeSeq(db_name, last_seq)
        return changes

    @trap_timeout
    def delete_all_dbs(self):
        # Destructive: removes every database on the server.
        for db in self.__serv.all_dbs():
            self.__serv.delete_db(db)

    @trap_timeout
    def existWorkspace(self, name):
        return name in self.__serv.all_dbs()

    @trap_timeout
    def workspaceDocumentsIterator(self, workspaceName):
        # Skip design documents (ids starting with "_").
        return filter(lambda x: not x["id"].startswith("_"),
                      self.__getDb(workspaceName).documents(include_docs=True))

    @trap_timeout
    def removeWorkspace(self, workspace_name):
        return self.__serv.delete_db(workspace_name)

    @trap_timeout
    def remove(self, workspace, host_id):
        self.incrementSeqNumber(workspace)
        self.__dbs[workspace].delete_doc(host_id)

    @trap_timeout
    def compactDatabase(self, aWorkspaceName):
        self.__getDb(aWorkspaceName).compact()

    def pushReports(self):
        # Upload the reports design doc and return its index URL.
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        workspace = self.__serv.get_or_create_db("reports")
        vmanager.addView(reports, workspace)
        return self.__uri + "/reports/_design/reports/index.html"

    def addViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName)
        for v in vmanager.getAvailableViews():
            vmanager.addView(v, workspace)

    def getViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName)
        return vmanager.getViews(workspace)

    def syncWorkspaceViews(self, workspaceName):
        # Install any available view that is not yet in the workspace.
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName)
        installed_views = vmanager.getViews(workspace)
        for v in vmanager.getAvailableViews():
            if v not in installed_views:
                vmanager.addView(v, workspace)

    def incrementSeqNumber(self, workspaceName):
        self.mutex.acquire()
        self.__seq_nums[workspaceName] += 1
        self.mutex.release()
class CouchDbManager(AbstractPersistenceManager):
    """
    This is a couchdb manager for the workspace,
    it will load from the couchdb databases
    """
    def __init__(self, uri):
        super(CouchDbManager, self).__init__()
        getLogger(self).debug(
            "Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self.__serv = NoConectionServer()  # no-op stub until connected
        self._available = False
        try:
            if uri is not None:
                self.testCouchUrl(uri)
                url = urlparse(uri)
                getLogger(self).debug(
                    "Setting user,pass %s %s" % (url.username, url.password))
                self.__serv = Server(uri=uri)
                self.__serv.resource_class.credentials = (url.username, url.password)
                self._available = True
                self.pushReports()
                self._loadDbs()
        except:
            getLogger(self).warn("No route to couchdb server on: %s" % uri)
            getLogger(self).debug(traceback.format_exc())

    #@trap_timeout
    def _create(self, name):
        # CouchDB database names must be lowercase.
        db = self.__serv.create_db(name.lower())
        return CouchDbConnector(db)

    #@trap_timeout
    def _delete(self, name):
        self.__serv.delete_db(name)

    #@trap_timeout
    def _loadDbs(self):
        # Register every non-internal, non-reports db for lazy initialization.
        conditions = lambda x: not x.startswith("_") and x != 'reports'
        for dbname in filter(conditions, self.__serv.all_dbs()):
            if dbname not in self.dbs.keys():
                getLogger(self).debug(
                    "Asking for dbname[%s], registering for lazy initialization"
                    % dbname)
                self.dbs[dbname] = lambda x: self._loadDb(x)

    def _loadDb(self, dbname):
        # Replace the lazy placeholder with a real connector.
        db = self.__serv.get_db(dbname)
        seq = db.info()['update_seq']
        self.dbs[dbname] = CouchDbConnector(db, seq_num=seq)
        return self.dbs[dbname]

    #@trap_timeout
    def pushReports(self):
        # Upload the reports design doc and return its index URL.
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        workspace = self.__serv.get_or_create_db("reports")
        vmanager.addView(reports, workspace)
        return self.__uri + "/reports/_design/reports/index.html"

    def lostConnectionResolv(self):
        # BUG FIX: was `self.__dbs.clear()`. This class never assigns
        # a __dbs attribute — the database registry is `self.dbs`,
        # inherited from AbstractPersistenceManager and used by
        # _loadDbs/_loadDb above — so the old code raised AttributeError.
        self._lostConnection = True
        self.dbs.clear()
        self.__serv = NoConectionServer()

    def reconnect(self):
        """Try to re-establish the server connection; return True on success."""
        ret_val = False
        ur = self.__uri
        if CouchDbManager.testCouch(ur):
            self.__serv = Server(uri=ur)
            # BUG FIX: same `self.__dbs` -> `self.dbs` correction as in
            # lostConnectionResolv.
            self.dbs.clear()
            self._lostConnection = False
            ret_val = True
        return ret_val

    @staticmethod
    def testCouch(uri):
        # Reachability probe: raw TCP connect to the host/port from the URI.
        if uri is not None:
            host, port = None, None
            try:
                import socket
                url = urlparse(uri)
                proto = url.scheme
                host = url.hostname
                port = url.port
                # Fall back to the protocol's default port when none is given.
                port = port if port else socket.getservbyname(proto)
                s = socket.socket()
                s.settimeout(1)
                s.connect((host, int(port)))
            except:
                return False
            #getLogger(CouchdbManager).info("Connecting Couch to: %s:%s" % (host, port))
            return True

    def testCouchUrl(self, uri):
        # Raises on connection failure (caller catches in __init__).
        if uri is not None:
            url = urlparse(uri)
            proto = url.scheme
            host = url.hostname
            port = url.port
            self.test(host, int(port))

    def test(self, address, port):
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))

    #@trap_timeout
    def replicate(self, workspace, *targets_dbs, **kwargs):
        # Replicate the workspace db to each target (and back, if mutual).
        getLogger(self).debug("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                getLogger(self).info(
                    "workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s"
                    % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path, **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                getLogger(self).error(e)
                raise

    def __peerReplication(self, workspace, src, dst, **kwargs):
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)
        self.__serv.replicate(workspace, dst, mutual=mutual,
                              continuous=continuous, create_target=ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous=continuous, **kwargs)
class CouchdbManager(PersistenceManager):
    """ This is a couchdb manager for the workspace, it will load from the couchdb databases"""

    def __init__(self, uri):
        # Sequence number of the last change acknowledged from CouchDB.
        self._last_seq_ack = 0
        getLogger(self).debug("Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self.__dbs = {}        # workspace name -> cached database handle
        self.__seq_nums = {}   # workspace name -> last seen update_seq
        self.__serv = NoConectionServer()  # no-op stub until connected
        self.mutex = threading.Lock()      # guards __seq_nums
        self._available = False

        #setting the doc types to load from couch
        def get_types(subclasses):
            # Recursively collect class_signature for every class in the
            # list and all of its (transitive) subclasses.
            if len(subclasses):
                head = subclasses[0]
                tail = []
                if len(subclasses[1:]):
                    tail = subclasses[1:]
                return get_types(head.__subclasses__()) + [head.class_signature] + get_types(tail)
            return []
        self._model_object_types = get_types([ModelObject])
        try:
            if uri is not None:
                self.testCouchUrl(uri)
                url = urlparse(uri)
                getLogger(self).debug("Setting user,pass %s %s" % (url.username, url.password))
                self.__serv = Server(uri=uri)
                #print dir(self.__serv)
                self.__serv.resource_class.credentials = (url.username, url.password)
                self._available = True
        except:
            getLogger(self).warn("No route to couchdb server on: %s" % uri)
            getLogger(self).debug(traceback.format_exc())

    def isAvailable(self):
        # True once a connection to the server has been established.
        return self._available

    def lostConnectionResolv(self):
        # Drop cached handles and fall back to the no-op server stub.
        self._lostConnection = True
        self.__dbs.clear()
        self.__serv = NoConectionServer()

    def reconnect(self):
        """Try to re-establish the server connection; return True on success."""
        ret_val = False
        ur = self.__uri
        if CouchdbManager.testCouch(ur):
            self.__serv = Server(uri=ur)
            self.__dbs.clear()
            self._lostConnection = False
            ret_val = True
        return ret_val

    @staticmethod
    def testCouch(uri):
        # Reachability probe: raw TCP connect to the host/port from the URI.
        if uri is not None:
            host, port = None, None
            try:
                import socket
                url = urlparse(uri)
                proto = url.scheme
                host = url.hostname
                port = url.port
                # Fall back to the protocol's default port when none given.
                port = port if port else socket.getservbyname(proto)
                s = socket.socket()
                s.settimeout(1)
                s.connect((host, int(port)))
            except:
                return False
            getLogger(CouchdbManager).info("Connecting Couch to: %s:%s" % (host, port))
            return True

    def testCouchUrl(self, uri):
        # Raises on connection failure (caller catches in __init__).
        if uri is not None:
            url = urlparse(uri)
            proto = url.scheme
            host = url.hostname
            port = url.port
            self.test(host, int(port))

    def test(self, address, port):
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))

    @trap_timeout
    def getWorkspacesNames(self):
        # Internal CouchDB databases start with "_" and are not workspaces.
        return filter(lambda x: not x.startswith("_"), self.__serv.all_dbs())

    def workspaceExists(self, name):
        return name in self.getWorkspacesNames()

    @trap_timeout
    def addWorkspace(self, aWorkspace):
        # CouchDB database names must be lowercase.
        self.__serv.create_db(aWorkspace.lower())
        return self._getDb(aWorkspace)

    @trap_timeout
    def addDocument(self, aWorkspaceName, documentId, aDocument):
        # First call ensures the db handle (and its seq number) is cached.
        self._getDb(aWorkspaceName)
        self.incrementSeqNumber(aWorkspaceName)
        self._getDb(aWorkspaceName)[documentId] = aDocument

    @trap_timeout
    def saveDocument(self, aWorkspaceName, aDocument):
        self.incrementSeqNumber(aWorkspaceName)
        getLogger(self).debug("Saving document in remote workspace %s" % aWorkspaceName)
        return self._getDb(aWorkspaceName).save_doc(aDocument, use_uuids=True,
                                                    force_update=True)

    def _getDb(self, aWorkspaceName):
        # Cache-aware wrapper around __getDb.
        # NOTE(review): the has_key check uses the name as given while
        # __getDb lowercases it — presumably callers always pass lowercase
        # names; confirm, otherwise mixed-case names re-fetch every time.
        if not self.__dbs.has_key(aWorkspaceName):
            self.__getDb(aWorkspaceName)
        return self.__dbs.get(aWorkspaceName, None)

    @trap_timeout
    def __getDb(self, aWorkspaceName):
        # Fetch the db handle and cache it plus its current update_seq.
        aWorkspaceName = aWorkspaceName.lower()
        getLogger(self).debug("Getting workspace [%s]" % aWorkspaceName)
        workspacedb = self.__dbs.get(aWorkspaceName, self.__serv.get_db(aWorkspaceName))
        if not self.__dbs.has_key(aWorkspaceName):
            getLogger(self).debug("Asking couchdb for workspace [%s]" % aWorkspaceName)
            self.__dbs[aWorkspaceName] = workspacedb
            self.__seq_nums[aWorkspaceName] = workspacedb.info()['update_seq']
        return workspacedb

    @trap_timeout
    def getDocument(self, aWorkspaceName, documentId):
        getLogger(self).debug("Getting document for workspace [%s]" % aWorkspaceName)
        return self._getDb(aWorkspaceName).get(documentId)

    @trap_timeout
    def getDeletedDocument(self, aWorkspaceName, documentId, documentRev):
        # A deleted document can still be read by asking for its revision.
        return self._getDb(aWorkspaceName).get(documentId, rev=documentRev)

    @trap_timeout
    def checkDocument(self, aWorkspaceName, documentName):
        return self._getDb(aWorkspaceName).doc_exist(documentName)

    @trap_timeout
    def replicate(self, workspace, *targets_dbs, **kwargs):
        # Replicate the workspace db to each target (and back, if mutual).
        getLogger(self).debug("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                getLogger(self).info(
                    "workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s"
                    % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path, **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                getLogger(self).error(e)
                raise

    def __peerReplication(self, workspace, src, dst, **kwargs):
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)
        self.__serv.replicate(workspace, dst, mutual=mutual,
                              continuous=continuous, create_target=ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous=continuous, **kwargs)

    def getLastChangeSeq(self, workspaceName):
        self.mutex.acquire()
        seq = self.__seq_nums[workspaceName]
        self.mutex.release()
        return seq

    def setLastChangeSeq(self, workspaceName, seq_num):
        self.mutex.acquire()
        self.__seq_nums[workspaceName] = seq_num
        self.mutex.release()

    @trap_timeout
    def waitForDBChange(self, db_name, since=0, timeout=15000):
        """ Be warned this will return after the database has a change, if
        there was one before call it will return immediatly with the changes
        done"""
        changes = []
        last_seq = max(self.getLastChangeSeq(db_name), since)
        db = self._getDb(db_name)
        with ChangesStream(db, feed="longpoll", since=last_seq,
                           timeout=timeout) as stream:
            for change in stream:
                # Keep only changes newer than what we have acknowledged.
                if change['seq'] > self.getLastChangeSeq(db_name):
                    self.setLastChangeSeq(db_name, change['seq'])
                    if not change['id'].startswith('_design'):
                        #fake doc type for deleted objects
                        doc = {'type': 'unknown', '_deleted': 'False', '_rev': [0]}
                        if not change.get('deleted'):
                            doc = self.getDocument(db_name, change['id'])
                        changes.append(change_factory.create(doc))
        if len(changes):
            getLogger(self).debug("Changes from another instance")
        return changes

    @trap_timeout
    def delete_all_dbs(self):
        # Destructive: removes every database on the server.
        for db in self.__serv.all_dbs():
            self.__serv.delete_db(db)

    @trap_timeout
    def existWorkspace(self, name):
        return name in self.__serv.all_dbs()

    @trap_timeout
    def workspaceDocumentsIterator(self, workspaceName):
        return filter(self.filterConditions,
                      self._getDb(workspaceName).documents(include_docs=True))

    def filterConditions(self, doc):
        # A document qualifies when it is not a design doc and its type is
        # one of the known model object signatures.
        ret = True
        ret = ret and not doc["id"].startswith("_")
        ret = ret and doc['doc']["type"] in self._model_object_types
        return ret

    @trap_timeout
    def removeWorkspace(self, workspace_name):
        return self.__serv.delete_db(workspace_name)

    @trap_timeout
    def remove(self, workspace, host_id):
        self.incrementSeqNumber(workspace)
        self.__dbs[workspace].delete_doc(host_id)

    @trap_timeout
    def compactDatabase(self, aWorkspaceName):
        self._getDb(aWorkspaceName).compact()

    def pushReports(self):
        # Upload the reports design doc and return its index URL.
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        workspace = self.__serv.get_or_create_db("reports")
        vmanager.addView(reports, workspace)
        return self.__uri + "/reports/_design/reports/index.html"

    def addViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self._getDb(workspaceName)
        for v in vmanager.getAvailableViews():
            vmanager.addView(v, workspace)

    def getViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self._getDb(workspaceName)
        return vmanager.getViews(workspace)

    def syncWorkspaceViews(self, workspaceName):
        # Install any available view that is not yet in the workspace.
        vmanager = ViewsManager()
        workspace = self._getDb(workspaceName)
        installed_views = vmanager.getViews(workspace)
        for v in vmanager.getAvailableViews():
            if v not in installed_views:
                vmanager.addView(v, workspace)

    def incrementSeqNumber(self, workspaceName):
        self.mutex.acquire()
        # Lazily initialize the counter for unseen workspaces.
        if not self.__seq_nums.has_key(workspaceName):
            self.__seq_nums[workspaceName] = 0
        self.__seq_nums[workspaceName] += 1
        self.mutex.release()
class DbConnectorCouchTestSuite(unittest.TestCase):
    """Integration tests for CouchDbConnector against a live CouchDB server."""

    def setUp(self):
        # Fresh, randomly named workspace database per test.
        self.couch_srv = Server(uri=CONF.getCouchURI())
        self.db_name = new_random_workspace_name()
        self.db = self.couch_srv.create_db(self.db_name)

    def tearDown(self):
        self.couch_srv.delete_db(self.db_name)
        # Give CouchDB a moment to finish the delete before the next test.
        time.sleep(3)

    def test_save_Document(self):
        couchConnector = CouchDbConnector(self.db)
        doc = {"_id": "123", "data": "some data"}
        couchConnector.saveDocument(doc)
        # Read back directly through the db handle, bypassing the connector.
        doc_from_db = self.db.get("123")
        self.assertNotEquals(doc_from_db, None,
                             "Document should be retrieved")
        self.assertEquals(doc_from_db.get("data"), "some data",
                          "Data retrieved should be the same as data saved")

    def test_get_Document(self):
        couchConnector = CouchDbConnector(self.db)
        doc = {"_id": "123", "data": "some data"}
        couchConnector.saveDocument(doc)
        doc_retrieved = couchConnector.getDocument("123")
        self.assertNotEquals(doc_retrieved, None,
                             "Document should be retrieved")
        self.assertEquals(doc_retrieved.get("data"), "some data",
                          "Data retrieved should be the same as data saved")

    def test_remove_Document(self):
        couchConnector = CouchDbConnector(self.db)
        doc = {"_id": "123", "data": "some data"}
        couchConnector.saveDocument(doc)
        couchConnector.remove("123")
        # After removal the direct get should fail with ResourceNotFound.
        try:
            doc_from_db = self.db.get("123")
        except ResourceNotFound:
            doc_from_db = None
        self.assertEquals(doc_from_db, None, "Document should be None")

    def test_get_by_parent_and_type(self):
        couchConnector = CouchDbConnector(self.db)
        doc = {"_id": "123", "type": "father", "parent": None}
        couchConnector.saveDocument(doc)
        doc = {"_id": "456", "type": "child", "parent": "123"}
        couchConnector.saveDocument(doc)
        doc = {"_id": "789", "type": "child", "parent": "123"}
        couchConnector.saveDocument(doc)
        ids = couchConnector.getDocsByFilter(parentId="123", type="child")
        self.assertEquals(len(ids), 2,
                          "There should be two 'childs' with parent '123'")
        self.assertIn("456", ids,
                      "Child '456' should be in the list of childs")
        self.assertIn("789", ids,
                      "Child '789' should be in the list of childs")
        # Filters that match nothing must return an empty result.
        ids = couchConnector.getDocsByFilter(parentId="123", type="son")
        self.assertEquals(len(ids), 0,
                          "There shouldn't be any 'son' with parent '123'")
        ids = couchConnector.getDocsByFilter(parentId="456", type="child")
        self.assertEquals(len(ids), 0,
                          "There shouldn't be any 'child' with parent '456'")
def tearDown(self):
    """Drop the workspace database created for this test, if it still exists."""
    couch = Server(uri=CONF.getCouchURI())
    existing = couch.all_dbs()
    if self.dbname in existing:
        couch.delete_db(self.dbname)
def database(request, database_name):
    # Admin view for a single CouchDB database: supports emptying, deleting,
    # compacting, adding documents via registered forms, and importing data.
    server = Server(settings.COUCHDB_SERVER)
    # Destructive actions require a POSTed confirmation flag.
    if request.GET.get("empty") and request.POST.get("confirmation"):
        documents_deleted = empty_database(server, database_name)
        messages.success(
            request,
            "Database '%s' has been emptied of %i documents." % (database_name, documents_deleted)
        )
        return HttpResponseRedirect(reverse("cushion_database", args=(database_name,)))
    if request.GET.get("delete") and request.POST.get("confirmation"):
        server.delete_db(database_name)
        messages.success(request, "Database '%s' has been deleted." % database_name)
        return HttpResponseRedirect(reverse("cushion_index"))
    context = {}
    database = server.get_or_create_db(database_name)
    if request.GET.get("add"):
        # Offer every registered document form; instantiate the chosen one.
        context["add_forms"] = form_registry
        if "add_form" in request.GET and request.GET.get("add_form") in form_registry:
            add_form_cls = form_registry.get(request.GET.get("add_form"))
            add_form = add_form_cls(request.POST or None)
            if add_form.is_valid():
                document = add_form.save(commit=False)
                document.set_db(database)
                document.save()
                messages.success(
                    request,
                    "Document '%s' has been saved." % document.get_id
                )
                # Save and add another.
                if "add another" in request.POST["save"].lower():
                    redirect_url = "%s?add=1&add_form=%s" % (
                        reverse(
                            "cushion_database",
                            args=(database_name,)
                        ),
                        request.GET.get("add_form")
                    )
                # Save and view new document.
                else:
                    redirect_url = reverse(
                        "cushion_document",
                        args=(database_name, document.get_id,)
                    )
                return HttpResponseRedirect(redirect_url)
            context["add_form"] = add_form
    if request.GET.get("compact"):
        database.compact()
        messages.success(request, "Database '%s' has been compacted." % database_name)
        return HttpResponseRedirect(reverse("cushion_database", args=(database_name,)))
    # Data import: on row errors, stay on the page and show them.
    form = ImportDataForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        errors = form.import_data(database, request.FILES["file"])
        if len(errors) > 0:
            messages.error(
                request,
                "There was a problem with one or more rows in your data. Please correct these rows and try uploading again.")
            context["errors"] = errors
        else:
            messages.success(request, "Your data was imported successfully.")
            return HttpResponseRedirect(reverse("cushion_database", args=(database_name,)))
    # Fetch all documents defining a key range that includes only design
    # documents.
    views_by_design_doc = {}
    for design_doc in database.all_docs(startkey="_design", endkey="_design0"):
        doc = database.get(design_doc["id"])
        if "views" in doc:
            # Convert "_design/mydesigndoc" to "mydesigndoc".
            design_doc_name = design_doc["id"].split("/")[1]
            views_by_design_doc[design_doc_name] = sorted(doc["views"].keys())
    context.update({
        "title": "Database: %s" % database_name,
        "server": server,
        "database_info": database.info(),
        "database_name": database.dbname,
        "views_by_design_doc": views_by_design_doc,
        "form": form,
        "confirm_empty": request.GET.get("empty"),
        "confirm_delete": request.GET.get("delete")
    })
    return render_to_response("cushion/database.html", context,
                              context_instance=RequestContext(request))
class CouchDbManager(AbstractPersistenceManager):
    """
    This is a couchdb manager for the workspace,
    it will load from the couchdb databases.

    A daemon thread pings the server every second and invokes
    *couch_exception_callback* after three consecutive failures.
    """
    def __init__(self, uri, couch_exception_callback):
        super(CouchDbManager, self).__init__()
        getLogger(self).debug(
            "Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self._available = False
        self.couch_exception_callback = couch_exception_callback

        # Start the connection watchdog before attempting to connect.
        test_couch_thread = threading.Thread(target=self.continuosly_check_connection)
        test_couch_thread.daemon = True
        test_couch_thread.start()
        try:
            if uri is not None:
                self.testCouchUrl(uri)
                url = urlparse(uri)
                getLogger(self).debug(
                    "Setting user,pass %s %s" % (url.username, url.password))
                self.__serv = Server(uri=uri)
                self.__serv.resource_class.credentials = (url.username, url.password)
                self._available = True
                self.pushReports()
                self._loadDbs()
        except:
            getLogger(self).warn("No route to couchdb server on: %s" % uri)
            getLogger(self).debug(traceback.format_exc())

    def continuosly_check_connection(self):
        """Intended to use on a separate thread. Call module-level
        function test_couch every second to see if the server at
        server_uri is still reachable. Call the exception_callback
        if we can't access the server three times in a row.
        """
        tolerance = 0
        server_uri = self.__uri
        while True:
            time.sleep(1)
            test_was_successful = test_couch(server_uri)
            if test_was_successful:
                tolerance = 0
            else:
                tolerance += 1
                if tolerance == 3:
                    self.couch_exception_callback()
                    return False  # kill the thread if something went wrong

    def _create(self, name):
        # CouchDB database names must be lowercase.
        db = self.__serv.create_db(name.lower())
        return CouchDbConnector(db)

    def _delete(self, name):
        self.__serv.delete_db(name)

    def _loadDbs(self):
        # Register every non-internal, non-blacklisted db for lazy init.
        def conditions(database):
            begins_with_underscore = database.startswith("_")
            is_blacklisted = database in CONST_BLACKDBS
            return not begins_with_underscore and not is_blacklisted

        try:
            for dbname in filter(conditions, self.__serv.all_dbs()):
                if dbname not in self.dbs.keys():
                    getLogger(self).debug(
                        "Asking for dbname[%s], registering for lazy initialization"
                        % dbname)
                    self.dbs[dbname] = lambda x: self._loadDb(x)
        except restkit.errors.RequestError as req_error:
            getLogger(self).error("Couldn't load databases. "
                                  "The connection to the CouchDB was probably lost. ")

    def _loadDb(self, dbname):
        # Replace the lazy placeholder with a real connector.
        db = self.__serv.get_db(dbname)
        seq = db.info()['update_seq']
        self.dbs[dbname] = CouchDbConnector(db, seq_num=seq)
        return self.dbs[dbname]

    def refreshDbs(self):
        """Refresh databases using the inherited method. On exception,
        assume no databases are available.
        """
        try:
            # BUG FIX: the unbound parent method was called without `self`
            # (AbstractPersistenceManager.refreshDbs()), which raises a
            # TypeError instead of refreshing.
            return AbstractPersistenceManager.refreshDbs(self)
        except:
            return []

    def pushReports(self):
        # Upload the reports design doc; requires admin rights on the server.
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        try:
            workspace = self.__serv.get_or_create_db("reports")
            vmanager.addView(reports, workspace)
        except:
            getLogger(self).warn(
                "Reports database couldn't be uploaded. You need to be an admin to do it")
        return self.__uri + "/reports/_design/reports/index.html"

    @staticmethod
    def testCouch(uri):
        """Redirect to the module-level function of the name, which
        serves the same purpose and is used by other classes too."""
        return test_couch(uri)

    def testCouchUrl(self, uri):
        # Raises on connection failure (caller catches in __init__).
        if uri is not None:
            url = urlparse(uri)
            host = url.hostname
            port = url.port
            self.test(host, int(port))

    def test(self, address, port):
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))

    def replicate(self, workspace, *targets_dbs, **kwargs):
        # Replicate the workspace db to each target (and back, if mutual).
        getLogger(self).debug("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                getLogger(self).info(
                    "workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s"
                    % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path, **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                getLogger(self).error(e)
                raise

    def __peerReplication(self, workspace, src, dst, **kwargs):
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)
        self.__serv.replicate(workspace, dst, mutual=mutual,
                              continuous=continuous, create_target=ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous=continuous, **kwargs)
class CouchdbManager(PersistenceManager):
    """CouchDB manager for the workspace: loads workspaces from the
    databases of the CouchDB server at `uri`.

    Caches one Database handle and one update_seq per workspace; the
    sequence-number map is guarded by `self.mutex`.
    """

    def __init__(self, uri):
        self._last_seq_ack = 0
        model.api.log("Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self.__dbs = {}        # workspace name -> Database handle
        self.__seq_nums = {}   # workspace name -> last seen update_seq
        self.__serv = NoConectionServer()
        self.mutex = threading.Lock()  # guards __seq_nums
        self._available = False
        try:
            self.testCouchUrl(uri)
            self.__serv = Server(uri=uri)
            self._available = True
        except:
            model.api.log("No route to couchdb server on: %s" % uri)

    def isAvailable(self):
        """True when the server answered during __init__."""
        return self._available

    def lostConnectionResolv(self):
        """Mark the connection as lost and drop all cached handles."""
        self._lostConnection = True
        self.__dbs.clear()
        self.__serv = NoConectionServer()

    def reconnect(self):
        """Try to re-establish the server connection; return success."""
        ret_val = False
        ur = self.__uri
        if CouchdbManager.testCouch(ur):
            self.__serv = Server(uri=ur)
            self.__dbs.clear()
            self._lostConnection = False
            ret_val = True
        return ret_val

    @staticmethod
    def testCouch(uri):
        """Return True if a 1s TCP connection to uri's host:port succeeds."""
        host, port = None, None
        try:
            import socket
            proto, netloc, _, _, _ = urlsplit(uri)
            host, port = splitport(netloc)
            # Fall back to the scheme's well-known port when none is given.
            port = port if port else socket.getservbyname(proto)
            s = socket.socket()
            s.settimeout(1)
            s.connect((host, int(port)))
        except:
            return False
        model.api.log("Connecting Couch to: %s:%s" % (host, port))
        return True

    def testCouchUrl(self, uri):
        """Raise socket.error when uri's host:port is unreachable."""
        _, netloc, _, _, _ = urlsplit(uri)
        host, port = splitport(netloc)
        self.test(host, int(port))

    def test(self, address, port):
        """Probe a TCP endpoint with a 1-second timeout."""
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))

    @trap_timeout
    def getWorkspacesNames(self):
        """All non-internal (no leading underscore) database names."""
        return filter(lambda x: not x.startswith("_"), self.__serv.all_dbs())

    def workspaceExists(self, name):
        return name in self.getWorkspacesNames()

    @trap_timeout
    def addWorkspace(self, aWorkspace):
        """Create the workspace database and return its cached handle."""
        self.__serv.create_db(aWorkspace.lower())
        return self.__getDb(aWorkspace)

    @trap_timeout
    def addDocument(self, aWorkspaceName, documentId, aDocument):
        self.incrementSeqNumber(aWorkspaceName)
        self.__getDb(aWorkspaceName)[documentId] = aDocument

    @trap_timeout
    def saveDocument(self, aWorkspaceName, aDocument):
        self.incrementSeqNumber(aWorkspaceName)
        model.api.log("Saving document in remote workspace %s" % aWorkspaceName)
        self.__getDb(aWorkspaceName).save_doc(aDocument, use_uuids=True,
                                              force_update=True)

    @trap_timeout
    def __getDb(self, aWorkspaceName):
        """Return the cached Database handle for a workspace, fetching it
        from the server (and recording its update_seq) on first access."""
        aWorkspaceName = aWorkspaceName.lower()
        model.api.log("Getting workspace [%s]" % aWorkspaceName)
        if aWorkspaceName not in self.__dbs:
            # Fetch lazily. The previous code passed get_db() as the default
            # of dict.get(), which issued a server round-trip on every call
            # even when the handle was already cached.
            model.api.log("Asking couchdb for workspace [%s]" % aWorkspaceName)
            workspacedb = self.__serv.get_db(aWorkspaceName)
            self.__dbs[aWorkspaceName] = workspacedb
            self.__seq_nums[aWorkspaceName] = workspacedb.info()['update_seq']
        return self.__dbs[aWorkspaceName]

    @trap_timeout
    def getDocument(self, aWorkspaceName, documentId):
        model.api.log("Getting document for workspace [%s]" % aWorkspaceName)
        return self.__getDb(aWorkspaceName).get(documentId)

    @trap_timeout
    def checkDocument(self, aWorkspaceName, documentName):
        return self.__getDb(aWorkspaceName).doc_exist(documentName)

    @trap_timeout
    def replicate(self, workspace, *targets_dbs, **kwargs):
        """Replicate `workspace` from this server to every target DB.

        ResourceNotFound is re-raised as-is; other failures are logged
        and re-raised.
        """
        model.api.log("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                model.api.devlog(
                    "workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s"
                    % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path,
                                       **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                model.api.devlog(e)
                raise

    def __peerReplication(self, workspace, src, dst, **kwargs):
        """Start (optionally mutual, continuous) replication between peers."""
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)
        self.__serv.replicate(workspace, dst, mutual=mutual,
                              continuous=continuous, create_target=ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous=continuous, **kwargs)

    def getLastChangeSeq(self, workspaceName):
        # `with` releases the lock even when the key lookup raises,
        # unlike the old bare acquire()/release() pair.
        with self.mutex:
            return self.__seq_nums[workspaceName]

    def setLastChangeSeq(self, workspaceName, seq_num):
        with self.mutex:
            self.__seq_nums[workspaceName] = seq_num

    @trap_timeout
    def waitForDBChange(self, db_name, since=0, timeout=15000):
        """ Be warned this will return after the database has a change, if
        there was one before call it will return immediatly with the
        changes done"""
        changes = []
        last_seq = max(self.getLastChangeSeq(db_name), since)
        db = self.__getDb(db_name)
        with ChangesStream(db, feed="longpoll", since=last_seq,
                           timeout=timeout) as stream:
            for change in stream:
                if change['seq'] > self.getLastChangeSeq(db_name):
                    changes.append(change)
        # Advance the stored sequence number to the highest one seen.
        last_seq = reduce(lambda x, y: max(y['seq'], x), changes,
                          self.getLastChangeSeq(db_name))
        self.setLastChangeSeq(db_name, last_seq)
        return changes

    @trap_timeout
    def delete_all_dbs(self):
        """Drop every database on the server (including internal ones)."""
        for db in self.__serv.all_dbs():
            self.__serv.delete_db(db)

    @trap_timeout
    def existWorkspace(self, name):
        return name in self.__serv.all_dbs()

    @trap_timeout
    def workspaceDocumentsIterator(self, workspaceName):
        # Skip design documents (ids starting with "_").
        return filter(lambda x: not x["id"].startswith("_"),
                      self.__getDb(workspaceName).documents(include_docs=True))

    @trap_timeout
    def removeWorkspace(self, workspace_name):
        return self.__serv.delete_db(workspace_name)

    @trap_timeout
    def remove(self, workspace, host_id):
        self.incrementSeqNumber(workspace)
        self.__dbs[workspace].delete_doc(host_id)

    @trap_timeout
    def compactDatabase(self, aWorkspaceName):
        self.__getDb(aWorkspaceName).compact()

    def pushReports(self):
        """Upload the reports design document; return the reports index URL."""
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        workspace = self.__serv.get_or_create_db("reports")
        vmanager.addView(reports, workspace)
        return self.__uri + "/reports/_design/reports/index.html"

    def addViews(self, workspaceName):
        """Install every available design view into the workspace."""
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName)
        for v in vmanager.getAvailableViews():
            vmanager.addView(v, workspace)

    def getViews(self, workspaceName):
        """Views currently installed in the workspace."""
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName)
        return vmanager.getViews(workspace)

    def syncWorkspaceViews(self, workspaceName):
        """Install only the available views that are missing."""
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName)
        installed_views = vmanager.getViews(workspace)
        for v in vmanager.getAvailableViews():
            if v not in installed_views:
                vmanager.addView(v, workspace)

    def incrementSeqNumber(self, workspaceName):
        with self.mutex:
            self.__seq_nums[workspaceName] += 1
from pylonsapp.tests import * from couchdbkit import Server try: server = Server() if server: pass except: server = None else: try: server.delete_db('formalchemy_test') except: pass db = server.get_or_create_db('formalchemy_test') def couchdb_runing(func): if server: return func else: def f(self): pass return f class TestCouchdbController(TestController): @couchdb_runing def test_index(self): response = self.app.get('/couchdb')
class ClientViewTestCase(unittest.TestCase):
    """View-related integration tests for the couchdbkit client.

    Requires a reachable CouchDB server; each test creates and deletes
    its own `couchdbkit_test` database.
    """

    def setUp(self):
        self.couchdb = CouchdbResource()
        self.Server = Server()

    def tearDown(self):
        # Best-effort cleanup of databases a failed test may leave behind.
        try:
            del self.Server["couchdbkit_test"]
        except:
            pass
        try:
            self.Server.delete_db("couchdbkit_test2")
        except:
            pass

    def testView(self):
        """A saved design-doc view returns the matching documents."""
        db = self.Server.create_db("couchdbkit_test")
        # save 2 docs
        doc1 = {"_id": "test", "string": "test", "number": 4,
                "docType": "test"}
        db.save_doc(doc1)
        doc2 = {"_id": "test2", "string": "test", "number": 2,
                "docType": "test"}
        db.save_doc(doc2)
        design_doc = {
            "_id": "_design/test",
            "language": "javascript",
            "views": {
                "all": {
                    "map": """function(doc) { if (doc.docType == "test") { emit(doc._id, doc); }}"""
                }
            },
        }
        db.save_doc(design_doc)
        doc3 = db.get("_design/test")
        self.assertIsNotNone(doc3)
        results = db.view("test/all")
        self.assertEqual(len(results), 2)
        del self.Server["couchdbkit_test"]

    def testAllDocs(self):
        """_all_docs view and Database.all_docs() agree."""
        db = self.Server.create_db("couchdbkit_test")
        # save 2 docs
        doc1 = {"_id": "test", "string": "test", "number": 4,
                "docType": "test"}
        db.save_doc(doc1)
        doc2 = {"_id": "test2", "string": "test", "number": 2,
                "docType": "test"}
        db.save_doc(doc2)
        self.assertEqual(db.view("_all_docs").count(), 2)
        self.assertEqual(db.view("_all_docs").all(), db.all_docs().all())
        del self.Server["couchdbkit_test"]

    def testCount(self):
        """ViewResults.count() matches the number of emitted rows."""
        db = self.Server.create_db("couchdbkit_test")
        # save 2 docs
        doc1 = {"_id": "test", "string": "test", "number": 4,
                "docType": "test"}
        db.save_doc(doc1)
        doc2 = {"_id": "test2", "string": "test", "number": 2,
                "docType": "test"}
        db.save_doc(doc2)
        design_doc = {
            "_id": "_design/test",
            "language": "javascript",
            "views": {"all": {"map": """function(doc) { if (doc.docType == "test") { emit(doc._id, doc); }}"""}},
        }
        db.save_doc(design_doc)
        count = db.view("/test/all").count()
        self.assertEqual(count, 2)
        del self.Server["couchdbkit_test"]

    def testTemporaryView(self):
        """An ad-hoc (temporary) view runs without a stored design doc."""
        db = self.Server.create_db("couchdbkit_test")
        # save 2 docs
        doc1 = {"_id": "test", "string": "test", "number": 4,
                "docType": "test"}
        db.save_doc(doc1)
        doc2 = {"_id": "test2", "string": "test", "number": 2,
                "docType": "test"}
        db.save_doc(doc2)
        design_doc = {
            "map": """function(doc) { if (doc.docType == "test") { emit(doc._id, doc); }}"""
        }
        results = db.temp_view(design_doc)
        self.assertEqual(len(results), 2)
        del self.Server["couchdbkit_test"]

    def testView2(self):
        """Two views in one design doc filter documents independently."""
        db = self.Server.create_db("couchdbkit_test")
        # save 2 docs
        doc1 = {"_id": "test1", "string": "test", "number": 4,
                "docType": "test"}
        db.save_doc(doc1)
        doc2 = {"_id": "test2", "string": "test", "number": 2,
                "docType": "test"}
        db.save_doc(doc2)
        doc3 = {"_id": "test3", "string": "test", "number": 2,
                "docType": "test2"}
        db.save_doc(doc3)
        design_doc = {
            "_id": "_design/test",
            "language": "javascript",
            "views": {
                "with_test": {
                    "map": """function(doc) { if (doc.docType == "test") { emit(doc._id, doc); }}"""
                },
                "with_test2": {
                    "map": """function(doc) { if (doc.docType == "test2") { emit(doc._id, doc); }}"""
                },
            },
        }
        db.save_doc(design_doc)
        # yo view is callable !
        results = db.view("test/with_test")
        self.assertEqual(len(results), 2)
        results = db.view("test/with_test2")
        self.assertEqual(len(results), 1)
        del self.Server["couchdbkit_test"]

    def testViewWithParams(self):
        """key / keys / startkey / endkey parameters filter view rows."""
        db = self.Server.create_db("couchdbkit_test")
        # save 2 docs
        doc1 = {"_id": "test1", "string": "test", "number": 4,
                "docType": "test", "date": "20081107"}
        db.save_doc(doc1)
        doc2 = {"_id": "test2", "string": "test", "number": 2,
                "docType": "test", "date": "20081107"}
        db.save_doc(doc2)
        doc3 = {"_id": "test3", "string": "machin", "number": 2,
                "docType": "test", "date": "20081007"}
        db.save_doc(doc3)
        doc4 = {"_id": "test4", "string": "test2", "number": 2,
                "docType": "test", "date": "20081108"}
        db.save_doc(doc4)
        doc5 = {"_id": "test5", "string": "test2", "number": 2,
                "docType": "test", "date": "20081109"}
        db.save_doc(doc5)
        doc6 = {"_id": "test6", "string": "test2", "number": 2,
                "docType": "test", "date": "20081109"}
        db.save_doc(doc6)
        design_doc = {
            "_id": "_design/test",
            "language": "javascript",
            "views": {
                "test1": {
                    "map": """function(doc) { if (doc.docType == "test") { emit(doc.string, doc); }}"""
                },
                "test2": {
                    "map": """function(doc) { if (doc.docType == "test") { emit(doc.date, doc); }}"""
                },
                "test3": {
                    "map": """function(doc) { if (doc.docType == "test") { emit(doc.string, doc); }}"""
                },
            },
        }
        db.save_doc(design_doc)
        results = db.view("test/test1")
        self.assertEqual(len(results), 6)
        results = db.view("test/test3", key="test")
        self.assertEqual(len(results), 2)
        results = db.view("test/test3", key="test2")
        self.assertEqual(len(results), 3)
        results = db.view("test/test2", startkey="200811")
        self.assertEqual(len(results), 5)
        results = db.view("test/test2", startkey="20081107",
                          endkey="20081108")
        self.assertEqual(len(results), 3)
        results = db.view("test/test1", keys=["test", "machin"])
        self.assertEqual(len(results), 3)
        del self.Server["couchdbkit_test"]

    def testMultiWrap(self):
        """ Tests wrapping of view results to multiple classes using the
        client """
        class A(Document):
            pass

        class B(Document):
            pass

        design_doc = {
            "_id": "_design/test",
            "language": "javascript",
            "views": {"all": {"map": """function(doc) { emit(doc._id, doc); }"""}},
        }
        a = A()
        a._id = "1"
        b = B()
        b._id = "2"
        db = self.Server.create_db("couchdbkit_test")
        A._db = db
        B._db = db
        a.save()
        b.save()
        db.save_doc(design_doc)
        # provide classes as a list
        results = list(db.view("test/all", schema=[A, B]))
        self.assertEqual(results[0].__class__, A)
        self.assertEqual(results[1].__class__, B)
        # provide classes as a dict
        results = list(db.view("test/all", schema={"A": A, "B": B}))
        self.assertEqual(results[0].__class__, A)
        self.assertEqual(results[1].__class__, B)
        self.Server.delete_db("couchdbkit_test")
class ClientServerTestCase(unittest.TestCase): def setUp(self): self.couchdb = CouchdbResource() self.Server = Server() def tearDown(self): try: del self.Server["couchdbkit_test"] del self.Server["couchdbkit/test"] except: pass def testGetInfo(self): info = self.Server.info() self.assertIn("version", info) def testCreateDb(self): res = self.Server.create_db("couchdbkit_test") self.assertIsInstance(res, Database) all_dbs = self.Server.all_dbs() self.assertIn("couchdbkit_test", all_dbs) del self.Server["couchdbkit_test"] res = self.Server.create_db("couchdbkit/test") self.assertIn("couchdbkit/test", self.Server.all_dbs()) del self.Server["couchdbkit/test"] def testGetOrCreateDb(self): # create the database gocdb = self.Server.get_or_create_db("get_or_create_db") self.assertEqual(gocdb.dbname, "get_or_create_db") self.assertIn("get_or_create_db", self.Server) self.Server.delete_db("get_or_create_db") # get the database (already created) self.assertNotIn("get_or_create_db", self.Server) db = self.Server.create_db("get_or_create_db") self.assertIn("get_or_create_db", self.Server) gocdb = self.Server.get_or_create_db("get_or_create_db") self.assertEqual(db.dbname, gocdb.dbname) self.Server.delete_db("get_or_create_db") def testCreateInvalidDbName(self): def create_invalid(): res = self.Server.create_db("123ab") self.assertRaises(ValueError, create_invalid) def testServerLen(self): res = self.Server.create_db("couchdbkit_test") self.assertGreaterEqual(len(self.Server), 1) self.assertTrue(self.Server) del self.Server["couchdbkit_test"] def testServerContain(self): res = self.Server.create_db("couchdbkit_test") self.assertIn("couchdbkit_test", self.Server) del self.Server["couchdbkit_test"] def testGetUUIDS(self): uuid = self.Server.next_uuid() self.assertIsInstance(uuid, basestring) self.assertEqual(len(self.Server._uuids), 999) uuid2 = self.Server.next_uuid() self.assertNotEqual(uuid, uuid2) self.assertEqual(len(self.Server._uuids), 998)
class CouchjockTestCase(unittest2.TestCase): server_url = 'http://localhost:5984/' db_name = 'couchjock__test' schema = couchjock def setUp(self): self.server = Server(uri=self.server_url) self.db = self.server.create_db(self.db_name) def tearDown(self): self.server.delete_db(self.db_name) def test_save(self): class Foo(self.schema.Document): _db = self.db pass foo = Foo() foo.save() foo_id = foo._id self.assertIsNotNone(foo_id) foo2 = Foo.get(foo_id) self.assertEqual(foo2._id, foo_id) def test_simple_schema(self): class Foo(self.schema.Document): _db = self.db string = self.schema.StringProperty() boolean = self.schema.BooleanProperty(default=True) foo1 = Foo() foo1.save() foo1_id = foo1._id foo1_rev = foo1._rev self.assertIsNotNone(foo1_id) self.assertIsNotNone(foo1_rev) foo1 = Foo.get(foo1_id) self.assertEqual(foo1.to_json(), { 'doc_type': 'Foo', '_id': foo1_id, '_rev': foo1_rev, 'string': None, 'boolean': True, }) foo1._doc.update({'boolean': False}) self.assertEqual(foo1.boolean, False) def _individual_save(self, docs): for doc in docs: doc.save() def _bulk_save(self, docs): self.db.bulk_save(docs) def _test_simple_view(self, save_fn): class Foo(self.schema.Document): _db = self.db string = self.schema.StringProperty() foo1 = Foo(string='fun one') foo2 = Foo(string='poop') save_fn([foo1, foo2]) self.assertEqual( map(lambda x: x.to_json(), Foo.view('_all_docs', include_docs=True).all()), map(lambda x: x.to_json(), sorted([foo1, foo2], key=attrgetter('_id'))) ) def test_simple_view(self): self._test_simple_view(self._individual_save) def test_bulk_save(self): self._test_simple_view(self._bulk_save)
from pylonsapp.tests import * from couchdbkit import Server try: server = Server() if server: pass except: server = None else: try: server.delete_db('formalchemy_test') except: pass db = server.get_or_create_db('formalchemy_test') def couchdb_runing(func): if server: return func else: def f(self): pass return f class TestCouchdbController(TestController): @couchdb_runing def test_index(self): response = self.app.get('/couchdb') response.mustcontain('/couchdb/Pet/nodes') response = response.click('Pet') response.mustcontain('/couchdb/Pet/nodes/new')
class CouchDbManager(AbstractPersistenceManager):
    """
    This is a couchdb manager for the workspace, it will load from the
    couchdb databases.

    Workspace connectors are cached in the inherited `self.dbs` mapping
    and materialized lazily on first access.
    """

    def __init__(self, uri):
        super(CouchDbManager, self).__init__()
        getLogger(self).debug(
            "Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self.__serv = NoConectionServer()
        self._available = False
        try:
            if uri is not None:
                self.testCouchUrl(uri)
                url = urlparse(uri)
                getLogger(self).debug(
                    "Setting user,pass %s %s" % (url.username, url.password))
                self.__serv = Server(uri=uri)
                self.__serv.resource_class.credentials = (url.username,
                                                          url.password)
                self._available = True
                self.pushReports()
                self._loadDbs()
        except:
            getLogger(self).warn("No route to couchdb server on: %s" % uri)
            getLogger(self).debug(traceback.format_exc())

    #@trap_timeout
    def _create(self, name):
        """Create the workspace database (lowercased name) and wrap it."""
        db = self.__serv.create_db(name.lower())
        return CouchDbConnector(db)

    #@trap_timeout
    def _delete(self, name):
        """Drop the workspace's backing database on the server."""
        self.__serv.delete_db(name)

    #@trap_timeout
    def _loadDbs(self):
        """Register every usable server database for lazy initialization.

        Internal databases (leading underscore) and blacklisted names are
        skipped; each remaining name maps to a factory that builds its
        connector on first use.
        """
        conditions = lambda x: not x.startswith("_") and x not in CONST_BLACKDBS
        try:
            for dbname in filter(conditions, self.__serv.all_dbs()):
                if dbname not in self.dbs.keys():
                    getLogger(self).debug(
                        "Asking for dbname[%s], registering for lazy initialization" % dbname)
                    # The lambda takes the name as a parameter, so each entry
                    # loads its own database when invoked.
                    self.dbs[dbname] = lambda x: self._loadDb(x)
        except restkit.errors.RequestError:
            getLogger(self).error(
                "Couldn't load databases. "
                "The connection to the CouchDB was probably lost. ")

    def _loadDb(self, dbname):
        """Materialize the connector for dbname and cache it in self.dbs."""
        db = self.__serv.get_db(dbname)
        seq = db.info()['update_seq']
        self.dbs[dbname] = CouchDbConnector(db, seq_num=seq)
        return self.dbs[dbname]

    def refreshDbs(self):
        """Refresh databases using inherited method.
        On exception, assume no databases are available.
        """
        try:
            # Fixed: the previous unbound call
            # AbstractPersistenceManager.refreshDbs() omitted `self`, raised
            # TypeError, and always fell into the except branch.
            return super(CouchDbManager, self).refreshDbs()
        except:
            return []

    #@trap_timeout
    def pushReports(self):
        """Upload the reports design document; return the reports index URL.

        Failure (e.g. lacking admin rights) is logged and tolerated.
        """
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        try:
            workspace = self.__serv.get_or_create_db("reports")
            vmanager.addView(reports, workspace)
        except:
            getLogger(self).warn(
                "Reports database couldn't be uploaded. You need to be an admin to do it"
            )
        return self.__uri + "/reports/_design/reports/index.html"

    def lostConnectionResolv(self):
        """Mark the connection lost and drop cached connectors."""
        self._lostConnection = True
        # Fixed: this class never defines self.__dbs; the connector cache is
        # the inherited self.dbs used by _loadDbs/_loadDb.
        self.dbs.clear()
        self.__serv = NoConectionServer()

    def reconnect(self):
        """Try to re-establish the server connection; return success."""
        ret_val = False
        ur = self.__uri
        if CouchDbManager.testCouch(ur):
            self.__serv = Server(uri=ur)
            self.dbs.clear()  # fixed: was self.__dbs (nonexistent attribute)
            self._lostConnection = False
            ret_val = True
        return ret_val

    @staticmethod
    def testCouch(uri):
        """Redirect to the module-level function of the name, which serves
        the same purpose and is used by other classes too."""
        return test_couch(uri)

    def testCouchUrl(self, uri):
        """Raise socket.error when uri's host:port cannot be reached."""
        if uri is not None:
            url = urlparse(uri)
            host = url.hostname
            port = url.port
            self.test(host, int(port))

    def test(self, address, port):
        """Probe a TCP endpoint with a 1-second timeout."""
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))

    #@trap_timeout
    def replicate(self, workspace, *targets_dbs, **kwargs):
        """Replicate `workspace` from this server to every target DB.

        ResourceNotFound is re-raised as-is; other failures are logged
        and re-raised.
        """
        getLogger(self).debug("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                getLogger(self).info(
                    "workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s"
                    % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path,
                                       **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                getLogger(self).error(e)
                raise

    def __peerReplication(self, workspace, src, dst, **kwargs):
        """Start (optionally mutual, continuous) replication between peers."""
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)
        self.__serv.replicate(workspace, dst, mutual=mutual,
                              continuous=continuous, create_target=ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous=continuous, **kwargs)