Example #1
class RevCountResolutionTestCase(ConflictResolverTestCase):
	"""
	This tests resolution based on rev_count. When the time difference
	between two conflicting versions is less than
	configuration.SESSION_LAG_TIME we cannot rely on timestamps because
	of server clock skew, so we decide the winning document by the
	number of modifications, assuming the one with the largest
	rev_count is the current one.
	"""
	def _runTest(self):
		self.s.replicate(self.database_name, 'http://localhost:5984/'+self.repdb_name)
		self.s = Server('http://localhost:5984')
		self.repdb = self.s.get_or_create_db(self.repdb_name)
		self.replicated_doc = self.repdb.get(self.docid)
		# increasing the revision log (add 6 more revisions)
		for i in range(6):
			self.replicated_doc['text'] = 'bigger revision number'
			self.replicated_doc['timestamp'] = str(int(round(time.time())))
			self.replicated_doc['rev_count'] = str(int(self.replicated_doc['rev_count']) + 1)
			self.repdb.save_doc(self.replicated_doc)
		# create the conflict, change the same
		# text field of the original at the source database.
		master_db = self.s.get_or_create_db(self.database_name)
		doc = master_db.get(self.docid)
		doc['text'] = 'smaller revision number'
		doc['timestamp'] = str(int(round(time.time())))
		doc['rev_count'] = str(int(doc['rev_count']) + 1)
		master_db.save_doc(doc)
		self.s.replicate('http://localhost:5984/'+self.repdb_name, self.database_name)
		doc = self.db.get(self.docid)
		self.assertEqual(doc['text'], 'bigger revision number')
		# Busy-wait until the conflict resolver clears the _conflicts field.
		start_time = time.time()
		while self.db.get(self.docid, conflicts=True).has_key('_conflicts'):
			pass
		end_time = time.time()
		print "Time to conflicts clear: %s" % (end_time - start_time)
			
	def runTest(self):
		for i in range(10):
			self._runTest()
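
The decision rule this test exercises can be written down in a few lines. A minimal, hypothetical sketch of it, assuming integer-second timestamps and a lag threshold such as configuration.SESSION_LAG_TIME (pick_winner is an illustrative name, not the project's actual resolver):

def pick_winner(doc_a, doc_b, session_lag_time):
    # Hypothetical sketch of the rule described in the docstring above.
    delta = abs(int(doc_a['timestamp']) - int(doc_b['timestamp']))
    if delta >= session_lag_time:
        # Timestamps are far enough apart to trust despite server clock skew.
        return doc_a if int(doc_a['timestamp']) > int(doc_b['timestamp']) else doc_b
    # Too close in time: fall back to the modification counter.
    return doc_a if int(doc_a['rev_count']) > int(doc_b['rev_count']) else doc_b
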
Example #2
class CouchDbManager(AbstractPersistenceManager):
    """
    This is a CouchDB manager for the workspace;
    it loads data from the CouchDB databases.
    """
    def __init__(self, uri):
        super(CouchDbManager, self).__init__()
        getLogger(self).debug(
            "Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self.__serv = NoConectionServer()
        self._available = False
        try:
            if uri is not None:
                self.testCouchUrl(uri)
                url = urlparse(uri)
                getLogger(self).debug(
                    "Setting user,pass %s %s" % (url.username, url.password))
                self.__serv = Server(uri=uri)
                self.__serv.resource_class.credentials = (url.username, url.password)
                self._available = True
                self.pushReports()
                self._loadDbs()
        except:
            getLogger(self).warn("No route to couchdb server on: %s" % uri)
            getLogger(self).debug(traceback.format_exc())

    #@trap_timeout
    def _create(self, name):
        db = self.__serv.create_db(name.lower())
        return CouchDbConnector(db)

    #@trap_timeout
    def _delete(self, name):
        self.__serv.delete_db(name)

    #@trap_timeout
    def _loadDbs(self):
        conditions = lambda x: not x.startswith("_") and x != 'reports'
        for dbname in filter(conditions, self.__serv.all_dbs()):
            if dbname not in self.dbs.keys():
                getLogger(self).debug(
                    "Asking for dbname[%s], registering for lazy initialization" % dbname)
                self.dbs[dbname] = lambda x: self._loadDb(x)

    def _loadDb(self, dbname):
        db = self.__serv.get_db(dbname)
        seq = db.info()['update_seq']
        self.dbs[dbname] = CouchDbConnector(db, seq_num=seq) 
        return self.dbs[dbname]


    #@trap_timeout
    def pushReports(self):
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        workspace = self.__serv.get_or_create_db("reports")
        vmanager.addView(reports, workspace)
        return self.__uri + "/reports/_design/reports/index.html"

    def lostConnectionResolv(self):
        self._lostConnection = True
        self.__dbs.clear()
        self.__serv = NoConectionServer()

    def reconnect(self):
        ret_val = False
        ur = self.__uri
        if CouchDbManager.testCouch(ur):
            self.__serv = Server(uri = ur)
            self.__dbs.clear()
            self._lostConnection = False
            ret_val = True

        return ret_val

    @staticmethod
    def testCouch(uri):
        if uri is not None:
            host, port = None, None
            try:
                import socket
                url = urlparse(uri)
                proto = url.scheme
                host = url.hostname
                port = url.port

                port = port if port else socket.getservbyname(proto)
                s = socket.socket()
                s.settimeout(1)
                s.connect((host, int(port)))
            except:
                return False
            #getLogger(CouchdbManager).info("Connecting Couch to: %s:%s" % (host, port))
            return True

    def testCouchUrl(self, uri):
        if uri is not None:
            url = urlparse(uri)
            proto = url.scheme
            host = url.hostname
            port = url.port
            self.test(host, int(port))

    def test(self, address, port):
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))

    #@trap_timeout
    def replicate(self, workspace, *targets_dbs, **kwargs):
        getLogger(self).debug("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                getLogger(self).info("workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s" % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path, **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                getLogger(self).error(e)
                raise 

    def __peerReplication(self, workspace, src, dst, **kwargs):
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)

        self.__serv.replicate(workspace, dst, mutual = mutual, continuous  = continuous, create_target = ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous = continuous, **kwargs)
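
_loadDbs above registers a lambda per database name so the real connector is only built on first access. A hedged sketch of how a caller might resolve such a lazy entry (get_connector is a hypothetical helper; it assumes the parent class exposes the dbs dict used above):

def get_connector(manager, dbname):
    # Hypothetical helper: resolve a lazily registered database entry.
    entry = manager.dbs.get(dbname)
    if callable(entry):
        # _loadDbs stored "lambda x: self._loadDb(x)"; calling it swaps the
        # placeholder for a real CouchDbConnector and caches it in dbs.
        entry = entry(dbname)
    return entry
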
Example #3
class CouchdbManager(PersistenceManager):
    """ This is a CouchDB manager for the workspace; it loads from the
    CouchDB databases."""
    def __init__(self, uri):
        self._last_seq_ack = 0
        model.api.log("Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self.__dbs = {} 
        self.__seq_nums = {}
        self.__serv = NoConectionServer()
        self.mutex = threading.Lock()
        self._available = False
        try:
            self.testCouchUrl(uri)
            self.__serv = Server(uri = uri)
            self._available = True
        except:
            model.api.log("No route to couchdb server on: %s" % uri)

    def isAvailable(self):
        return self._available

    def lostConnectionResolv(self): 
        self._lostConnection = True
        self.__dbs.clear()
        self.__serv = NoConectionServer()

    def reconnect(self):
        ret_val = False
        ur = self.__uri
        if CouchdbManager.testCouch(ur):
            self.__serv = Server(uri = ur)
            self.__dbs.clear()
            self._lostConnection = False
            ret_val = True

        return ret_val



    @staticmethod
    def testCouch(uri):
        host, port = None, None
        try:
            import socket
            proto, netloc, _, _, _ = urlsplit(uri)
            host, port = splitport(netloc)
            port = port if port else socket.getservbyname(proto)
            s = socket.socket()
            s.settimeout(1)
            s.connect((host, int(port)))
        except:
            return False
        model.api.log("Connecting Couch to: %s:%s" % (host, port))
        return True



    def testCouchUrl(self, uri):
        _, netloc, _, _, _ = urlsplit(uri)
        host, port = splitport(netloc)
        self.test(host, int(port))

    def test(self, address, port):
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))


    @trap_timeout
    def getWorkspacesNames(self):
        return filter(lambda x: not x.startswith("_"), self.__serv.all_dbs())

    def workspaceExists(self, name):
        return name in self.getWorkspacesNames()


    @trap_timeout
    def addWorkspace(self, aWorkspace):
        self.__serv.create_db(aWorkspace.lower())
        return self.__getDb(aWorkspace)

    @trap_timeout
    def addDocument(self, aWorkspaceName, documentId, aDocument):
        self.incrementSeqNumber(aWorkspaceName)
        self.__getDb(aWorkspaceName)[documentId] = aDocument

    @trap_timeout
    def saveDocument(self, aWorkspaceName, aDocument):
        self.incrementSeqNumber(aWorkspaceName)
        model.api.log("Saving document in remote workspace %s" % aWorkspaceName)
        self.__getDb(aWorkspaceName).save_doc(aDocument, use_uuids = True, force_update = True)

    @trap_timeout
    def __getDb(self, aWorkspaceName): 
        aWorkspaceName = aWorkspaceName.lower()
        model.api.log("Getting workspace [%s]" % aWorkspaceName)
        workspacedb = self.__dbs.get(aWorkspaceName, self.__serv.get_db(aWorkspaceName))
        if not self.__dbs.has_key(aWorkspaceName): 
            model.api.log("Asking couchdb for workspace [%s]" % aWorkspaceName)
            self.__dbs[aWorkspaceName] = workspacedb
            self.__seq_nums[aWorkspaceName] = workspacedb.info()['update_seq'] 
        return workspacedb

    @trap_timeout
    def getDocument(self, aWorkspaceName, documentId):
        model.api.log("Getting document for workspace [%s]" % aWorkspaceName)
        return self.__getDb(aWorkspaceName).get(documentId)

    @trap_timeout
    def checkDocument(self, aWorkspaceName, documentName):
        return  self.__getDb(aWorkspaceName).doc_exist(documentName)


    @trap_timeout
    def replicate(self, workspace, *targets_dbs, **kwargs):
        model.api.log("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                model.api.devlog("workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s" % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path, **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                model.api.devlog(e)
                raise 

    def __peerReplication(self, workspace, src, dst, **kwargs):
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)

        self.__serv.replicate(workspace, dst, mutual = mutual, continuous  = continuous, create_target = ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous = continuous, **kwargs)


    def getLastChangeSeq(self, workspaceName):
        self.mutex.acquire()
        seq = self.__seq_nums[workspaceName]
        self.mutex.release()
        return seq

    def setLastChangeSeq(self, workspaceName, seq_num):
        self.mutex.acquire()
        self.__seq_nums[workspaceName] = seq_num
        self.mutex.release()


    @trap_timeout
    def waitForDBChange(self, db_name, since = 0, timeout = 15000):
        """ Be warned this will return after the database has a change, if
        one happened before the call it will return immediately with the
        changes already done."""
        changes = []
        last_seq = max(self.getLastChangeSeq(db_name), since)
        db = self.__getDb(db_name)
        with ChangesStream(db, feed="longpoll", since = last_seq, timeout = timeout) as stream:
            for change in stream:
                if change['seq'] > self.getLastChangeSeq(db_name):
                    changes.append(change)
            last_seq = reduce(lambda x,y:  max(y['seq'], x) , changes, self.getLastChangeSeq(db_name))
            self.setLastChangeSeq(db_name, last_seq)
        return changes

    @trap_timeout
    def delete_all_dbs(self):
        for db in self.__serv.all_dbs():
            self.__serv.delete_db(db)

    @trap_timeout
    def existWorkspace(self, name):
        return name in self.__serv.all_dbs()

    @trap_timeout
    def workspaceDocumentsIterator(self, workspaceName):
        return filter(lambda x: not x["id"].startswith("_"), self.__getDb(workspaceName).documents(include_docs=True))

    @trap_timeout
    def removeWorkspace(self, workspace_name):
        return self.__serv.delete_db(workspace_name) 

    @trap_timeout
    def remove(self, workspace, host_id):
        self.incrementSeqNumber(workspace)
        self.__dbs[workspace].delete_doc(host_id)

    @trap_timeout
    def compactDatabase(self, aWorkspaceName):
        self.__getDb(aWorkspaceName).compact()

    def pushReports(self):
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        workspace = self.__serv.get_or_create_db("reports") 
        vmanager.addView(reports, workspace)
        return self.__uri + "/reports/_design/reports/index.html"


    def addViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName)
        for v in vmanager.getAvailableViews():
            vmanager.addView(v, workspace)

    def getViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName)
        return vmanager.getViews(workspace)

    def syncWorkspaceViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName) 
        installed_views = vmanager.getViews(workspace)
        for v in vmanager.getAvailableViews():
            if v not in installed_views: 
                vmanager.addView(v, workspace)

    def incrementSeqNumber(self, workspaceName):
        self.mutex.acquire()
        self.__seq_nums[workspaceName] += 1 
        self.mutex.release()
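
waitForDBChange long-polls the _changes feed and returns everything newer than the last acknowledged sequence number, so a consumer typically calls it in a loop. A minimal sketch under that assumption (watch_workspace and the workspace name are illustrative):

def watch_workspace(manager, workspace='ws1'):
    # Hypothetical polling loop around waitForDBChange; each change dict
    # carries at least 'seq' and 'id' from the CouchDB changes feed.
    while True:
        for change in manager.waitForDBChange(workspace, timeout=15000):
            print("change %s on doc %s" % (change['seq'], change['id']))
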
Example #4
class CouchdbManager(PersistenceManager):
    """ This is a CouchDB manager for the workspace; it loads from the
    CouchDB databases."""
    def __init__(self, uri):
        self._last_seq_ack = 0
        model.api.log("Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self.__dbs = {}
        self.__seq_nums = {}
        self.__serv = NoConectionServer()
        self.mutex = threading.Lock()
        self._available = False
        try:
            self.testCouchUrl(uri)
            url = urlparse(uri)
            print("Setting user,pass %s %s" % (url.username, url.password))
            self.__serv = Server(uri=uri)
            #print dir(self.__serv)
            self.__serv.resource_class.credentials = (url.username,
                                                      url.password)
            self._available = True
        except:
            model.api.log("No route to couchdb server on: %s" % uri)
            print(traceback.format_exc())

    def isAvailable(self):
        return self._available

    def lostConnectionResolv(self):
        self._lostConnection = True
        self.__dbs.clear()
        self.__serv = NoConectionServer()

    def reconnect(self):
        ret_val = False
        ur = self.__uri
        if CouchdbManager.testCouch(ur):
            self.__serv = Server(uri=ur)
            self.__dbs.clear()
            self._lostConnection = False
            ret_val = True

        return ret_val

    @staticmethod
    def testCouch(uri):
        host, port = None, None
        try:
            import socket
            url = urlparse(uri)
            proto = url.scheme
            host = url.hostname
            port = url.port

            port = port if port else socket.getservbyname(proto)
            s = socket.socket()
            s.settimeout(1)
            s.connect((host, int(port)))
        except:
            return False
        model.api.log("Connecting Couch to: %s:%s" % (host, port))
        return True

    def testCouchUrl(self, uri):
        url = urlparse(uri)
        proto = url.scheme
        host = url.hostname
        port = url.port
        self.test(host, int(port))

    def test(self, address, port):
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))

    @trap_timeout
    def getWorkspacesNames(self):
        return filter(lambda x: not x.startswith("_"), self.__serv.all_dbs())

    def workspaceExists(self, name):
        return name in self.getWorkspacesNames()

    @trap_timeout
    def addWorkspace(self, aWorkspace):
        self.__serv.create_db(aWorkspace.lower())
        return self.__getDb(aWorkspace)

    @trap_timeout
    def addDocument(self, aWorkspaceName, documentId, aDocument):
        self.incrementSeqNumber(aWorkspaceName)
        self.__getDb(aWorkspaceName)[documentId] = aDocument

    @trap_timeout
    def saveDocument(self, aWorkspaceName, aDocument):
        self.incrementSeqNumber(aWorkspaceName)
        model.api.log("Saving document in remote workspace %s" %
                      aWorkspaceName)
        self.__getDb(aWorkspaceName).save_doc(aDocument,
                                              use_uuids=True,
                                              force_update=True)

    @trap_timeout
    def __getDb(self, aWorkspaceName):
        aWorkspaceName = aWorkspaceName.lower()
        model.api.log("Getting workspace [%s]" % aWorkspaceName)
        workspacedb = self.__dbs.get(aWorkspaceName,
                                     self.__serv.get_db(aWorkspaceName))
        if not self.__dbs.has_key(aWorkspaceName):
            model.api.log("Asking couchdb for workspace [%s]" % aWorkspaceName)
            self.__dbs[aWorkspaceName] = workspacedb
            self.__seq_nums[aWorkspaceName] = workspacedb.info()['update_seq']
        return workspacedb

    @trap_timeout
    def getDocument(self, aWorkspaceName, documentId):
        model.api.log("Getting document for workspace [%s]" % aWorkspaceName)
        return self.__getDb(aWorkspaceName).get(documentId)

    @trap_timeout
    def checkDocument(self, aWorkspaceName, documentName):
        return self.__getDb(aWorkspaceName).doc_exist(documentName)

    @trap_timeout
    def replicate(self, workspace, *targets_dbs, **kwargs):
        model.api.log("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                model.api.devlog(
                    "workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s"
                    % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path,
                                       **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                model.api.devlog(e)
                raise

    def __peerReplication(self, workspace, src, dst, **kwargs):
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)

        self.__serv.replicate(workspace,
                              dst,
                              mutual=mutual,
                              continuous=continuous,
                              create_target=ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous=continuous, **kwargs)

    def getLastChangeSeq(self, workspaceName):
        self.mutex.acquire()
        seq = self.__seq_nums[workspaceName]
        self.mutex.release()
        return seq

    def setLastChangeSeq(self, workspaceName, seq_num):
        self.mutex.acquire()
        self.__seq_nums[workspaceName] = seq_num
        self.mutex.release()

    @trap_timeout
    def waitForDBChange(self, db_name, since=0, timeout=15000):
        """ Be warned this will return after the database has a change, if
        one happened before the call it will return immediately with the
        changes already done."""
        changes = []
        last_seq = max(self.getLastChangeSeq(db_name), since)
        db = self.__getDb(db_name)
        with ChangesStream(db,
                           feed="longpoll",
                           since=last_seq,
                           timeout=timeout) as stream:
            for change in stream:
                if change['seq'] > self.getLastChangeSeq(db_name):
                    changes.append(change)
            last_seq = reduce(lambda x, y: max(y['seq'], x), changes,
                              self.getLastChangeSeq(db_name))
            self.setLastChangeSeq(db_name, last_seq)
        return changes

    @trap_timeout
    def delete_all_dbs(self):
        for db in self.__serv.all_dbs():
            self.__serv.delete_db(db)

    @trap_timeout
    def existWorkspace(self, name):
        return name in self.__serv.all_dbs()

    @trap_timeout
    def workspaceDocumentsIterator(self, workspaceName):
        return filter(lambda x: not x["id"].startswith("_"),
                      self.__getDb(workspaceName).documents(include_docs=True))

    @trap_timeout
    def removeWorkspace(self, workspace_name):
        return self.__serv.delete_db(workspace_name)

    @trap_timeout
    def remove(self, workspace, host_id):
        self.incrementSeqNumber(workspace)
        self.__dbs[workspace].delete_doc(host_id)

    @trap_timeout
    def compactDatabase(self, aWorkspaceName):
        self.__getDb(aWorkspaceName).compact()

    def pushReports(self):
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        workspace = self.__serv.get_or_create_db("reports")
        vmanager.addView(reports, workspace)
        return self.__uri + "/reports/_design/reports/index.html"

    def addViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName)
        for v in vmanager.getAvailableViews():
            vmanager.addView(v, workspace)

    def getViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName)
        return vmanager.getViews(workspace)

    def syncWorkspaceViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self.__getDb(workspaceName)
        installed_views = vmanager.getViews(workspace)
        for v in vmanager.getAvailableViews():
            if v not in installed_views:
                vmanager.addView(v, workspace)

    def incrementSeqNumber(self, workspaceName):
        self.mutex.acquire()
        self.__seq_nums[workspaceName] += 1
        self.mutex.release()
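
This variant extracts the username and password from the URI and installs them as resource credentials before talking to the server. A hedged usage sketch (the URI and credentials below are placeholders):

# Hypothetical instantiation; host, port and credentials are placeholders.
manager = CouchdbManager("http://admin:secret@localhost:5984")
if manager.isAvailable():
    workspaces = manager.getWorkspacesNames()
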
Example #5
class CouchDbManager(AbstractPersistenceManager):
    """
    This is a CouchDB manager for the workspace;
    it loads data from the CouchDB databases.
    """
    def __init__(self, uri):
        super(CouchDbManager, self).__init__()
        getLogger(self).debug(
            "Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self.__serv = NoConectionServer()
        self._available = False
        try:
            if uri is not None:
                self.testCouchUrl(uri)
                url = urlparse(uri)
                getLogger(self).debug(
                    "Setting user,pass %s %s" % (url.username, url.password))
                self.__serv = Server(uri=uri)
                self.__serv.resource_class.credentials = (url.username, url.password)
                self._available = True
                self.pushReports()
                self._loadDbs()
        except:
            getLogger(self).warn("No route to couchdb server on: %s" % uri)
            getLogger(self).debug(traceback.format_exc())

    #@trap_timeout
    def _create(self, name):
        db = self.__serv.create_db(name.lower())
        return CouchDbConnector(db)

    #@trap_timeout
    def _delete(self, name):
        self.__serv.delete_db(name)

    #@trap_timeout
    def _loadDbs(self):
        conditions = lambda x: not x.startswith("_") and x != 'reports'
        for dbname in filter(conditions, self.__serv.all_dbs()):
            if dbname not in self.dbs.keys():
                getLogger(self).debug(
                    "Asking for dbname[%s], registering for lazy initialization" % dbname)
                self.dbs[dbname] = lambda x: self._loadDb(x)

    def _loadDb(self, dbname):
        db = self.__serv.get_db(dbname)
        seq = db.info()['update_seq']
        self.dbs[dbname] = CouchDbConnector(db, seq_num=seq) 
        return self.dbs[dbname]


    #@trap_timeout
    def pushReports(self):
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        workspace = self.__serv.get_or_create_db("reports")
        vmanager.addView(reports, workspace)
        return self.__uri + "/reports/_design/reports/index.html"

    def lostConnectionResolv(self):
        self._lostConnection = True
        self.__dbs.clear()
        self.__serv = NoConectionServer()

    def reconnect(self):
        ret_val = False
        ur = self.__uri
        if CouchDbManager.testCouch(ur):
            self.__serv = Server(uri = ur)
            self.__dbs.clear()
            self._lostConnection = False
            ret_val = True

        return ret_val

    @staticmethod
    def testCouch(uri):
        if uri is not None:
            host, port = None, None
            try:
                import socket
                url = urlparse(uri)
                proto = url.scheme
                host = url.hostname
                port = url.port

                port = port if port else socket.getservbyname(proto)
                s = socket.socket()
                s.settimeout(1)
                s.connect((host, int(port)))
            except:
                return False
            #getLogger(CouchdbManager).info("Connecting Couch to: %s:%s" % (host, port))
            return True

    def testCouchUrl(self, uri):
        if uri is not None:
            url = urlparse(uri)
            proto = url.scheme
            host = url.hostname
            port = url.port
            self.test(host, int(port))

    def test(self, address, port):
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))

    #@trap_timeout
    def replicate(self, workspace, *targets_dbs, **kwargs):
        getLogger(self).debug("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                getLogger(self).info("workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s" % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path, **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                getLogger(self).error(e)
                raise 

    def __peerReplication(self, workspace, src, dst, **kwargs):
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)

        self.__serv.replicate(workspace, dst, mutual = mutual, continuous  = continuous, create_target = ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous = continuous, **kwargs)
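
reconnect relies on the static testCouch reachability probe and simply rebuilds the Server handle when the host answers again. A hedged sketch of a retry loop a caller might run after lostConnectionResolv fires (the attempt count and delay are assumptions):

import time

def try_reconnect(manager, attempts=5, delay=2):
    # Hypothetical retry helper built on CouchDbManager.reconnect().
    for _ in range(attempts):
        if manager.reconnect():
            return True
        time.sleep(delay)  # fixed back-off between reachability probes
    return False
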
Example #6
class CouchdbManager(PersistenceManager):
    """ This is a CouchDB manager for the workspace; it loads from the
    CouchDB databases."""
    def __init__(self, uri):
        self._last_seq_ack = 0
        getLogger(self).debug("Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self.__dbs = {} 
        self.__seq_nums = {}
        self.__serv = NoConectionServer()
        self.mutex = threading.Lock()
        self._available = False

        #setting the doc types to load from couch
        def get_types(subclasses):
            # Depth-first walk of the subclass tree, collecting every class_signature.
            if len(subclasses):
                head, tail = subclasses[0], subclasses[1:]
                return get_types(head.__subclasses__()) + [head.class_signature] + get_types(tail)
            return []
        self._model_object_types = get_types([ModelObject])
        try:
            if uri is not None:
                self.testCouchUrl(uri)
                url = urlparse(uri)
                getLogger(self).debug("Setting user,pass %s %s" % (url.username, url.password))
                self.__serv = Server(uri=uri)
                #print dir(self.__serv)
                self.__serv.resource_class.credentials = (url.username, url.password)
                self._available = True
        except:
            getLogger(self).warn("No route to couchdb server on: %s" % uri)
            getLogger(self).debug(traceback.format_exc())

    def isAvailable(self):
        return self._available

    def lostConnectionResolv(self): 
        self._lostConnection = True
        self.__dbs.clear()
        self.__serv = NoConectionServer()

    def reconnect(self):
        ret_val = False
        ur = self.__uri
        if CouchdbManager.testCouch(ur):
            self.__serv = Server(uri = ur)
            self.__dbs.clear()
            self._lostConnection = False
            ret_val = True

        return ret_val

    @staticmethod
    def testCouch(uri):
        if uri is not None:
            host, port = None, None
            try:
                import socket
                url = urlparse(uri)
                proto = url.scheme
                host = url.hostname
                port = url.port

                port = port if port else socket.getservbyname(proto)
                s = socket.socket()
                s.settimeout(1)
                s.connect((host, int(port)))
            except:
                return False
            getLogger(CouchdbManager).info("Connecting Couch to: %s:%s" % (host, port))
            return True

    def testCouchUrl(self, uri):
        if uri is not None:
            url = urlparse(uri)
            proto = url.scheme
            host = url.hostname
            port = url.port
            self.test(host, int(port))

    def test(self, address, port):
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))


    @trap_timeout
    def getWorkspacesNames(self):
        return filter(lambda x: not x.startswith("_"), self.__serv.all_dbs())

    def workspaceExists(self, name):
        return name in self.getWorkspacesNames()


    @trap_timeout
    def addWorkspace(self, aWorkspace):
        self.__serv.create_db(aWorkspace.lower())
        return self._getDb(aWorkspace)

    @trap_timeout
    def addDocument(self, aWorkspaceName, documentId, aDocument):
        self._getDb(aWorkspaceName)
        self.incrementSeqNumber(aWorkspaceName)
        self._getDb(aWorkspaceName)[documentId] = aDocument

    @trap_timeout
    def saveDocument(self, aWorkspaceName, aDocument):
        self.incrementSeqNumber(aWorkspaceName)
        getLogger(self).debug("Saving document in remote workspace %s" % aWorkspaceName)
        return self._getDb(aWorkspaceName).save_doc(aDocument, use_uuids = True, force_update = True)

    def _getDb(self, aWorkspaceName):
        if not self.__dbs.has_key(aWorkspaceName):
            self.__getDb(aWorkspaceName)
        return self.__dbs.get(aWorkspaceName, None)

    @trap_timeout
    def __getDb(self, aWorkspaceName): 
        aWorkspaceName = aWorkspaceName.lower()
        getLogger(self).debug("Getting workspace [%s]" % aWorkspaceName)
        workspacedb = self.__dbs.get(aWorkspaceName, self.__serv.get_db(aWorkspaceName))
        if not self.__dbs.has_key(aWorkspaceName): 
            getLogger(self).debug("Asking couchdb for workspace [%s]" % aWorkspaceName)
            self.__dbs[aWorkspaceName] = workspacedb
            self.__seq_nums[aWorkspaceName] = workspacedb.info()['update_seq'] 


        return workspacedb

    @trap_timeout
    def getDocument(self, aWorkspaceName, documentId):
        getLogger(self).debug("Getting document for workspace [%s]" % aWorkspaceName)
        return self._getDb(aWorkspaceName).get(documentId)

    @trap_timeout
    def getDeletedDocument(self, aWorkspaceName, documentId, documentRev):
        return self._getDb(aWorkspaceName).get(documentId, rev=documentRev)

    @trap_timeout
    def checkDocument(self, aWorkspaceName, documentName):
        return  self._getDb(aWorkspaceName).doc_exist(documentName)


    @trap_timeout
    def replicate(self, workspace, *targets_dbs, **kwargs):
        getLogger(self).debug("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                getLogger(self).info("workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s" % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path, **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                getLogger(self).error(e)
                raise 

    def __peerReplication(self, workspace, src, dst, **kwargs):
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)

        self.__serv.replicate(workspace, dst, mutual = mutual, continuous  = continuous, create_target = ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous = continuous, **kwargs)


    def getLastChangeSeq(self, workspaceName):
        self.mutex.acquire()
        seq = self.__seq_nums[workspaceName]
        self.mutex.release()
        return seq

    def setLastChangeSeq(self, workspaceName, seq_num):
        self.mutex.acquire()
        self.__seq_nums[workspaceName] = seq_num
        self.mutex.release()


    @trap_timeout
    def waitForDBChange(self, db_name, since = 0, timeout = 15000):
        """ Be warned this will return after the database has a change, if
        one happened before the call it will return immediately with the
        changes already done."""
        changes = []
        last_seq = max(self.getLastChangeSeq(db_name), since)
        db = self._getDb(db_name)
        with ChangesStream(db, feed="longpoll", since=last_seq, timeout=timeout) as stream:
            for change in stream:
                if change['seq'] > self.getLastChangeSeq(db_name):
                    self.setLastChangeSeq(db_name, change['seq'])
                    if not change['id'].startswith('_design'):
                        #fake doc type for deleted objects
                        doc = {'type': 'unknown', '_deleted': 'False', '_rev':[0]}
                        if not change.get('deleted'):
                            doc = self.getDocument(db_name, change['id'])
                        changes.append(change_factory.create(doc))
        if len(changes):
            getLogger(self).debug("Changes from another instance")
        return changes

    @trap_timeout
    def delete_all_dbs(self):
        for db in self.__serv.all_dbs():
            self.__serv.delete_db(db)

    @trap_timeout
    def existWorkspace(self, name):
        return name in self.__serv.all_dbs()

    @trap_timeout
    def workspaceDocumentsIterator(self, workspaceName): 
        return filter(self.filterConditions, self._getDb(workspaceName).documents(include_docs=True))

    def filterConditions(self, doc):
        ret = True
        ret = ret and not doc["id"].startswith("_")
        ret = ret and doc['doc']["type"] in self._model_object_types

        return ret

    @trap_timeout
    def removeWorkspace(self, workspace_name):
        return self.__serv.delete_db(workspace_name)

    @trap_timeout
    def remove(self, workspace, host_id):
        self.incrementSeqNumber(workspace)
        self.__dbs[workspace].delete_doc(host_id)

    @trap_timeout
    def compactDatabase(self, aWorkspaceName):
        self._getDb(aWorkspaceName).compact()

    def pushReports(self):
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        workspace = self.__serv.get_or_create_db("reports") 
        vmanager.addView(reports, workspace)
        return self.__uri + "/reports/_design/reports/index.html"


    def addViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self._getDb(workspaceName)
        for v in vmanager.getAvailableViews():
            vmanager.addView(v, workspace)

    def getViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self._getDb(workspaceName)
        return vmanager.getViews(workspace)

    def syncWorkspaceViews(self, workspaceName):
        vmanager = ViewsManager()
        workspace = self._getDb(workspaceName) 
        installed_views = vmanager.getViews(workspace)
        for v in vmanager.getAvailableViews():
            if v not in installed_views: 
                vmanager.addView(v, workspace)

    def incrementSeqNumber(self, workspaceName):
        self.mutex.acquire()
        if not self.__seq_nums.has_key(workspaceName):
            self.__seq_nums[workspaceName] = 0
        self.__seq_nums[workspaceName] += 1 
        self.mutex.release()
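
The get_types helper in __init__ walks the ModelObject subclass tree depth-first and collects every class_signature. A self-contained illustration with stand-in classes (the class names below are made up for the example):

def get_types(subclasses):
    # Same recursion as in __init__ above, repeated so the sketch runs on its own.
    if len(subclasses):
        head, tail = subclasses[0], subclasses[1:]
        return get_types(head.__subclasses__()) + [head.class_signature] + get_types(tail)
    return []

class ModelObject(object):
    class_signature = "ModelObject"

class Host(ModelObject):
    class_signature = "Host"

class Service(Host):
    class_signature = "Service"

# Depth-first over the subclass tree: ['Service', 'Host', 'ModelObject']
print(get_types([ModelObject]))
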
Example #7
class CouchDbManager(AbstractPersistenceManager):
    """
    This is a CouchDB manager for the workspace;
    it loads data from the CouchDB databases.
    """
    def __init__(self, uri, couch_exception_callback):
        super(CouchDbManager, self).__init__()
        getLogger(self).debug(
            "Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self._available = False
        self.couch_exception_callback = couch_exception_callback
        test_couch_thread = threading.Thread(target=self.continuosly_check_connection)
        test_couch_thread.daemon = True
        test_couch_thread.start()
        try:
            if uri is not None:
                self.testCouchUrl(uri)
                url = urlparse(uri)
                getLogger(self).debug(
                    "Setting user,pass %s %s" % (url.username, url.password))
                self.__serv = Server(uri=uri)
                self.__serv.resource_class.credentials = (url.username, url.password)
                self._available = True
                self.pushReports()
                self._loadDbs()
        except:
            getLogger(self).warn("No route to couchdb server on: %s" % uri)
            getLogger(self).debug(traceback.format_exc())

    def continuosly_check_connection(self):
        """Intended to run on a separate thread. Calls the module-level
        function test_couch every second to check that the server_uri of the
        DB still responds, and calls the exception callback if the server
        cannot be reached three times in a row.
        """
        tolerance = 0
        server_uri = self.__uri
        while True:
            time.sleep(1)
            test_was_successful = test_couch(server_uri)
            if test_was_successful:
                tolerance = 0
            else:
                tolerance += 1
                if tolerance == 3:
                    self.couch_exception_callback()
                    return False  # kill the thread if something went wrong

    def _create(self, name):
        db = self.__serv.create_db(name.lower())
        return CouchDbConnector(db)

    def _delete(self, name):
        self.__serv.delete_db(name)

    def _loadDbs(self):

        def conditions(database):
            begins_with_underscore = database.startswith("_")
            is_blacklisted = database in CONST_BLACKDBS
            return not begins_with_underscore and not is_blacklisted

        try:
            for dbname in filter(conditions, self.__serv.all_dbs()):
                if dbname not in self.dbs.keys():
                    getLogger(self).debug(
                        "Asking for dbname[%s], registering for lazy initialization" % dbname)
                    self.dbs[dbname] = lambda x: self._loadDb(x)
        except restkit.errors.RequestError as req_error:
            getLogger(self).error("Couldn't load databases. "
                                  "The connection to the CouchDB was probably lost. ")

    def _loadDb(self, dbname):
        db = self.__serv.get_db(dbname)
        seq = db.info()['update_seq']
        self.dbs[dbname] = CouchDbConnector(db, seq_num=seq)
        return self.dbs[dbname]

    def refreshDbs(self):
        """Refresh databases using the inherited method. On exception, assume
        no databases are available.
        """
        try:
            return AbstractPersistenceManager.refreshDbs(self)
        except:
            return []

    def pushReports(self):
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        try:
            workspace = self.__serv.get_or_create_db("reports")
            vmanager.addView(reports, workspace)
        except:
            getLogger(self).warn(
                "Reports database couldn't be uploaded. You need to be an admin to do it")
        return self.__uri + "/reports/_design/reports/index.html"

    @staticmethod
    def testCouch(uri):
        """Redirect to the module-level function of the name, which
        serves the same purpose and is used by other classes too."""
        return test_couch(uri)

    def testCouchUrl(self, uri):
        if uri is not None:
            url = urlparse(uri)
            host = url.hostname
            port = url.port
            self.test(host, int(port))

    def test(self, address, port):
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))

    def replicate(self, workspace, *targets_dbs, **kwargs):
        getLogger(self).debug("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                getLogger(self).info("workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s" % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path, **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                getLogger(self).error(e)
                raise

    def __peerReplication(self, workspace, src, dst, **kwargs):
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)

        self.__serv.replicate(workspace, dst, mutual = mutual, continuous  = continuous, create_target = ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous = continuous, **kwargs)
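
Unlike the earlier variants, this constructor takes a couch_exception_callback and starts a daemon thread that probes the server once per second. A hedged usage sketch (the callback body and URI are placeholders):

def on_couch_lost():
    # Placeholder callback: invoked after three consecutive failed probes.
    print("lost connection to CouchDB, switching to offline mode")

manager = CouchDbManager("http://localhost:5984", on_couch_lost)
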
Example #8
class ConnectionManager(object):
    """
	This class takes care of starting the wrapper SSH tunnel connections to allow
	communicating with the remote CouchDB nodes, and is responsible for restarting
	replication connections for nodes that go back into online state as reported
	by the NodeProbe.
	"""

    def __init__(self, hosts, database_name="session_store"):
        self.logger = configuration.get_logger(logging_instance=connlogging, system_name="connmanager")
        configuration.info(self.logger)
        self.database_name = database_name
        self.hosts = hosts
        self.online = []
        self.ip = self.get_ip_address()
        self.logger.info("= ConnectionManager instantiated =")
        self.logger.info("My IP -> %s" % self.ip)
        self.logger.info("Targets: %s" % self.hosts)
        # Create an instance to the local server
        self.dbserver = Server()
        self.node_probe = None  # reference for the NodeProbe object

    def manage(self):
        """This encapsulates one monitoring run"""
        # clear latest run results
        # (if not, our lists get infinitely populated)
        self.online = []
        self.offline = []
        # start the monitor run
        self.logger.info("Monitor run for %s" % self.hosts)
        self.node_probe = NodeProbe(self.hosts, self.online, self.offline)
        self.node_probe.survey()
        self.node_probe.wait_finish()
        print "online: %s" % self.online
        print "offline: %s" % self.offline
        for i in self.online:
            self.logger.info("%s online. Restarting connection." % i[1])
            self.restartConnection(i[1])

    def get_ip_address(self):
        return configuration.MY_IP

    def restartConnection(self, target_uri):
        """
		This wrapper may look redundant, but it is here to
		remind us that eventually SSH tunneling will be handled by
		'self.startTunnel' and will be called before 'self.continuousReplication'.

		This will mandate the translation of the real ip addresses to localhost
		and respective port to make the SSH tunneling transparent to users of 
		the ConnectionManager. So once it is finished, target nodes list will be fetched
		from the configuration CouchDB db and translated to tunnel invocations 
		and localhost replication connections.
		"""
        self.logger.info("Restarting conn. for  %s" % target_uri)
        self.continuousReplication(target_uri, self.database_name)

    def manageForever(self, interval=30):
        while True:
            self.manage()
            time.sleep(interval)

    def startTunnel(self, local_port, local_host, target_port, target_host):
        """
		If not already started, start a new autoSSH process to 
		keep the connection to the target.
		If the autossh process is already there, leave it since autossh
		takes care of maintaining the connection.

		Return the 'http://localhost:900x' equivalent for the real ip and port.
		e.g: 'http://79.143.23.119:5984' --> 'http://localhost:9001'
		This enables transparent restart of the CouchDB plain text HTTP replication
		connections.
		"""

    def continuousReplication(self, target_uri, database_name):
        """
		Stop continuous replication to target_uri if it exists, then start
		it fresh.
		< rnewson> sivang: you can cancel a replication with "cancel":true but
		they are not automatically restarted if they crash. However:
		according to rnewson, starting a replication reuses a previous
		connection if it existed, so we don't really need to do anything.
		Just start the continuous replication again for every node that came
		back online, and forget about it! Yes, it is THAT easy.
		This is, after all, CouchDB. Time to relax.
		"""
        target_uri_db = "%s/%s" % (target_uri, database_name)
        self.logger.info("Start Cont. rpct. : %s" % target_uri_db)
        # direction of replication is changed to pull replication instead
        # of push, as recommended by the CouchDB wiki for better performance.
        self.dbserver.replicate(source=self.database_name, target=target_uri_db, continuous=True)
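
A hedged sketch of how this manager might be driven; the host list, database name and interval below are placeholders, not values from the project:

# Hypothetical driver: probe two remote nodes and restart replication
# for whichever ones come back online, every 30 seconds.
hosts = ["http://10.0.0.2:5984", "http://10.0.0.3:5984"]
manager = ConnectionManager(hosts, database_name="session_store")
manager.manageForever(interval=30)
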
Example #9
class CouchDbManager(AbstractPersistenceManager):
    """
    This is a CouchDB manager for the workspace;
    it loads data from the CouchDB databases.
    """
    def __init__(self, uri):
        super(CouchDbManager, self).__init__()
        getLogger(self).debug("Initializing CouchDBManager for url [%s]" % uri)
        self._lostConnection = False
        self.__uri = uri
        self.__serv = NoConectionServer()
        self._available = False
        try:
            if uri is not None:
                self.testCouchUrl(uri)
                url = urlparse(uri)
                getLogger(self).debug("Setting user,pass %s %s" %
                                      (url.username, url.password))
                self.__serv = Server(uri=uri)
                self.__serv.resource_class.credentials = (url.username,
                                                          url.password)
                self._available = True
                self.pushReports()
                self._loadDbs()
        except:
            getLogger(self).warn("No route to couchdb server on: %s" % uri)
            getLogger(self).debug(traceback.format_exc())

    #@trap_timeout
    def _create(self, name):
        db = self.__serv.create_db(name.lower())
        return CouchDbConnector(db)

    #@trap_timeout
    def _delete(self, name):
        self.__serv.delete_db(name)

    #@trap_timeout
    def _loadDbs(self):
        conditions = lambda x: not x.startswith("_") and x not in CONST_BLACKDBS
        try:
            for dbname in filter(conditions, self.__serv.all_dbs()):
                if dbname not in self.dbs.keys():
                    getLogger(self).debug(
                        "Asking for dbname[%s], registering for lazy initialization"
                        % dbname)
                    self.dbs[dbname] = lambda x: self._loadDb(x)
        except restkit.errors.RequestError as req_error:
            getLogger(self).error(
                "Couldn't load databases. "
                "The connection to the CouchDB was probably lost. ")

    def _loadDb(self, dbname):
        db = self.__serv.get_db(dbname)
        seq = db.info()['update_seq']
        self.dbs[dbname] = CouchDbConnector(db, seq_num=seq)
        return self.dbs[dbname]

    def refreshDbs(self):
        """Refresh databases using the inherited method. On exception, assume
        no databases are available.
        """
        try:
            return AbstractPersistenceManager.refreshDbs(self)
        except:
            return []

    #@trap_timeout
    def pushReports(self):
        vmanager = ViewsManager()
        reports = os.path.join(os.getcwd(), "views", "reports")
        try:
            workspace = self.__serv.get_or_create_db("reports")
            vmanager.addView(reports, workspace)
        except:
            getLogger(self).warn(
                "Reports database couldn't be uploaded. You need to be an admin to do it"
            )
        return self.__uri + "/reports/_design/reports/index.html"

    def lostConnectionResolv(self):
        self._lostConnection = True
        self.__dbs.clear()
        self.__serv = NoConectionServer()

    def reconnect(self):
        ret_val = False
        ur = self.__uri
        if CouchDbManager.testCouch(ur):
            self.__serv = Server(uri=ur)
            self.__dbs.clear()
            self._lostConnection = False
            ret_val = True

        return ret_val

    @staticmethod
    def testCouch(uri):
        """Redirect to the module-level function of the name, which
        serves the same purpose and is used by other classes too."""
        return test_couch(uri)

    def testCouchUrl(self, uri):
        if uri is not None:
            url = urlparse(uri)
            proto = url.scheme
            host = url.hostname
            port = url.port
            self.test(host, int(port))

    def test(self, address, port):
        import socket
        s = socket.socket()
        s.settimeout(1)
        s.connect((address, port))

    #@trap_timeout
    def replicate(self, workspace, *targets_dbs, **kwargs):
        getLogger(self).debug("Targets to replicate %s" % str(targets_dbs))
        for target_db in targets_dbs:
            src_db_path = "/".join([self.__uri, workspace])
            dst_db_path = "/".join([target_db, workspace])
            try:
                getLogger(self).info(
                    "workspace: %s, src_db_path: %s, dst_db_path: %s, **kwargs: %s"
                    % (workspace, src_db_path, dst_db_path, kwargs))
                self.__peerReplication(workspace, src_db_path, dst_db_path,
                                       **kwargs)
            except ResourceNotFound as e:
                raise e
            except Exception as e:
                getLogger(self).error(e)
                raise

    def __peerReplication(self, workspace, src, dst, **kwargs):
        mutual = kwargs.get("mutual", True)
        continuous = kwargs.get("continuous", True)
        ct = kwargs.get("create_target", True)

        self.__serv.replicate(workspace,
                              dst,
                              mutual=mutual,
                              continuous=continuous,
                              create_target=ct)
        if mutual:
            self.__serv.replicate(dst, src, continuous=continuous, **kwargs)
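
replicate joins the workspace name onto each target URI and, by default, sets up mutual continuous replication via __peerReplication. A hedged usage sketch that relies on those defaults (the peer URI is a placeholder):

# Hypothetical call: mirror the 'ws1' workspace to one peer using the
# default mutual/continuous/create_target behaviour.
manager.replicate("ws1", "http://peer.example.org:5984")
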