Example 1
 def _createConn(self):
     try:
         cluster = Cluster(self.connection_string, bucket_class=CouchbaseBucket)
         cluster.authenticate(PasswordAuthenticator(self.bucket, 'password'))
         self.cb = cluster.open_bucket(self.bucket)
     except BucketNotFoundError:
          raise
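
The snippet above uses the 2.x Python SDK flow (authenticate() followed by open_bucket()). For comparison, here is a minimal sketch of the same connection with the 3.x-style API that several later examples use; the connection string and credentials are placeholders, and the exact import path of PasswordAuthenticator differs slightly between 3.x releases:

from couchbase.cluster import Cluster, ClusterOptions
from couchbase.auth import PasswordAuthenticator  # early 3.x releases export this from couchbase.cluster

# Placeholders standing in for self.connection_string / self.bucket above.
connection_string = "couchbase://127.0.0.1"
bucket_name = "default"

cluster = Cluster(connection_string,
                  ClusterOptions(PasswordAuthenticator(bucket_name, "password")))
bucket = cluster.bucket(bucket_name)
collection = bucket.default_collection()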
Example 2
    def create_xattr_data(self, type="system"):
        cluster = Cluster('couchbase://'+str(self.master.ip))
        authenticator = PasswordAuthenticator(self.username, self.password)
        cluster.authenticate(authenticator)
        cb = cluster.open_bucket('default')
        docs = self.get_meta_ids()
        self.log.info("Docs: " + str(docs[0:5]))
        xattr_data = []
        self.log.info("Adding xattrs to data")
        val = 0
        for doc in docs:
            if type == "system":
                rv = cb.mutate_in(doc["id"], SD.upsert('_system1', val, xattr=True, create_parents=True))
                xattr_data.append({'_system1': val})
                rv = cb.mutate_in(doc["id"], SD.upsert('_system2', {'field1': val, 'field2': val*val}, xattr=True, create_parents=True))
                xattr_data.append({'_system2': {'field1': val, 'field2': val*val}})
                rv = cb.mutate_in(doc["id"], SD.upsert('_system3', {'field1': {'sub_field1a': val, 'sub_field1b': val*val}, 'field2': {'sub_field2a': 2*val, 'sub_field2b': 2*val*val}}, xattr=True, create_parents=True))
                xattr_data.append({'_system3': {'field1': {'sub_field1a': val, 'sub_field1b': val*val}, 'field2': {'sub_field2a': 2*val, 'sub_field2b': 2*val*val}}})
            if type == "user":
                rv = cb.mutate_in(doc["id"], SD.upsert('user1', val, xattr=True, create_parents=True))
                xattr_data.append({'user1': val})
                rv = cb.mutate_in(doc["id"], SD.upsert('user2', {'field1': val, 'field2': val*val}, xattr=True, create_parents=True))
                xattr_data.append({'user2': {'field1': val, 'field2': val*val}})
                rv = cb.mutate_in(doc["id"], SD.upsert('user3', {'field1': {'sub_field1a': val, 'sub_field1b': val*val}, 'field2': {'sub_field2a': 2*val, 'sub_field2b': 2*val*val}}, xattr=True, create_parents=True))
                xattr_data.append({'user3': {'field1': {'sub_field1a': val, 'sub_field1b': val*val}, 'field2': {'sub_field2a': 2*val, 'sub_field2b': 2*val*val}}})
            val = val + 1

        self.log.info("Completed adding " + type + " xattrs to " + str(val) + " docs")
        return xattr_data
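
As a complement (not part of the original test), a small sketch of reading one of those xattrs back with a sub-document lookup, assuming the same cb bucket object and the docs list from create_xattr_data():

import couchbase.subdocument as SD

# Assumes `cb` and `docs` are set up as in create_xattr_data() above.
doc_id = docs[0]["id"]
rv = cb.lookup_in(doc_id, SD.get('_system1', xattr=True))
print(rv[0])  # the value previously upserted into the '_system1' xattr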
 def test_PYCBC_488(self):
     cluster = Cluster('couchbases://10.142.175.101?certpath=/Users/daschl/tmp/ks/chain.pem&keypath=/Users/daschl/tmp/ks/pkey.key')
     with self.assertRaises(MixedAuthError) as maerr:
         cluster.open_bucket("pixels",
                              password=self.cluster_info.bucket_password)
     exception = maerr.exception
     self.assertIsInstance(exception, MixedAuthError)
     self.assertRegex(exception.message, r'.*CertAuthenticator.*password.*')
 def test_PYCBC_489(self):
     from couchbase.cluster import Cluster
     with self.assertRaises(MixedAuthError) as maerr:
         cluster = Cluster('couchbases://10.142.175.101?certpath=/Users/daschl/tmp/ks/chain.pem&keypath=/Users/daschl/tmp/ks/pkey.key')
         cb = cluster.open_bucket('pixels', password = '******')
         cb.upsert('u:king_arthur', {'name': 'Arthur', 'email': '*****@*****.**', 'interests': ['Holy Grail', 'African Swallows']})
     exception = maerr.exception
     self.assertIsInstance(exception, MixedAuthError)
     self.assertRegex(exception.message, r'.*CertAuthenticator-style.*password.*')
Example 5
class SwapRebalanceBase(unittest.TestCase):

    @staticmethod
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        if len(set([server.ip for server in self.servers])) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear the state from Previous invalid run
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, previous test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
            SwapRebalanceBase.reset(self)
            self.cluster_helper = Cluster()

            # Make sure the test is set up correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers, msg=msg.format(min_servers))

            self.log.info('picking server : {0} as the master'.format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info("==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
            SwapRebalanceBase._log_start(self)
        except Exception as e:
            self.cluster_helper.shutdown()
            self.fail(e)
Example 6
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of the case performed by testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        # initial number of nodes in the cluster
        self.nodes_init = self.input.param("nodes_init", 1)

        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        # max number of items to verify in ValidateDataTask; None means verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # avoid cleanup if the previous test has been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        if str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1:
            #rebalance all nodes into the cluster before each test
            self.cluster.rebalance(self.servers[:self.num_servers], self.servers[1:self.num_servers], [])
        elif self.nodes_init > 1:
            self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)
    def _create_cluster(self):
        connargs = self.make_connargs()
        connstr = ConnectionString.parse(str(connargs.pop('connection_string')))
        connstr.clear_option('username')
        bucket = connstr.bucket
        connstr.bucket = None
        password = connargs.get('password', '')

        # Can I open a new bucket via open_bucket?
        cluster = Cluster(connstr, bucket_class=self.factory)
        cluster.authenticate(ClassicAuthenticator(
            buckets={bucket: password},
            cluster_password=self.cluster_info.admin_password,
            cluster_username=self.cluster_info.admin_username))
        return cluster, bucket
 def _create_cluster_clean(self, authenticator):
     connargs = self.make_connargs()
     connstr = ConnectionString.parse(str(connargs.pop('connection_string')))
     connstr.clear_option('username')
     bucket = connstr.bucket
     connstr.bucket = None
     password = connargs.get('password', None)
     keys_to_skip = authenticator.get_credentials(bucket)['options'].keys()
     for entry in keys_to_skip:
         connstr.clear_option(entry)
     cluster = Cluster(connstr, bucket_class=self.factory)
     cluster.authenticate(ClassicAuthenticator(buckets={bucket: password}))
     return cluster, bucket
Example 9
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.cluster = Cluster()
        self.servers = self.input.servers
        self.buckets = {}

        self.default_bucket = self.input.param("default_bucket", True)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)

        if not self.input.param("skip_cleanup", False):
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)

        self.quota = self._initialize_nodes(self.cluster, self.servers)
        if self.dgm_run:
            self.quota = 256
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
            self.buckets['default'] = {1 : KVStore()}
        self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
Example 10
 def create_connections(self):
     """
     Create bucket connections. 5 bucket connections are created per instance.
     :return: Nothing
     """
     for i in range(0, self.threads):
         if self.cb_version > '5':
             cluster = Cluster(self.spec)
             auth = PasswordAuthenticator(self.user, self.password)
             cluster.authenticate(auth)
             bucket = cluster.open_bucket(self.bucket_name, lockmode=LOCKMODE_WAIT)
             bucket.timeout = self.timeout
             self.connections.append(bucket)
         else:
             bucket = Bucket('{0}/{1}'.format(self.spec, self.bucket_name), lockmode=LOCKMODE_WAIT)
             bucket.timeout = self.timeout
             self.connections.append(bucket)
Example 11
 def post_deploy():
     data = request.body.readline()
     if not data:
       abort(400, 'No data received')
     entity = json.loads(data)
     cluster = Cluster("{0}".format(db_endpoint))
     authenticator = PasswordAuthenticator("{0}".format(username_db),"{0}".format(password_db))
     cluster.authenticate(authenticator)
     cb = cluster.open_bucket("{0}".format(bucket_name))
     component = entity['component']
     version = entity['version']
     owner = entity['owner']
     status = entity['status']
     nextIdNumber = cb.counter("id", 1, 0).value
     timedb = time()
     print(timedb)
     id = str(nextIdNumber)
     data2 = { 'component': component, 'version': version, 'status': status, 'owner': owner, 'time': timedb}
     datadb = json.dumps(data2,sort_keys=True)
     cb.upsert(id, datadb)
     return datadb
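
The handler expects a JSON body with component, version, owner and status fields; a sketch of a matching payload (all values are placeholders):

import json

# Placeholder payload; POST it to whatever route post_deploy() is bound to.
payload = json.dumps({
    "component": "indexer",
    "version": "7.0.0-0000",
    "owner": "qa",
    "status": "deployed",
})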
Example 12
 def connect_cb(self):
     """
     create a couchbase connection and maintain the collection and cluster objects.
     """
     logging.info("data_type_manager - Connecting to couchbase")
     # get a reference to our cluster
     # noinspection PyBroadException
     try:
         options = ClusterOptions(
             PasswordAuthenticator(
                 self.cb_credentials["user"], self.cb_credentials["password"]
             )
         )
         self.cluster = Cluster(
             "couchbase://" + self.cb_credentials["host"], options
         )
         self.collection = self.cluster.bucket("mdata").default_collection()
         logging.info("Couchbase connection success")
     except Exception as _e:  # pylint:disable=broad-except
         logging.error("*** %s in connect_cb ***", str(_e))
         sys.exit("*** Error when connecting to couchbase database: ")
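
For reference, a sketch of the credentials mapping connect_cb() expects; only the key names ("host", "user", "password") come from the code above, the values are placeholders:

# Hypothetical values; only the key names are dictated by connect_cb().
cb_credentials = {
    "host": "127.0.0.1",
    "user": "Administrator",
    "password": "password",
}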
Example 13
    def connect(self, host, bucket, username, password):
        self.host = host
        self.bucket_name = bucket
        self.username = username
        self.password = password

        connection_str = 'couchbase://{0}'.format(self.host)

        try:
            self.cluster = Cluster(connection_str)
            authenticator = PasswordAuthenticator(self.username, self.password)
            self.cluster.authenticate(authenticator)

            self.bucket = self.cluster.open_bucket(self.bucket_name)
        except Exception as error:
            print('Could not open bucket: {0}.  Error: {1}'.format(
                self.bucket_name, error))
            raise

        self.connected = True
        return self.connected
Example 14
 def connection(self, client_ip, bucket_name, user, password):
     log.info(
         "Bucket name for connection is ---- {0}, username -- {1}, ----- password -- {2}".format(bucket_name, user, \
                                                                                                 password))
     result = False
     connection_string = 'couchbase://' + client_ip + '/' + bucket_name + '?username='******'&select_bucket=true'
     log.info(
         " Value of connection string is - {0}".format(connection_string))
     time.sleep(2)
     try:
         cluster = Cluster(
             'couchbase://' + client_ip,
             ClusterOptions(PasswordAuthenticator(user, password)))
         cb = cluster.bucket(bucket_name)
         default_collection = cb.default_collection()
         if cb is not None:
             result = True
             return default_collection, result
     except Exception as ex:
         log.info("Exception in creating an SDK connection {0}".format(ex))
         return result
Example 15
 def wait_for_replication(servers, cluster_helper=None, timeout=600):
     if cluster_helper is None:
         cluster = Cluster()
     else:
         cluster = cluster_helper
     tasks = []
     rest = RestConnection(servers[0])
     buckets = rest.get_buckets()
     for server in servers:
         for bucket in buckets:
             for server_repl in list(set(servers) - set([server])):
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server], bucket, 'tap',
                         'eq_tapq:replication_ns_1@' + server_repl.ip +
                         ':idle', '==', 'true'))
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server], bucket, 'tap',
                         'eq_tapq:replication_ns_1@' + server_repl.ip +
                         ':backfill_completed', '==', 'true'))
     try:
         for task in tasks:
             task.result(timeout)
     finally:
         if cluster_helper is None:
             # stop all newly created task manager threads
             cluster.shutdown()
         return True
Example 16
 def setUp(self):
     try:
         self.log = logger.Logger.get_logger()
         self.input = TestInputSingleton.input
         self.servers = self.input.servers
         self.browser = self.input.ui_conf['browser']
         self.replica  = self.input.param("replica", 1)
         self.case_number = self.input.param("case_number", 0)
         self.cluster = Cluster()
         self.machine = self.input.ui_conf['server']
         self.driver = None
         self.shell = RemoteMachineShellConnection(self.machine)
         # avoid cleanup if the previous test has been torn down
         if not self.input.param("skip_cleanup", True) \
                                         or self.case_number == 1:
             self.tearDown()
         self._log_start(self)
         #thread for selenium server
         if not self._is_selenium_running():
             self.log.info('start selenium')
             self._start_selenium_thread()
             self._wait_for_selenium_is_started()
         self.log.info('start selenium session')
         if self.browser == 'ff':
             self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                            .format(self.machine.ip,
                                                    self.machine.port),
                                            desired_capabilities=DesiredCapabilities.FIREFOX)
         elif self.browser == 'chrome':
             self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                            .format(self.machine.ip,
                                                    self.machine.port),
                                            desired_capabilities=DesiredCapabilities.CHROME)
         self.log.info('start selenium started')
         self.driver.get("http://{0}:{1}".format(self.servers[0].ip,
                                                 self.servers[0].port))
         self.driver.maximize_window()
     except Exception as ex:
         skip_setup_failed = True
         self.fail(ex)
Example 17
    def test_create_delete_similar_views(self):
        ddoc_name_prefix = self.input.param("ddoc_name_prefix", "ddoc")
        view_name = self.input.param("view_name", "test_view")
        map_fn = 'function (doc) {if(doc.age !== undefined) { emit(doc.age, doc.name);}}'
        rest = RestConnection(self.servers[0])
        ddocs = [DesignDocument(ddoc_name_prefix + "1", [View(view_name, map_fn,
                                                             dev_view=False)],
                                options={"updateMinChanges":0, "replicaUpdateMinChanges":0}),
                DesignDocument(ddoc_name_prefix + "2", [View(view_name, map_fn,
                                                            dev_view=True)],
                               options={"updateMinChanges":0, "replicaUpdateMinChanges":0})]

        ViewBaseTests._load_docs(self, self.num_docs, "test_")
        for ddoc in ddocs:
            results = self.create_ddoc(rest, 'default', ddoc)

        try:
            cluster = Cluster()
            cluster.delete_view(self.servers[0], ddocs[1].name, ddocs[1].views[0])
        finally:
            cluster.shutdown()

        results_new = rest.query_view(ddocs[0].name, ddocs[0].views[0].name, 'default',
                                  {"stale" : "ok", "full_set" : "true"})
        self.assertEqual(results.get(u'rows', []), results_new.get(u'rows', []),
                         "Results returned previously %s don't match with current %s" % (
                         results.get(u'rows', []), results_new.get(u'rows', [])))
Example 18
 def test_can_override_tracing_options(self):
     timeout = timedelta(seconds=50)
     timeout2 = timedelta(seconds=100)
     opts = self._create_cluster_opts(
         tracing_options=ClusterTracingOptions(tracing_orphaned_queue_flush_interval=timeout))
     args = self._mock_hack()
     args.update({'tracing_options': ClusterTracingOptions(
         tracing_orphaned_queue_flush_interval=timeout2)})
     cluster = Cluster.connect(self.cluster.connstr, opts, **args)
     self.assertEqual(
         timeout2, cluster.tracing_orphaned_queue_flush_interval)
     b = cluster.bucket(self.bucket_name)
     self.assertEqual(timeout2, b.tracing_orphaned_queue_flush_interval)
 def test_disconnect(self):
     # for this test we need a new cluster...
     if self.is_mock:
         raise SkipTest("query not mocked")
     cluster = Cluster.connect(self.cluster.connstr, ClusterOptions(
         PasswordAuthenticator(self.cluster_info.admin_username, self.cluster_info.admin_password)))
     # Temporarily, let's open a bucket to ensure the admin object was created
     b = cluster.bucket(self.bucket_name)
     # verify that we can get a bucket manager
     self.assertIsNotNone(cluster.buckets())
     # disconnect cluster
     cluster.disconnect()
     self.assertRaises(AlreadyShutdownException, cluster.buckets)
Example 20
    def setUp(self):
        try:
            self._log = logger.Logger.get_logger()
            self._input = TestInputSingleton.input
            self._init_parameters()
            self._cluster_helper = Cluster()
            self._log.info("==============  XDCRbasetests setup was started for test #{0} {1}=============="\
                .format(self._case_number, self._testMethodName))
            if not self._input.param("skip_cleanup", False):
                self._cleanup_previous_setup()

            self._init_clusters(self._disabled_consistent_view)
            self.setup_extended()
            self._log.info("==============  XDCRbasetests setup was finished for test #{0} {1} =============="\
                .format(self._case_number, self._testMethodName))
            self._log_start(self)
        except Exception as e:
            self._log.error(e.message)
            self._log.error("Error while setting up clusters: %s",
                            sys.exc_info())
            self._cleanup_broken_setup()
            raise
Example 21
 def wait_for_replication(servers, cluster_helper=None, timeout=600):
     if cluster_helper is None:
         cluster = Cluster()
     else:
         cluster = cluster_helper
     tasks = []
     rest = RestConnection(servers[0])
     buckets = rest.get_buckets()
     for server in servers:
         for bucket in buckets:
             for server_repl in list(set(servers) - set([server])):
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server],
                         bucket,
                         "tap",
                         "eq_tapq:replication_ns_1@" + server_repl.ip + ":idle",
                         "==",
                         "true",
                     )
                 )
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server],
                         bucket,
                         "tap",
                         "eq_tapq:replication_ns_1@" + server_repl.ip + ":backfill_completed",
                         "==",
                         "true",
                     )
                 )
     try:
         for task in tasks:
             task.result(timeout)
     finally:
         if cluster_helper is None:
             # stop all newly created task manager threads
             cluster.shutdown()
         return True
Example 22
    def __xdcr_create(self):
        """Creates replications between host and xdcr_hosts.
        """
        if not self.xdcr_hosts:
            return
        if not isinstance(self.xdcr_hosts, dict):
            print("xdcr_hosts not a dictionary. Ignoring")
            return

        xdcrs = self.xdcr_hosts.get(self.host)
        xdcrs = xdcrs if xdcrs is not None else []
        for xdcr_host in xdcrs:
            xdcr_cluster = Cluster('couchbase://' + xdcr_host)
            xdcr_cluster.authenticate(
                PasswordAuthenticator(self.username, self.password))

            # Create bucket in External Cluster
            self.__bucket_create(xdcr_cluster, xdcr_host, sync=True)

            # host > external
            createXDCR(host=self.host,
                       port=self.port,
                       username=self.username,
                       password=self.password,
                       fromBucket=self.name,
                       toCluster=xdcr_host,
                       toBucket=self.name)

            # external > host
            r_xdcrs = self.xdcr_hosts.get(xdcr_host)
            r_xdcrs = r_xdcrs if r_xdcrs is not None else []
            if self.host in r_xdcrs:
                createXDCR(host=xdcr_host,
                           username=self.username,
                           password=self.password,
                           fromBucket=self.name,
                           toCluster=self.host,
                           toBucket=self.name)
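
The method expects xdcr_hosts to map each host to the list of external hosts it replicates to; a sketch with placeholder addresses:

# Placeholder topology: two hosts replicating to each other.
xdcr_hosts = {
    "10.0.0.1": ["10.0.0.2"],
    "10.0.0.2": ["10.0.0.1"],
}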
Example 23
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.cluster = Cluster()
        self.servers = self.input.servers
        self.buckets = {}
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of the case performed by testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # avoid cleanup if the previous test has been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        self.quota = self._initialize_nodes(self.cluster, self.servers)
        if self.dgm_run:
            self.quota = 256
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
            self.buckets[self.default_bucket_name] = {1 : KVStore()}
        self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        # TODO (Mike): Create Standard buckets
        self._log_start(self)
Example 24
 def _createConn(self):
     try:
         cluster = Cluster(self.connection_string, bucket_class=CouchbaseBucket)
         cluster.authenticate(PasswordAuthenticator(self.bucket, 'password'))
         self.cb = cluster.open_bucket(self.bucket)
     except BucketNotFoundError:
          raise
     except AuthError:
         # Try using default user created by the tests, if any, in case there is no user with bucket name in the
         # cluster.
         try:
             cluster = Cluster(self.connection_string, bucket_class=CouchbaseBucket)
             cluster.authenticate(PasswordAuthenticator("cbadminbucket", 'password'))
             self.cb = cluster.open_bucket(self.bucket)
         except AuthError:
             raise
Example 25
 def uploadDoc(self):
     # connect to cb cluster
     try:
         connection = "couchbase://" + self.server
         cluster = Cluster(connection)
         authenticator = PasswordAuthenticator(self.username, self.password)
         cluster.authenticate(authenticator)
         cb = cluster.open_bucket(self.bucket)
     except Exception as e:
         logging.error("Connection error\n" + traceback.format_exc())
     ttls = [0, 60, 120, 180, 240, 300, 360, 3600, 7200]
     for i in range(self.startseqnum, self.startseqnum + self.num_docs):
         logging.info("generating doc: " + str(i))
         self.createContent()
         dockey = "edgyjson-" + str(i) + "-" + str(
             datetime.datetime.now())[:19]
         try:
             cb.upsert(str(dockey),
                       self.json_objs_dict,
                       ttl=random.choice(ttls))
             logging.info("upsert: " + dockey)
         except Exception as e:
             logging.error("Upload error\n" + traceback.format_exc())
Example 26
def connect_to_cluster(host: str,
                       user: str,
                       password: str,
                       bucket: str,
                       services: List[ServiceType] = [ServiceType.Query]):
    """Creates a connection to a cluster and checks it is connected to the given services before returning."""
    cluster = Cluster(host,
                      ClusterOptions(PasswordAuthenticator(user, password)))
    cb = cluster.bucket(bucket)  # pylint: disable=unused-variable
    for _ in range(100):
        result = cb.ping(PingOptions(service_types=services))
        ready = True
        for service in services:
            try:
                if result.endpoints[service][0].state != PingState.OK:
                    ready = False
            except (KeyError, IndexError) as e:
                raise AssertionError(
                    f"Service {service.value} not available") from e
        if ready:
            return cluster, cb
        time.sleep(1)
    raise AssertionError("Failed to connect to cluster")
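
A short usage sketch for the helper above; host, credentials and bucket name are placeholders, and the call keeps pinging until the Query service reports PingState.OK (or raises AssertionError after about 100 attempts):

# Placeholder connection details.
cluster, cb = connect_to_cluster("couchbase://127.0.0.1",
                                 "Administrator", "password",
                                 "travel-sample",
                                 [ServiceType.Query])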
Example 27
    def setUp(self):
        self.cluster = Cluster()

        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.num_servers = self.input.param("servers", 1)

        master = self.servers[0]
        num_replicas = self.input.param("replicas", 1)
        self.bucket = 'default'

        # Start: Should be in a before class function
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([master], self)
        # End: Should be in a before class function

        self.quota = self.cluster.init_node(master)
        self.old_vbuckets = self._get_vbuckets(master)
        ClusterOperationHelper.set_vbuckets(master, 1)
        self.cluster.create_default_bucket(master, self.quota, num_replicas)
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])
Example 28
def rerun_worse(cluster: Cluster, job, options):
    query = "select raw os.`{}`.`{}`.`{}` from greenboard where `build` = '{}' and type = 'server'".format(
        job["os"], job["component"], job["name"], options.build)

    all_runs = list(cluster.query(query))[0]

    # we will only try a worse rerun again if there was a rerun and the number of worse reruns is less than `max_failed_reruns`
    if len(all_runs) < 2 or len(all_runs) > (options.max_failed_reruns + 1):
        return False

    latest_rerun = all_runs[0]
    fresh_run = all_runs[len(all_runs) - 1]

    return latest_rerun["failCount"] > fresh_run["failCount"] or latest_rerun[
        "totalCount"] < fresh_run["totalCount"]
 def test_cluster_may_need_open_bucket_before_admin_calls(self):
     # NOTE: some admin calls -- like listing query indexes, seem to require
     # that the admin was given a bucket.  That can only happen if we have already
     # opened a bucket, which is what usually happens in the tests.  This does not, and
     # checks for the exception when appropriate.
     if self.is_mock:
         raise SkipTest("mock doesn't support the admin call we are making")
     cluster = Cluster.connect(self.cluster.connstr, self._create_cluster_opts(), **self._mock_hack())
     if cluster._is_6_5_plus():
         self.assertIsNotNone(cluster.query_indexes().get_all_indexes(self.bucket_name))
     else:
         # since we called cluster._is_6_5_plus(), that creates an admin under the hood to do
         # the http call.  Thus, we won't get the NoBucketException in this case, we get an
         # NotSupportedException instead.  Normally, one would use the public api and not hit that,
         # getting the NoBucketException instead.
         self.assertRaises(NotSupportedException, cluster.query_indexes().get_all_indexes, self.bucket_name)
Example 30
class CouchbaseHelper:
    password_authenticator = PasswordAuthenticator(CouchbaseConfig.USERNAME, CouchbaseConfig.PASSWORD)
    cluster_options = ClusterOptions(password_authenticator)

    cluster = Cluster(CouchbaseConfig.URL, cluster_options)
    bucket = cluster.bucket(CouchbaseConfig.BUCKET_NANE)
    collection = bucket.default_collection()

    def __init__(self, *args, **kwargs):
        self.id = str(uuid.uuid4())

    @classmethod
    def create(cls, obj, schema_cls):
        schema = schema_cls()
        result = schema.dump(obj)
        cls.collection.upsert(str(uuid.uuid4()), result)
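
A hedged usage sketch for CouchbaseHelper.create(); User and UserSchema below are placeholder stand-ins for the object being stored and for any marshmallow-style schema class exposing dump():

class User:
    def __init__(self, name):
        self.name = name

class UserSchema:
    # Minimal stand-in for a marshmallow-style schema: dump() returns a dict.
    def dump(self, obj):
        return {"name": obj.name}

CouchbaseHelper.create(User("arthur"), UserSchema)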
Example 31
def get_cluster():
    try:
        cluster = Cluster('couchbase://{}'.format(host))
        authenticator = PasswordAuthenticator('Administrator', 'password')
        cluster.authenticate(authenticator)
        return cluster
    except Exception:
        cluster = Cluster(
            'couchbase://{}'.format(host),
            ClusterOptions(PasswordAuthenticator('Administrator', 'password')))
        return cluster
Example 32
class CouchDbAccess:
    def __init__(self):
        # using a local server
        self.cluster = Cluster(
            'couchbase://localhost:8091',
            ClusterOptions(
                PasswordAuthenticator('Administrator', 'password1234')))

    def upsert_doc(self, doc, doc_id, bucket_name):
        print("Upsert document: ")
        cb = self.cluster.bucket(bucket_name)
        coll = cb.default_collection()
        try:
            result = coll.insert(doc_id, doc)
            print(result.cas)
        except Exception as e:
            print(e)
 def test_cluster_may_need_open_bucket_before_admin_calls(self):
     # NOTE: some admin calls -- like listing query indexes, seem to require
     # that the admin was given a bucket.  That can only happen if we have already
     # opened a bucket, which is what usually happens in the tests.  This does not, and
     # checks for the exception when appropriate.
     if self.is_mock:
         raise SkipTest("mock doesn't support the admin call we are making")
     cluster = Cluster.connect(self.cluster.connstr,
                               self._create_cluster_opts(),
                               **self._mock_hack())
     if cluster._is_6_5_plus():
         self.assertIsNotNone(cluster.query_indexes().get_all_indexes(
             self.bucket_name))
     else:
         self.assertRaises(NoBucketException,
                           cluster.query_indexes().list_all_indexes,
                           self.bucket_name)
Example 34
class ClusterTestCase(ConnectionTestCase):
    def setUp(self, **kwargs):
        self.factory = Bucket
        super(ClusterTestCase, self).setUp()
        connargs = self.cluster_info.make_connargs()
        connstr_abstract = ConnectionString.parse(
            connargs.pop('connection_string'))
        bucket_name = connstr_abstract.bucket
        connstr_abstract.bucket = None
        connstr_abstract.set_option('enable_collections', 'true')
        self.cluster = Cluster(
            connstr_abstract,
            ClusterOptions(
                ClassicAuthenticator(self.cluster_info.admin_username,
                                     self.cluster_info.admin_password)))
        self.admin = self.make_admin_connection()
        self.bucket = self.cluster.bucket(bucket_name, **connargs)
        self.bucket_name = bucket_name
Example 35
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        self.case_number = self.input.param("case_number", 0)

        # Clear the state from Previous invalid run
        rest.stop_rebalance()
        self.load_started = False
        self.loaders = []
        self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        SwapRebalanceBase.reset(self)
        self.cluster_helper = Cluster()
        # Initialize test params
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 100000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.skip_cleanup = self.input.param("skip-cleanup", False)
        self.do_access = self.input.param("do-access", True)

        # Make sure the test is set up correctly
        min_servers = int(self.num_initial_servers) + int(self.num_swap)
        msg = "minimum {0} nodes required for running swap rebalance"
        self.assertTrue(len(self.servers) >= min_servers,
            msg=msg.format(min_servers))

        self.log.info('picking server : {0} as the master'.format(serverInfo))
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        self.log.info("==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
        SwapRebalanceBase._log_start(self)
Example 36
    def setUp(self):
        try:
            self._log = logger.Logger.get_logger()
            self._log.info("SetUp process started ...")
            self._input = TestInputSingleton.input
            self._cluster_helper = Cluster()

            self._init_parameters()

            if not self._input.param("skip_cleanup", False):
                self._cleanup_previous_setup()

            self._init_clusters()
            self.setup_extended()
            self._log.info("SetUp process completed...")
        except:
            self._log.error("Error while setting up clusters: %s", sys.exc_info()[0])
            self._cleanup_broken_setup()
            raise
Example 37
 def setUp(self):
     log = logger.Logger.get_logger()
     self._input = TestInputSingleton.input
     self._keys_count = self._input.param("keys_count", DEFAULT_KEY_COUNT)
     self._num_replicas = self._input.param("replica", DEFAULT_REPLICA)
     self.bidirectional = self._input.param("bidirectional", False)
     self.case_number = self._input.param("case_number", 0)
     self._value_size = self._input.param("value_size", 256)
     self.wait_timeout = self._input.param("wait_timeout", 60)
     self._servers = self._input.servers
     self.master = self._servers[0]
     self._failed_nodes = []
     num_buckets = 0
     self.buckets = []
     self.default_bucket = self._input.param("default_bucket", True)
     if self.default_bucket:
         self.default_bucket_name = "default"
         num_buckets += 1
     self._standard_buckets = self._input.param("standard_buckets", 0)
     self._sasl_buckets = self._input.param("sasl_buckets", 0)
     num_buckets += self._standard_buckets + self._sasl_buckets
     self.dgm_run = self._input.param("dgm_run", True)
     self.log = logger.Logger().get_logger()
     self._cluster_helper = Cluster()
     self.disabled_consistent_view = self._input.param("disabled_consistent_view", None)
     self._quota = self._initialize_nodes(self._cluster_helper, self._servers, self.disabled_consistent_view)
     if self.dgm_run:
         self._quota = 256
     self.bucket_size = int((2.0 / 3.0) / float(num_buckets) * float(self._quota))
     self.gen_create = BlobGenerator('loadOne', 'loadOne_', self._value_size, end=self._keys_count)
     self.add_back_flag = False
     self._cleanup_nodes = []
     log.info("==============  setup was started for test #{0} {1}=============="\
                   .format(self.case_number, self._testMethodName))
     RemoteUtilHelper.common_basic_setup(self._servers)
     BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
     for server in self._servers:
         ClusterOperationHelper.cleanup_cluster([server])
     ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
     self._setup_cluster()
     self._create_buckets_()
     log.info("==============  setup was finished for test #{0} {1} =============="\
                   .format(self.case_number, self._testMethodName))
Example 38
    def test_simpleconnect(self):

      #tag::simpleconnect[]
      cluster = Cluster.connect("127.0.0.1", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")
      collection = bucket.default_collection()

      # You can access multiple buckets using the same Cluster object.
      another_bucket = cluster.bucket("beer-sample")

      # You can access collections other than the default
      # if your version of Couchbase Server supports this feature.
      customer_a = bucket.scope("customer-a")
      widgets = customer_a.collection("widgets")

      #end::simpleconnect[]

      # For a graceful shutdown, disconnect from the cluster when the program ends.
      cluster.disconnect()
Example 39
 def InsertData(DBName, BucketName, Data):
     #Database config
     config = getConfig() 
     cluster=Cluster(config.get('couchbase', 'cluster'))
     userName=config.get('couchbase', 'DAG')
     Pwd=config.get('couchbase', 'Pwd')
     # couchbase authentication
     cluster.authenticate(PasswordAuthenticator(userName, Pwd))  
     #open the database
     cb = cluster.open_bucket(DBName)
     #open the bucket name to load data
     dbkey = cluster.open_bucket(BucketName)
     #insert data to the bucket
     dbkey.upsert_multi(Data)
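
upsert_multi() takes a mapping of document id to document body, so a call to the helper above might look like this sketch (bucket names and documents are placeholders):

# Placeholder bucket names and documents.
docs = {
    "doc::1": {"name": "alpha", "count": 1},
    "doc::2": {"name": "beta", "count": 2},
}
InsertData("data_bucket", "key_bucket", docs)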
Example 40
 def setUp(self):
     try:
         self.log = logger.Logger.get_logger()
         self.input = TestInputSingleton.input
         self.servers = self.input.servers
         self.browser = self.input.ui_conf['browser']
         self.replica  = self.input.param("replica", 1)
         self.case_number = self.input.param("case_number", 0)
         self.cluster = Cluster()
         self.machine = self.input.ui_conf['server']
         self.driver = None
         self.shell = RemoteMachineShellConnection(self.machine)
         # avoid cleanup if the previous test has been torn down
         if not self.input.param("skip_cleanup", True) \
                                         or self.case_number == 1:
             self.tearDown()
         self._log_start(self)
         self._kill_old_drivers()
         #thread for selenium server
         if not self._is_selenium_running():
             self.log.info('start selenium')
             self._start_selenium_thread()
             self._wait_for_selenium_is_started()
         self.log.info('start selenium session')
         if self.browser == 'ff':
             self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                            .format(self.machine.ip,
                                                    self.machine.port),
                                            desired_capabilities=DesiredCapabilities.FIREFOX)
         elif self.browser == 'chrome':
             self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                            .format(self.machine.ip,
                                                    self.machine.port),
                                            desired_capabilities=DesiredCapabilities.CHROME)
         self.log.info('start selenium started')
         self.driver.get("http://{0}:{1}".format(self.servers[0].ip,
                                                 self.servers[0].port))
         self.driver.maximize_window()
     except Exception as ex:
         self.input.test_params["stop-on-failure"] = True
         self.log.error("SETUP FAILED. ALL TESTS WILL BE SKIPPED")
         self.fail(ex)
Example 41
    def setUp(self):
        try:
            self._log = logger.Logger.get_logger()
            self._input = TestInputSingleton.input
            self._init_parameters()
            self._cluster_helper = Cluster()
            self._log.info("==============  XDCRbasetests setup was started for test #{0} {1}=============="\
                .format(self._case_number, self._testMethodName))
            if not self._input.param("skip_cleanup", False):
                self._cleanup_previous_setup()

            self._init_clusters(self._disabled_consistent_view)
            self.setup_extended()
            self._log.info("==============  XDCRbasetests setup was finished for test #{0} {1} =============="\
                .format(self._case_number, self._testMethodName))
            self._log_start(self)
        except Exception as e:
            self._log.error(e.message)
            self._log.error("Error while setting up clusters: %s", sys.exc_info())
            self._cleanup_broken_setup()
            raise
Example 42
 def setUp(self):
     try:
         self.log = logger.Logger.get_logger()
         self.input = TestInputSingleton.input
         self.servers = self.input.servers
         self.browser = self.input.ui_conf['browser']
         self.replica  = self.input.param("replica", 1)
         self.case_number = self.input.param("case_number", 0)
         self.cluster = Cluster()
         # avoid cleanup if the previous test has been torn down
         if not self.input.param("skip_cleanup", True) \
                                         or self.case_number == 1:
             self.tearDown()
         self._log_start(self)
         #thread for selenium server
         if not self._is_selenium_running():
             self.log.info('start selenium')
             self._start_selenium_thread()
             self._wait_for_selenium_is_started()
         self.log.info('start selenium session')
         if self.browser == 'ff':
             self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                            .format(self.input.ui_conf['selenium_ip'],
                                                    self.input.ui_conf['selenium_port']),
                                            desired_capabilities=DesiredCapabilities.FIREFOX)
         elif self.browser == 'chrome':
             self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                            .format(self.input.ui_conf['selenium_ip'],
                                                    self.input.ui_conf['selenium_port']),
                                            desired_capabilities=DesiredCapabilities.CHROME)
         self.log.info('start selenium started')
         self.driver.get("http://{0}:{1}".format(self.servers[0].ip,
                                                 self.servers[0].port))
         self.driver.maximize_window()
     except Exception as ex:
         skip_setup_failed = True
         self.fail(ex)
Example 43
    def setUp(self):
        self.cluster = Cluster()

        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.num_servers = self.input.param("servers", 1)

        master = self.servers[0]
        num_replicas = self.input.param("replicas", 1)
        self.bucket = 'default'

        # Start: Should be in a before class function
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([master], self)
        # End: Should be in a before class function

        self.quota = self.cluster.init_node(master)
        self.old_vbuckets = self._get_vbuckets(master)
        ClusterOperationHelper.set_vbuckets(master, 1)
        self.cluster.create_default_bucket(master, self.quota, num_replicas)
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])
def validate_options(options, cluster: Cluster):
    if not options.build:
        logger.error("No build given")
        sys.exit(1)

    if (options.strategy or options.wait_for_main_run) and (
            not options.previous_builds or len(options.previous_builds) == 0):
        logger.info("--previous-builds not specified, trying to calculate...")
        version = options.build.split("-")[0]
        previous_builds = list(
            cluster.query(
                "select raw `build` from greenboard where `build` like '{}%' and type = 'server' and totalCount > 18500 and `build` != '{}' group by `build` order by `build` desc limit 1"
                .format(version, options.build)))
        if len(previous_builds) == 0 or previous_builds[0] == options.build:
            logger.warning(
                "couldn't determine previous build automatically, ignoring --wait and --strategy parameters"
            )
            options.strategy = None
            options.wait_for_main_run = False
        else:
            logger.info("previous build set to {}".format(previous_builds[0]))
            options.previous_builds = [previous_builds[0]]

    if options.strategy and options.strategy == "regression" and len(
            options.previous_builds) != 1:
        logger.error(
            "regression strategy must specify 1 previous build for comparison")
        sys.exit(1)

    if options.components and options.exclude_components:
        logger.error("both include and exclude components specified")
        sys.exit(1)

    if options.subcomponents and len(options.components) > 1:
        logger.error("Can't supply multiple components with subcomponents")
        sys.exit(1)
def should_dispatch_job(os, component, sub_component, version):
    """
    Finds if a job has to be dispatched for a particular os, component,
    subcomponent and version. The method checks whether the job has
    already run successfully and whether it is currently running.
    :param os: Os of the job
    :type os: str
    :param component: Component of the job
    :type component: str
    :param sub_component: Sub-component of the job
    :type sub_component: str
    :param version: Version of the server for the job
    :type version: str
    :return: Boolean on whether to dispatch the job or not
    :rtype: bool
    """
    doc_id = "{0}_{1}_{2}_{3}".format(os, component, sub_component, version)
    cluster = Cluster('couchbase://{}'.format(host))
    authenticator = PasswordAuthenticator('Administrator', 'password')
    cluster.authenticate(authenticator)
    rerun_jobs = cluster.open_bucket(bucket_name)
    user_name = "{0}-{1}%{2}".format(component, sub_component, version)
    query = "select * from `QE-server-pool` where username like " \
            "'{0}' and state = 'booked'".format(user_name)
    qe_server_pool = cluster.open_bucket("QE-server-pool")
    n1ql_result = qe_server_pool.n1ql_query(N1QLQuery(query))
    if len(n1ql_result.buffered_remainder):
        print("Tests are already running. Not dispatching another job")
        return False
    run_document = rerun_jobs.get(doc_id, quiet=True)
    if not run_document.success:
        return True
    run_document = run_document.value
    last_job = run_document['jobs'][-1]
    last_job_url = last_job['job_url'].rstrip('/')
    result = jenkins_api.get_js(last_job_url, "tree=result")
    if not result or 'result' not in result:
        return True
    if result['result'] == "SUCCESS":
        print("Job had run successfully previously.")
        print("{} is the successful job.".format(last_job_url))
        return False
    return True
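
A usage sketch; the arguments are placeholders, and the module-level host, bucket_name and jenkins_api objects used inside the function are assumed to be configured as in the original dispatcher:

# Placeholder arguments: os, component, sub-component, server version.
if should_dispatch_job("centos", "query", "n1ql", "7.0.0-0000"):
    print("Dispatching job")
else:
    print("Skipping dispatch")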
Example 46
 def _createConn(self):
     try:
         cluster = Cluster(self.connection_string, ClusterOptions(PasswordAuthenticator(self.bucket, 'password')))
         #cluster.authenticate(PasswordAuthenticator(self.bucket, 'password'))
         self.cb = cluster.bucket(self.bucket)
         self.default_collection = self.cb.default_collection()
     except BucketNotFoundError:
          raise
     except AuthError:
         # Try using default user created by the tests, if any, in case there is no user with bucket name in the
         # cluster.
         try:
             cluster = Cluster(self.connection_string,
                               ClusterOptions(PasswordAuthenticator("cbadminbucket", 'password')),
                               bucket_class=CouchbaseBucket)
             self.cb = cluster.bucket(self.bucket)
             self.default_collection = self.cb.default_collection()
         except AuthError:
             raise
def couchbase_to_SQL(bucket_name):

    table_name = 'cb_' + bucket_name

    #Connect to SQL Server
    conn = pyodbc.connect(
        'Driver={SQL Server};'
        'Server=localhost\\SQLEXPRESS;'
        'Database=PythonImport;'
        'Trusted_Connection=yes;',
        autocommit=True)

    sqlcursor = conn.cursor()

    #Connect to couchbase
    cluster = Cluster('couchbase://localhost')
    authenticator = PasswordAuthenticator('user', 'password')
    cluster.authenticate(authenticator)
    c = cluster.open_bucket(bucket_name)

    #Grab all records
    q = N1QLQuery('SELECT * FROM `' + bucket_name + '` ')

    #This dictionary will contain columns for any tables created
    column_list_dict = {}

    conn.commit()

    #Loop to insert records to SQL table(s)
    try:
        for row in c.n1ql_query(q):
            rowval = row[bucket_name]
            process_couchbase_row(rowval, column_list_dict, table_name,
                                  sqlcursor, conn)
    except Exception:
        print("Unexpected error while importing rows:", sys.exc_info()[0])
        conn.rollback()

    conn.commit()
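
The helper process_couchbase_row is called above but its implementation is not part of this example. A minimal sketch of what such a helper could look like follows, assuming flat JSON documents and that the target table and its columns already exist; the real helper may differ.

def process_couchbase_row(rowval, column_list_dict, table_name, sqlcursor, conn):
    # Hypothetical sketch: remember the columns seen for this table and insert
    # one SQL row per Couchbase document using pyodbc's qmark parameters.
    columns = sorted(rowval.keys())
    column_list_dict.setdefault(table_name, columns)
    column_sql = ", ".join("[{0}]".format(c) for c in columns)
    placeholders = ", ".join("?" for _ in columns)
    sqlcursor.execute(
        "INSERT INTO {0} ({1}) VALUES ({2})".format(table_name, column_sql,
                                                    placeholders),
        [str(rowval[c]) for c in columns])
    conn.commit()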
Esempio n. 48
0
                    "AND b1.year = '{year}' " \
            "AND (b2.active = true OR b2.active IS MISSING) " \
            "GROUP BY b1.age, b1.total_teams, b1.club, b2.full_name, b2.lat, b2.lon) AS w " \
            "GROUP BY w.full_name, w.lat, w.lon, w.club";

QUERY_GET_BASIC_CLUB_INFO = \
            "SELECT address, full_name, website FROM {region} USE KEYS['{club_id}']"


QUERY_GET_CLUB_SEASON_RESULTS = \
            "SELECT team_name AS team, age AS age, regional_place AS finised_regionals_at, total_teams AS out_of, " \
            "ROUND(100 - TONUMBER(final_place)/total_teams*100) AS our_rating FROM {region} " \
            "WHERE doctype = 'result' AND club = '{club_id}' AND year = '{year}' " \
            "ORDER BY age, TO_NUMBER(final_place)"

cluster = Cluster('couchbase://{}'.format(SERVER))
authenticator = PasswordAuthenticator(USR, PWD)
cluster.authenticate(authenticator)
cb = cluster.open_bucket("regions")

def get_pins_by_region(region):
    statement = QUERY_GET_PINS_BY_REGION.format(region = region, year = CURRENT_YEAR)
    query_res = cb.n1ql_query(N1QLQuery(statement))
    all_pins = list()

    for r in query_res:
        all_pins.append(r)
    return all_pins


def get_basic_club_info(club_id, region):
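    # The body of this function is truncated in the source. A minimal sketch,
    # reusing the QUERY_GET_BASIC_CLUB_INFO template above, might look like:
    statement = QUERY_GET_BASIC_CLUB_INFO.format(region=region, club_id=club_id)
    query_res = cb.n1ql_query(N1QLQuery(statement))
    return [r for r in query_res]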
Esempio n. 49
0
class BaseUITestCase(unittest.TestCase):
    # selenium thread

    def _start_selenium(self):
        host = self.machine.ip
        if host in ['localhost', '127.0.0.1']:
            os.system("java -jar %sselenium-server-standalone*.jar -Dwebdriver.chrome.driver=%s > selenium.log 2>&1"
                      % (self.input.ui_conf['selenium_path'], self.input.ui_conf['chrome_path']))
        else:
            self.shell.execute_command('{0}start-selenium.bat > {0}selenium.log 2>&1 &'.format(self.input.ui_conf['selenium_path']))

    def _kill_old_drivers(self):
        if self.shell.extract_remote_info().type.lower() == 'windows':
            self.shell.execute_command('taskkill /F /IM chromedriver.exe')

    def _wait_for_selenium_is_started(self, timeout=10):
        if self.machine.ip in ['localhost', '127.0.0.1']:
            start_time = time.time()
            while (time.time() - start_time) < timeout:
                log = open("/tmp/selenium.log")
                if log.read().find('Started org.openqa.jetty.jetty.Server') > -1:
                    log.close()
                    if self._is_selenium_running():
                        time.sleep(1)
                        return
                time.sleep(1)
        else:
            time.sleep(timeout)

    def _start_selenium_thread(self):
        self.t = Thread(target=self._start_selenium,
                       name="selenium",
                       args=())
        self.t.start()

    def _is_selenium_running(self):
        host = self.machine.ip
        if host in ['localhost', '127.0.0.1']:
            cmd = 'ps -ef|grep selenium-server'
            output = commands.getstatusoutput(cmd)
            if str(output).find('selenium-server-standalone') > -1:
                return True
        else:
            #cmd = "ssh {0}@{1} 'bash -s' < 'tasklist |grep selenium-server'".format(self.input.servers[0].ssh_username,
            #                                                                        host)
            cmd = 'tasklist |grep java'
            o, r = self.shell.execute_command(cmd)
            #cmd = "ssh {0}@{1} 'bash -s' < 'ps -ef|grep selenium-server'"
            if str(o).find('java') > -1:
                return True
        return False

    def setUp(self):
        try:
            self.log = logger.Logger.get_logger()
            self.input = TestInputSingleton.input
            self.servers = self.input.servers
            self.browser = self.input.ui_conf['browser']
            self.replica  = self.input.param("replica", 1)
            self.case_number = self.input.param("case_number", 0)
            self.cluster = Cluster()
            self.machine = self.input.ui_conf['server']
            self.driver = None
            self.shell = RemoteMachineShellConnection(self.machine)
            # avoid cleanup if the previous test has already been torn down
            if not self.input.param("skip_cleanup", True) \
                                            or self.case_number == 1:
                self.tearDown()
            self._log_start(self)
            self._kill_old_drivers()
            #thread for selenium server
            if not self._is_selenium_running():
                self.log.info('start selenium')
                self._start_selenium_thread()
                self._wait_for_selenium_is_started()
            self.log.info('start selenium session')
            if self.browser == 'ff':
                self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                               .format(self.machine.ip,
                                                       self.machine.port),
                                               desired_capabilities=DesiredCapabilities.FIREFOX)
            elif self.browser == 'chrome':
                self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                               .format(self.machine.ip,
                                                       self.machine.port),
                                               desired_capabilities=DesiredCapabilities.CHROME)
            self.log.info('selenium session started')
            self.driver.get("http://{0}:{1}".format(self.servers[0].ip,
                                                    self.servers[0].port))
            self.driver.maximize_window()
        except Exception as ex:
            self.input.test_params["stop-on-failure"] = True
            self.log.error("SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED")
            self.fail(ex)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(),
                                              self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(),
                                               self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def tearDown(self):
        try:
            if self.driver:
                path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
                full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
                self.log.info('screenshot is available: %s' % full_path)
                if not os.path.exists(path_screen):
                    os.mkdir(path_screen)
                self.driver.get_screenshot_as_file(os.path.abspath(full_path))
            rest = RestConnection(self.servers[0])
            if rest._rebalance_progress_status() == 'running':
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            if self.driver:
                self.driver.close()
        finally:
            if self.driver:
                self.shell.disconnect()
            self.cluster.shutdown()
Esempio n. 50
0
import os
import pprint
import sys
import couchbase
import requests
from datetime import datetime
from couchbase.cluster import Cluster, ClusterOptions
from couchbase_core.cluster import PasswordAuthenticator

# CONNECT DB
cluster = Cluster(
    'couchbase://couchbase',
    ClusterOptions(
        PasswordAuthenticator(os.environ.get('DB_USER'),
                              os.environ.get('DB_PASSWORD'))))

# BUCKET ITEMS
cb = cluster.bucket('items').default_collection()

# CREATE THE INDEXES
cluster.query('CREATE PRIMARY INDEX `items-primary-index` ON `items` USING GSI;'
              ).execute()
cluster.query('CREATE INDEX `items-corrupted-index` ON items(corrupted) USING GSI;'
              ).execute()
cluster.query(
    'CREATE INDEX `items-learning-index` ON items(learningData) USING GSI;'
).execute()
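
As a follow-up to the index creation above, the sketch below runs a parameterized N1QL query against the same cluster. It assumes documents in `items` carry a boolean "corrupted" field (suggested by the index definition but not shown in the source).

from couchbase.cluster import QueryOptions

# Minimal follow-up sketch; the filter value is an assumption.
result = cluster.query(
    'SELECT META().id FROM `items` WHERE corrupted = $flag',
    QueryOptions(named_parameters={'flag': True}))
for row in result:
    print(row)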
formatter = logging.Formatter(
    "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

ap = argparse.ArgumentParser()

ap.add_argument("--cb_server", default="172.23.121.84")
ap.add_argument("--cb_username", default="Administrator")
ap.add_argument("--cb_password", default="password")
ap.add_argument("versions")

args = vars(ap.parse_args())

cluster = Cluster(
    "couchbase://" + args["cb_server"],
    ClusterOptions(
        PasswordAuthenticator(args["cb_username"], args["cb_password"])))

server_bucket = cluster.bucket("server")
greenboard_bucket = cluster.bucket("greenboard")
greenboard_collection = greenboard_bucket.default_collection()

supplied_versions = args["versions"].split(",")
versions = set()

for v in supplied_versions:
    for version in list(
            cluster.query(
                "select raw `build` from server where `build` like '%{}%' group by `build`"
                .format(v))):
        versions.add(version)
        versions.add(version)
Esempio n. 52
0
class SwapRebalanceBase(unittest.TestCase):

    @staticmethod
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        self.case_number = self.input.param("case_number", 0)

        # Clear the state from Previous invalid run
        if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, previous test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
        self.load_started = False
        self.loaders = []
        self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        SwapRebalanceBase.reset(self)
        self.cluster_helper = Cluster()
        # Initialize test params
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 100000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)

        # Make sure the test is setup correctly
        min_servers = int(self.num_initial_servers) + int(self.num_swap)
        msg = "minimum {0} nodes required for running swap rebalance"
        self.assertTrue(len(self.servers) >= min_servers,
            msg=msg.format(min_servers))

        self.log.info('picking server : {0} as the master'.format(serverInfo))
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        self.log.info("==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
        SwapRebalanceBase._log_start(self)

    @staticmethod
    def common_tearDown(self):
        self.cluster_helper.shutdown()
        test_failed = (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures or self._resultForDoCleanups.errors)) \
                   or (hasattr(self, '_exc_info') and self._exc_info()[1] is not None)
        if test_failed and TestInputSingleton.input.param("stop-on-failure", False)\
                        or self.input.param("skip_cleanup", False):
                    self.log.warn("CLEANUP WAS SKIPPED")
        else:
            SwapRebalanceBase.reset(self)
            SwapRebalanceBase._log_finish(self)

    @staticmethod
    def reset(self):
        self.log.info("==============  SwapRebalanceBase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
        self.log.info("Stopping load in Teardown")
        SwapRebalanceBase.stop_load(self.loaders)
        for server in self.servers:
            rest = RestConnection(server)
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
            if server.data_path:
                rest = RestConnection(server)
                rest.set_data_path(data_path=server.data_path)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        self.log.info("==============  SwapRebalanceBase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def sleep(self, timeout=1, message=""):
        self.log.info("sleep for {0} secs. {1} ...".format(timeout, message))
        time.sleep(timeout)

    @staticmethod
    def _create_default_bucket(self, replica=1):
        name = "default"
        master = self.servers[0]
        rest = RestConnection(master)
        helper = RestHelper(RestConnection(master))
        if not helper.bucket_exists(name):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
            info = rest.get_nodes_self()
            available_ram = info.memoryQuota * node_ram_ratio
            rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica)
            ready = BucketOperationHelper.wait_for_memcached(master, name)
            self.assertTrue(ready, msg="wait_for_memcached failed")
        self.assertTrue(helper.bucket_exists(name),
            msg="unable to create {0} bucket".format(name))

    @staticmethod
    def _create_multiple_buckets(self, replica=1):
        master = self.servers[0]
        created = BucketOperationHelper.create_multiple_buckets(master, replica, howmany=self.num_buckets)
        self.assertTrue(created, "unable to create multiple buckets")

        rest = RestConnection(master)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")

    # Used for items verification active vs. replica
    @staticmethod
    def items_verification(test, master):
        rest = RestConnection(master)
        # Verify items count across all node
        timeout = 600
        for bucket in rest.get_buckets():
            verified = RebalanceHelper.wait_till_total_numbers_match(master, bucket.name, timeout_in_seconds=timeout)
            test.assertTrue(verified, "Lost items!!.. failing test in {0} secs".format(timeout))

    @staticmethod
    def start_load_phase(self, master):
        loaders = []
        rest = RestConnection(master)
        for bucket in rest.get_buckets():
            loader = dict()
            loader["mcsoda"] = LoadWithMcsoda(master, self.keys_count, bucket=bucket.name,
                password=bucket.saslPassword, prefix=str(bucket.name), port=8091)
            loader["mcsoda"].cfg["exit-after-creates"] = 1
            loader["mcsoda"].cfg["json"] = 0
            loader["thread"] = Thread(target=loader["mcsoda"].load_data, name='mcloader_' + bucket.name)
            loader["thread"].daemon = True
            loaders.append(loader)
        for loader in loaders:
            loader["thread"].start()
        return loaders

    @staticmethod
    def start_access_phase(self, master):
        loaders = []
        rest = RestConnection(master)
        for bucket in rest.get_buckets():
            loader = dict()
            loader["mcsoda"] = LoadWithMcsoda(master, self.keys_count / 2, bucket=bucket.name,
                    password=bucket.saslPassword, prefix=str(bucket.name), port=8091)
            loader["mcsoda"].cfg["ratio-sets"] = 0.8
            loader["mcsoda"].cfg["ratio-hot"] = 0.2
            loader["mcsoda"].cfg["ratio-creates"] = 0.5
            loader["mcsoda"].cfg["ratio-deletes"] = self.ratio_deletes
            loader["mcsoda"].cfg["ratio-expirations"] = self.ratio_expiry
            loader["mcsoda"].cfg["json"] = 0
            loader["thread"] = Thread(target=loader["mcsoda"].load_data, name='mcloader_' + bucket.name)
            loader["thread"].daemon = True
            loaders.append(loader)
        for loader in loaders:
            loader["thread"].start()
        return loaders

    @staticmethod
    def stop_load(loaders, do_stop=True):
        if do_stop:
            for loader in loaders:
                loader["mcsoda"].load_stop()
        for loader in loaders:
            if do_stop:
                loader["thread"].join(300)
            else:
                loader["thread"].join()

    @staticmethod
    def create_buckets(self):
        if self.num_buckets == 1:
            SwapRebalanceBase._create_default_bucket(self, replica=self.replica)
        else:
            SwapRebalanceBase._create_multiple_buckets(self, replica=self.replica)

    @staticmethod
    def verification_phase(test, master):
        # Stop loaders
        SwapRebalanceBase.stop_load(test.loaders)
        test.log.info("DONE DATA ACCESS PHASE")

        test.log.info("VERIFICATION PHASE")
        rest = RestConnection(master)
        servers_in_cluster = []
        nodes = rest.get_nodes()
        for server in test.servers:
            for node in nodes:
                if node.ip == server.ip:
                    servers_in_cluster.append(server)
        RebalanceHelper.wait_for_replication(servers_in_cluster, test.cluster_helper)
        SwapRebalanceBase.items_verification(test, master)

    @staticmethod
    def _common_test_body_swap_rebalance(self, do_stop_start=False):
        master = self.servers[0]
        rest = RestConnection(master)
        num_initial_servers = self.num_initial_servers
        creds = self.input.membase_settings
        initial_servers = self.servers[:num_initial_servers]

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all starting set of servers
        self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)
        self.assertTrue(status, msg="Rebalance was failed")

        self.log.info("DATA LOAD PHASE")
        self.loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        current_nodes = RebalanceHelper.getOtpNodeIds(master)
        self.log.info("current nodes : {0}".format(current_nodes))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
        optNodesIds = [node.id for node in toBeEjectedNodes]

        if self.swap_orchestrator:
            status, content = ClusterOperationHelper.find_orchestrator(master)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                format(status, content))
            if self.num_swap == len(current_nodes):
                optNodesIds.append(content)
            else:
                optNodesIds[0] = content

        for node in optNodesIds:
            self.log.info("removing node {0} and rebalance afterwards".format(node))

        new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
        for server in new_swap_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        if self.swap_orchestrator:
            rest = RestConnection(new_swap_servers[0])
            master = new_swap_servers[0]

        if self.do_access:
            self.log.info("DATA ACCESS PHASE")
            self.loaders = SwapRebalanceBase.start_access_phase(self, master)

        self.log.info("SWAP REBALANCE PHASE")
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
            ejectedNodes=optNodesIds)

        if do_stop_start:
            # Rebalance is stopped at 20%, 40% and 60% completion
            retry = 0
            for expected_progress in (20, 40, 60):
                self.log.info("STOP/START SWAP REBALANCE PHASE WITH PROGRESS {0}%".
                              format(expected_progress))
                while True:
                    progress = rest._rebalance_progress()
                    if progress < 0:
                        self.log.error("rebalance progress code : {0}".format(progress))
                        break
                    elif progress == 100:
                        self.log.warn("Rebalance has already reached 100%")
                        break
                    elif progress >= expected_progress:
                        self.log.info("Rebalance will be stopped with {0}%".format(progress))
                        stopped = rest.stop_rebalance()
                        self.assertTrue(stopped, msg="unable to stop rebalance")
                        SwapRebalanceBase.sleep(self, 20)
                        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                                       ejectedNodes=optNodesIds)
                        break
                    elif retry > 100:
                        break
                    else:
                        retry += 1
                        SwapRebalanceBase.sleep(self, 1)
        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(optNodesIds))
        SwapRebalanceBase.verification_phase(self, master)

    @staticmethod
    def _common_test_body_failed_swap_rebalance(self):
        master = self.servers[0]
        rest = RestConnection(master)
        num_initial_servers = self.num_initial_servers
        creds = self.input.membase_settings
        initial_servers = self.servers[:num_initial_servers]

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all starting set of servers
        self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)
        self.assertTrue(status, msg="Rebalance was failed")

        self.log.info("DATA LOAD PHASE")
        self.loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        current_nodes = RebalanceHelper.getOtpNodeIds(master)
        self.log.info("current nodes : {0}".format(current_nodes))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
        optNodesIds = [node.id for node in toBeEjectedNodes]
        if self.swap_orchestrator:
            status, content = ClusterOperationHelper.find_orchestrator(master)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
            # When swapping all the nodes
            if self.num_swap == len(current_nodes):
                optNodesIds.append(content)
            else:
                optNodesIds[0] = content

        for node in optNodesIds:
            self.log.info("removing node {0} and rebalance afterwards".format(node))

        new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
        for server in new_swap_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        if self.swap_orchestrator:
            rest = RestConnection(new_swap_servers[0])
            master = new_swap_servers[0]

        self.log.info("DATA ACCESS PHASE")
        self.loaders = SwapRebalanceBase.start_access_phase(self, master)

        self.log.info("SWAP REBALANCE PHASE")
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
            ejectedNodes=optNodesIds)
        SwapRebalanceBase.sleep(self, 10, "Rebalance should start")
        self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(self.percentage_progress))
        reached = RestHelper(rest).rebalance_reached(self.percentage_progress)
        if reached == 100 and not RestHelper(rest).is_cluster_rebalanced():
            # handle situation when rebalance failed at the beginning
            self.log.error('seems rebalance failed!')
            self.log.info("Latest logs from UI:")
            for i in rest.get_logs(): self.log.error(i)
            self.fail("rebalance failed even before killing memcached")
        bucket = rest.get_buckets()[0].name
        pid = None
        if self.swap_orchestrator:
            # get PID via remote connection if master is a new node
            shell = RemoteMachineShellConnection(master)
            o, _ = shell.execute_command("ps -eo comm,pid | awk '$1 == \"memcached\" { print $2 }'")
            pid = o[0]
            shell.disconnect()
        else:
            for i in xrange(2):
                try:
                    _mc = MemcachedClientHelper.direct_client(master, bucket)
                    pid = _mc.stats()["pid"]
                    break
                except EOFError as e:
                    self.log.error("{0}.Retry in 2 sec".format(e))
                    SwapRebalanceBase.sleep(self, 1)
        if pid is None:
            self.fail("impossible to get a PID")
        command = "os:cmd(\"kill -9 {0} \")".format(pid)
        self.log.info(command)
        killed = rest.diag_eval(command)
        self.log.info("killed {0}:{1}??  {2} ".format(master.ip, master.port, killed))
        self.log.info("sleep for 10 sec after kill memcached")
        SwapRebalanceBase.sleep(self, 10)
        # we can't get stats for new node when rebalance falls
        if not self.swap_orchestrator:
            ClusterOperationHelper._wait_warmup_completed(self, [master], bucket, wait_time=600)
        i = 0
        # we expect that rebalance will be failed
        try:
            rest.monitorRebalance()
        except RebalanceFailedException:
            # retry rebalance if it failed
            self.log.warn("Rebalance failed but it's expected")
            SwapRebalanceBase.sleep(self, 30)
            self.assertFalse(RestHelper(rest).is_cluster_rebalanced(), msg="cluster need rebalance")
            knownNodes = rest.node_statuses()
            self.log.info("nodes are still in cluster: {0}".format([(node.ip, node.port) for node in knownNodes]))
            ejectedNodes = list(set(optNodesIds) & set([node.id for node in knownNodes]))
            rest.rebalance(otpNodes=[node.id for node in knownNodes], ejectedNodes=ejectedNodes)
            self.assertTrue(rest.monitorRebalance(),
                            msg="rebalance operation failed after adding node {0}".format(toBeEjectedNodes))
        else:
            self.log.info("rebalance completed successfully")
        SwapRebalanceBase.verification_phase(self, master)

    @staticmethod
    def _add_back_failed_node(self, do_node_cleanup=False):
        master = self.servers[0]
        rest = RestConnection(master)
        creds = self.input.membase_settings

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all servers
        self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(self.servers, len(self.servers) - 1)
        self.assertTrue(status, msg="Rebalance was failed")

        self.log.info("DATA LOAD PHASE")
        self.loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        current_nodes = RebalanceHelper.getOtpNodeIds(master)
        self.log.info("current nodes : {0}".format(current_nodes))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
        optNodesIds = [node.id for node in toBeEjectedNodes]

        # List of servers that will not be failed over
        not_failed_over = []
        for server in self.servers:
            if server.ip not in [node.ip for node in toBeEjectedNodes]:
                not_failed_over.append(server)
                self.log.info("Node %s not failed over" % server.ip)

        if self.fail_orchestrator:
            status, content = ClusterOperationHelper.find_orchestrator(master)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                format(status, content))
            # When swapping all the nodes
            if self.num_swap == len(current_nodes):
                optNodesIds.append(content)
            else:
                optNodesIds[0] = content
            master = not_failed_over[-1]

        self.log.info("DATA ACCESS PHASE")
        self.loaders = SwapRebalanceBase.start_access_phase(self, master)

        # Failover selected nodes
        for node in optNodesIds:
            self.log.info("failover node {0} and rebalance afterwards".format(node))
            rest.fail_over(node)

        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
            ejectedNodes=optNodesIds)

        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(optNodesIds))

        # Add back the same failed over nodes

        # Cleanup the node, somehow
        # TODO: cluster_run?
        if do_node_cleanup:
            pass

        # Make rest connection with node part of cluster
        rest = RestConnection(master)

        # Given the optNode, find ip
        add_back_servers = []
        nodes = rest.get_nodes()
        for server in [node.ip for node in nodes]:
            if isinstance(server, unicode):
                add_back_servers.append(server)
        final_add_back_servers = []
        for server in self.servers:
            if server.ip not in add_back_servers:
                final_add_back_servers.append(server)

        for server in final_add_back_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])

        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(add_back_servers))

        SwapRebalanceBase.verification_phase(self, master)

    @staticmethod
    def _failover_swap_rebalance(self):
        master = self.servers[0]
        rest = RestConnection(master)
        creds = self.input.membase_settings
        num_initial_servers = self.num_initial_servers
        initial_servers = self.servers[:num_initial_servers]

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all starting set of servers
        self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)
        self.assertTrue(status, msg="Rebalance was failed")

        self.log.info("DATA LOAD PHASE")
        self.loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
        optNodesIds = [node.id for node in toBeEjectedNodes]
        if self.fail_orchestrator:
            status, content = ClusterOperationHelper.find_orchestrator(master)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
            optNodesIds[0] = content

        self.log.info("FAILOVER PHASE")
        # Failover selected nodes
        for node in optNodesIds:
            self.log.info("failover node {0} and rebalance afterwards".format(node))
            rest.fail_over(node)

        new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.failover_factor]
        for server in new_swap_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        if self.fail_orchestrator:
            rest = RestConnection(new_swap_servers[0])
            master = new_swap_servers[0]

        self.log.info("DATA ACCESS PHASE")
        self.loaders = SwapRebalanceBase.start_access_phase(self, master)

        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
            ejectedNodes=optNodesIds)

        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(new_swap_servers))

        SwapRebalanceBase.verification_phase(self, master)
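
The SwapRebalanceBase helpers above are written to be driven from thin test classes; a hypothetical wrapper illustrating that pattern (the class and method names are illustrative only) is:

class SwapRebalanceBasicTests(unittest.TestCase):
    # Hypothetical wrapper around the static helpers defined above.
    def setUp(self):
        SwapRebalanceBase.common_setup(self)

    def tearDown(self):
        SwapRebalanceBase.common_tearDown(self)

    def do_swap_rebalance(self):
        SwapRebalanceBase._common_test_body_swap_rebalance(self, do_stop_start=False)

    def do_stop_start_swap_rebalance(self):
        SwapRebalanceBase._common_test_body_swap_rebalance(self, do_stop_start=True)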
Esempio n. 53
0
        for o, a in opts:
            if o == "-h":
                usage()

        input = TestInput.TestInputParser.get_test_input(sys.argv)
        if not input.servers:
            usage("ERROR: no servers specified. Please use the -i parameter.")
    except IndexError:
        usage()
    except getopt.GetoptError, error:
        usage("ERROR: " + str(error))

    docs_per_day = input.param("doc_per_day", 49)
    years = input.param("years", 2)
    bucket_name = input.param("bucket_name", "default")
    bucket_port = input.param("bucket_port", None)
    bucket_sasl_pass = input.param("bucket_sasl_pass", None)
    flag = input.param("flags", 0)

    cluster = Cluster()
    try:
        bucket = initialize_bucket(bucket_name, bucket_port, bucket_sasl_pass)
        loader = DocLoader(input.servers, cluster)
        generators_load = loader.generate_docs(docs_per_day, years)
        loader.load(generators_load, bucket, flag=flag)
    finally:
        cluster.shutdown()

if __name__ == "__main__":
    main()
Esempio n. 54
0
class XDCRBaseTest (unittest.TestCase):

    def setUp(self):
        try:
            self._log = logger.Logger.get_logger()
            self._log.info("SetUp process started ...")
            self._input = TestInputSingleton.input
            self._cluster_helper = Cluster()

            self._init_parameters()

            if not self._input.param("skip_cleanup", False):
                self._cleanup_previous_setup()

            self._init_clusters()
            self.setup_extended()
            self._log.info("SetUp process completed...")
        except:
            self._log.error("Error while setting up clusters: %s", sys.exc_info()[0])
            self._cleanup_broken_setup()
            raise

    def tearDown(self):
        self.teardown_extended()
        self._do_cleanup()
        self._cluster_helper.shutdown()

    def _cleanup_previous_setup(self):
        self.teardown_extended()
        self._do_cleanup()

    def test_setUp(self):
        pass

    def _init_parameters(self):
        self._log.info("Initializing input parameters started...")

        self._clusters_dic = self._input.clusters  # clusters is declared as a dict in TestInput, which is unordered.
        self._clusters_keys_olst = range(len(self._clusters_dic))  # clusters are populated in the dict in testrunner such that the ordinal is the key.
                                                                    # OrderedDict cannot be used, to keep compatibility with python 2.6
        self._cluster_names_dic = {} # populated in _init_clusters() method
        self._cluster_counter_temp_int = 0 #TODO: fix the testrunner code to pass cluster name in params.

        self._buckets_arr = ["default"]  #??    
        self._default_bucket_bool = self._input.param("default_bucket", True)
        self._standard_buckets_int = self._input.param("standard_buckets", 0)
        self._sasl_buckets_int = self._input.param("sasl_buckets", 0)
        self._total_buckets_int = self._sasl_buckets_int + self._default_bucket_bool + self._standard_buckets_int

        #self.num_servers = self._input.param("servers", len(self.servers))
        self._num_replicas_int = self._input.param("replicas", 1)
        self._num_items_int = self._input.param("items", 1000)
        self._dgm_run_bool = self._input.param("dgm_run", False)

        self._mem_quota_int = 0 # will be set in subsequent methods

        self.init_parameters_extended()

        self._log.info("Initializing input parameters completed...")


    def _init_clusters(self):
        for key in self._clusters_keys_olst:
            self._set_cluster_names(key)
            self._setup_cluster(self._clusters_dic[key])


    # This method shall be overridden in case there are parameters that need to be initialized.
    def init_parameters_extended(self):
        pass

    # This method shall be overridden in case there are custom steps involved during setup.
    def setup_extended(self):
        pass

    # This method shall be overridden in case there are custom steps involved during teardown.
    def teardown_extended(self):
        pass

    def _do_cleanup(self):
        for key in self._clusters_keys_olst:
            nodes = self._clusters_dic[key]
            BucketOperationHelper.delete_all_buckets_or_assert(nodes, self)
            ClusterOperationHelper.cleanup_cluster(nodes)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(nodes, self)

    def _cleanup_broken_setup(self):
        try:
            self.tearDown()
        except:
            self._log.info("Error while cleaning broken setup.")

    def _set_cluster_names(self, key):
        self._cluster_counter_temp_int += 1
        self._cluster_names_dic[key] = "cluster-{0}".format(self._cluster_counter_temp_int)

    def _setup_cluster(self, nodes):
        self._init_nodes(nodes)
        self._create_buckets(nodes)
        self._config_cluster(nodes)

    def _init_nodes(self, nodes):
        _tasks = []
        for node in nodes:
            _tasks.append(self._cluster_helper.async_init_node(node))
        for task in _tasks:
            mem_quota_node = task.result()
            if mem_quota_node < self._mem_quota_int or self._mem_quota_int == 0:
                self._mem_quota_int = mem_quota_node

    def _create_buckets(self, nodes):
        master_node = nodes[0]
        if self._dgm_run_bool:
            self._mem_quota_int = 256
        bucket_size = self._get_bucket_size(master_node, nodes, self._mem_quota_int, self._total_buckets_int)
        if self._default_bucket_bool:
            self._cluster_helper.create_default_bucket(master_node, bucket_size, self._num_replicas_int)
            #self._buckets_arr['default'] = {1 : KVStore()} # - not sure abou this
        self._create_sasl_buckets(master_node, bucket_size, self._sasl_buckets_int)
        # TODO (Mike): Create Standard buckets    

    def _create_sasl_buckets(self, master_node, bucket_size, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self._cluster_helper.async_create_sasl_bucket(master_node, name,
                                                                              'password', bucket_size,
                                                                              self._num_replicas_int))
            #self._buckets_arr[name] = {1 : KVStore()}
        for task in bucket_tasks:
            task.result()

    def _config_cluster(self, nodes):
        task = self._cluster_helper.async_rebalance(nodes, nodes[1:], [])
        task.result()

    def _get_bucket_size(self, master_node, nodes, mem_quota, num_buckets, ratio = 2.0 / 3.0):
        for node in nodes:
            if node.ip == master_node.ip:
                return int(ratio / float(len(nodes)) / float(num_buckets) * float(mem_quota))
        return int(ratio / float(num_buckets) * float(mem_quota))
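
The comments above note that init_parameters_extended, setup_extended and teardown_extended are hooks meant to be overridden by concrete suites; a hypothetical subclass illustrating that pattern (the "pause_xdcr" parameter is an assumption, not part of the base class) is:

class SampleXDCRTest(XDCRBaseTest):
    # Hypothetical subclass showing the extension hooks in use.
    def init_parameters_extended(self):
        self._pause_xdcr_bool = self._input.param("pause_xdcr", False)

    def setup_extended(self):
        self._log.info("custom per-suite setup steps go here")

    def teardown_extended(self):
        self._log.info("custom per-suite teardown steps go here")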
Esempio n. 55
0
class BaseTestCase(unittest.TestCase):

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        #number of case that is performed from testrunner( increment each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        #max items number to verify in ValidateDataTask, None - verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # avoid cleanup if the previous test has already been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        if not self.input.param("skip_cleanup", False):
            try:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning("rebalancing is still running, test should be verified")
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
            finally:
                #stop all existing task manager threads
                self.cluster.shutdown()
                self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0 / 3.0):
        ip = self.servers[0]
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) / float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_sasl_bucket(server, name,
                                                                      'password',
                                                                      self.bucket_size,
                                                                      self.num_replicas))
            self.buckets.append(Bucket(name=name, authType="sasl", saslPassword='******',
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size));
        for task in bucket_tasks:
            task.result()

    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_standard_bucket(server, name,
                                                                          11214 + i,
                                                                          self.bucket_size,
                                                                          self.num_replicas))

            self.buckets.append(Bucket(name=name, authType=None, saslPassword=None, num_replicas=self.num_replicas,
                                       bucket_size=self.bucket_size, port=11214 + i));
        for task in bucket_tasks:
            task.result()

    def _all_buckets_delete(self, server):
        delete_tasks = []
        for bucket in self.buckets:
            delete_tasks.append(self.cluster.async_bucket_delete(server, bucket.name))

        for task in delete_tasks:
            task.result()
        self.buckets = []

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self.num_replicas
            if len(servers) == self.num_replicas:
                available_replicas = len(servers) - 1
            elif len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)


    """Asynchronously applys load generation to all bucekts in the cluster.
 bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp
    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """
    def _async_load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = []
        for bucket in self.buckets:
            gen = copy.deepcopy(kv_gen)
            tasks.append(self.cluster.async_load_gen_docs(server, bucket.name, gen,
                                                          bucket.kvs[kv_store],
                                                          op_type, exp, flag, only_store_hash, batch_size, pause_secs, timeout_secs))
        return tasks

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """
    def _load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1, flag=0, only_store_hash=True, batch_size=1, pause_secs=1, timeout_secs=30):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp, kv_store, flag, only_store_hash, batch_size, pause_secs, timeout_secs)
        for task in tasks:
            task.result()

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """
    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_queue_size', '==', 0))
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """
    def _verify_all_buckets(self, server, kv_store=1, timeout=180, max_verify=None, only_store_hash=True, batch_size=1):
        tasks = []
        for bucket in self.buckets:
            tasks.append(self.cluster.async_verify_data(server, bucket, bucket.kvs[kv_store], max_verify, only_store_hash, batch_size))
        for task in tasks:
            task.result(timeout)


    def disable_compaction(self, server=None, bucket="default"):

        server = server or self.servers[0]
        new_config = {"viewFragmntThresholdPercentage" : None,
                      "dbFragmentThresholdPercentage" :  None,
                      "dbFragmentThreshold" : None,
                      "viewFragmntThreshold" : None}
        self.cluster.modify_fragmentation_config(server, new_config, bucket)

    def async_create_views(self, server, design_doc_name, views, bucket="default"):
        tasks = []
        if len(views):
            for view in views:
                t_ = self.cluster.async_create_view(server, design_doc_name, view, bucket)
                tasks.append(t_)
        else:
            t_ = self.cluster.async_create_view(server, design_doc_name, None, bucket)
            tasks.append(t_)
        return tasks

    def create_views(self, server, design_doc_name, views, bucket="default", timeout=None):
        if len(views):
            for view in views:
                self.cluster.create_view(server, design_doc_name, view, bucket, timeout)
        else:
            self.cluster.create_view(server, design_doc_name, None, bucket, timeout)

    def make_default_views(self, prefix, count, is_dev_ddoc=False):
        ref_view = self.default_view
        ref_view.name = (prefix, ref_view.name)[prefix is None]
        return [View(ref_view.name + str(i), ref_view.map_func, None, is_dev_ddoc) for i in xrange(count)]

    def _load_doc_data_all_buckets(self, data_op="create"):
        #initialize the template for document generator
        age = range(5)
        first = ['james', 'sharon']
        template = '{{ "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('test_docs', template, age, first, start=0, end=self.num_items)

        self.log.info("%s %s documents..." % (data_op, self.num_items))
        self._load_all_buckets(self.master, gen_load, data_op, 0)

    # Returns True if warmup completes within wait_time seconds on every server,
    # otherwise returns False.
    def _wait_warmup_completed(self, servers, bucket_name, wait_time=300):
        warmed_up = False
        log = logger.Logger.get_logger()
        for server in servers:
            mc = None
            start = time.time()
            # Try to get the stats for wait_time seconds, otherwise bail out.
            while time.time() - start < wait_time:
                # Get the warmup time from each server
                try:
                    mc = MemcachedClientHelper.direct_client(server, bucket_name)
                    stats = mc.stats()
                    if stats is not None:
                        warmup_time = int(stats["ep_warmup_time"])
                        log.info("ep_warmup_time is %s " % warmup_time)
                        log.info(
                            "Collected the stats %s for server %s:%s" % (stats["ep_warmup_time"], server.ip,
                                server.port))
                        break
                    else:
                        log.info(" Did not get the stats from the server yet, trying again.....")
                        time.sleep(2)
                except Exception as e:
                    log.error(
                        "Could not get warmup_time stats from server %s:%s, exception %s" % (server.ip,
                            server.port, e))
                    time.sleep(2)
            else:
                self.fail(
                    "Fail! Unable to get the warmup-stats from server %s:%s after trying for %s seconds." % (
                        server.ip, server.port, wait_time))

            # Waiting for warm-up
            start = time.time()
            warmed_up = False
            while time.time() - start < wait_time and not warmed_up:
                stats = mc.stats()
                if stats["ep_warmup_thread"] == "complete":
                    log.info("warmup completed; curr_items_tot: %s" % stats["curr_items_tot"])
                    warmed_up = True
                    continue
                elif stats["ep_warmup_thread"] == "running":
                    log.info("still warming up .... curr_items_tot : %s" % stats["curr_items_tot"])
                else:
                    self.fail("Value of ep_warmup_thread does not exist, exiting from this server")
                time.sleep(5)
            mc.close()
        return warmed_up
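
    # Hypothetical usage sketch (method name invented; bucket name "default" is
    # an assumption): after restarting couchbase-server on the nodes, block
    # until every node reports ep_warmup_thread == "complete" before resuming
    # the workload.
    def _example_wait_for_warmup(self):
        warmed_up = self._wait_warmup_completed(self.servers, "default", wait_time=300)
        self.assertTrue(warmed_up, "cluster did not finish warmup within 300 seconds")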
Example n. 56
0
class BaseTestCase(unittest.TestCase):

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.cluster = Cluster()
        self.servers = self.input.servers
        self.buckets = {}

        self.default_bucket = self.input.param("default_bucket", True)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)

        if not self.input.param("skip_cleanup", False):
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)

        self.quota = self._initialize_nodes(self.cluster, self.servers)
        if self.dgm_run:
            self.quota = 256
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
            self.buckets['default'] = {1 : KVStore()}
        self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
        # TODO (Mike): Create Standard buckets

    def tearDown(self):
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        self.buckets = {}
        self.cluster.shutdown()

    def _initialize_nodes(self, cluster, servers):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0/3.0):
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) / float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))
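
    # Worked example for _get_bucket_size above (assumed numbers): with a
    # per-node quota of 900 MB, num_servers = 3 and num_buckets = 2, the first
    # branch yields int(2.0/3.0 / 3 / 2 * 900) = 100 MB of RAM per bucket.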

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_sasl_bucket(server, name,
                                                                      'password',
                                                                      self.bucket_size,
                                                                      self.num_replicas))
            self.buckets[name] = {1 : KVStore()}
        for task in bucket_tasks:
            task.result()

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket, kv_stores in self.buckets.items():
            items = sum([len(kv_store) for kv_store in kv_stores.values()])
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self.num_replicas
            if len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)
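
    # Worked example for _verify_stats_all_buckets above: with items = 1000 per
    # bucket, num_replicas = 1 and two servers, available_replicas stays 1, so
    # the checks wait for vb_replica_curr_items == 1000 and
    # curr_items_tot == 2000 across the cluster.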


    """Asynchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)

    Returns:
        A list of all of the tasks created.
    """
    def _async_load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1):
        tasks = []
        for bucket, kv_stores in self.buckets.items():
            gen = copy.deepcopy(kv_gen)
            tasks.append(self.cluster.async_load_gen_docs(server, bucket, gen,
                                                          kv_stores[kv_store],
                                                          op_type, exp))
        return tasks
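
    # Hypothetical usage sketch (method name invented): start the load
    # asynchronously, overlap it with another long-running operation (a
    # rebalance-in, mirroring the later setUp examples), then collect results.
    def _example_load_during_rebalance(self, kv_gen):
        tasks = self._async_load_all_buckets(self.servers[0], kv_gen, "create", 0)
        self.cluster.rebalance(self.servers[:1], self.servers[1:], [])
        for task in tasks:
            task.result()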

    """Synchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
        op_type - "create", "read", "update", or "delete" (String)
        exp - The expiration for the items if updated or created (int)
        kv_store - The index of the bucket's kv_store to use. (int)
    """
    def _load_all_buckets(self, server, kv_gen, op_type, exp, kv_store=1):
        tasks = self._async_load_all_buckets(server, kv_gen, op_type, exp, kv_store)
        for task in tasks:
            task.result()

    """Waits for queues to drain on all servers and buckets in a cluster.

    A utility function that waits for all of the items loaded to be persisted
    and replicated.

    Args:
        servers - A list of all of the servers in the cluster. ([TestInputServer])
    """
    def _wait_for_stats_all_buckets(self, servers):
        tasks = []
        for server in servers:
            for bucket in self.buckets:
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_queue_size', '==', 0))
                tasks.append(self.cluster.async_wait_for_stats([server], bucket, '',
                                   'ep_flusher_todo', '==', 0))
        for task in tasks:
            task.result()

    """Verifies data on all of the nodes in a cluster.

    Verifies all of the data in a specific kv_store index for all buckets in
    the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_store - The kv store index to check. (int)
    """
    def _verify_all_buckets(self, server, kv_store=1):
        tasks = []
        for bucket, kv_stores in self.buckets.items():
            tasks.append(self.cluster.async_verify_data(server, bucket, kv_stores[kv_store]))
        for task in tasks:
            task.result()
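
    # Hypothetical usage sketch (method name invented): once a load has drained,
    # check the item-count stats and then the stored documents; kv_store index 1
    # matches the KVStore registered for each bucket in setUp above.
    def _example_verify_cluster(self):
        self._wait_for_stats_all_buckets(self.servers)
        self._verify_stats_all_buckets(self.servers)
        self._verify_all_buckets(self.servers[0], kv_store=1)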
Example n. 57
0
    def _connect(self):
        """Establish a connection to the Couchbase cluster."""
        cluster = Cluster('http://{}:{}'.format(self.host, self.port))
        authenticator = PasswordAuthenticator('Administrator', self.password)
        cluster.authenticate(authenticator)
        self.client = cluster.open_bucket(self.bucket)
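
A minimal sketch of how this helper might be wrapped in a small store class (the class name, constructor defaults and the set/get methods are assumptions added for illustration, not part of the original snippet):

class CouchbaseStore(object):
    def __init__(self, host='localhost', port=8091, password='password', bucket='default'):
        self.host = host
        self.port = port
        self.password = password
        self.bucket = bucket
        self._connect()

    def set(self, key, value):
        # upsert creates or replaces the document stored under key
        return self.client.upsert(key, value)

    def get(self, key):
        # .value unwraps the stored document from the result object
        return self.client.get(key).value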
Example n. 58
0
class BaseUITestCase(unittest.TestCase):
    skip_setup_failed  = False
    # selenium thread

    def _start_selenium(self):
        host = self.input.ui_conf['selenium_ip']
        if host in ['localhost', '127.0.0.1']:
            os.system("java -jar ~/Downloads/selenium-server-standalone-2.24.1.jar -Dwebdriver.chrome.driver=%s > selenium.log 2>&1"
                      % self.input.ui_conf['chrome_path'])
        else:
            os.system("ssh {0}@{1} 'bash -s' < 'java -jar ~/Downloads/selenium-server-standalone-2.24.1.jar -Dwebdriver.chrome.driver={2}' > /tmp/selenium.log 2>&1".format(self.input.servers[0].ssh_username, host, self.input.ui_conf['chrome_path']))

    def _wait_for_selenium_is_started(self, timeout=10):
        start_time = time.time()
        while (time.time() - start_time) < timeout:
            with open("/tmp/selenium.log") as log:
                started = log.read().find('Started org.openqa.jetty.jetty.Server') > -1
            if started and self._is_selenium_running():
                time.sleep(1)
                return
            time.sleep(1)

    def _start_selenium_thread(self):
        self.t = Thread(target=self._start_selenium,
                       name="selenium",
                       args=())
        self.t.start()

    def _is_selenium_running(self):
        host = self.input.ui_conf['selenium_ip']
        if host in ['localhost', '127.0.0.1']:
            cmd = 'ps -ef|grep selenium-server'
        else:
            cmd = "ssh {0}@{1} 'bash -s' < 'ps -ef|grep selenium-server'"
        output = commands.getstatusoutput(cmd)
        if str(output).find('selenium-server-standalone') > -1:
            return True
        return False

    @unittest.skipIf(skip_setup_failed, "setup failed")
    def setUp(self):
        try:
            self.log = logger.Logger.get_logger()
            self.input = TestInputSingleton.input
            self.servers = self.input.servers
            self.browser = self.input.ui_conf['browser']
            self.replica  = self.input.param("replica", 1)
            self.case_number = self.input.param("case_number", 0)
            self.cluster = Cluster()
            # avoid cleanup if the previous test has already been torn down
            if not self.input.param("skip_cleanup", True) \
                                            or self.case_number == 1:
                self.tearDown()
            self._log_start(self)
            #thread for selenium server
            if not self._is_selenium_running():
                self.log.info('start selenium')
                self._start_selenium_thread()
                self._wait_for_selenium_is_started()
            self.log.info('start selenium session')
            if self.browser == 'ff':
                self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                               .format(self.input.ui_conf['selenium_ip'],
                                                       self.input.ui_conf['selenium_port']),
                                               desired_capabilities=DesiredCapabilities.FIREFOX)
            elif self.browser == 'chrome':
                self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                               .format(self.input.ui_conf['selenium_ip'],
                                                       self.input.ui_conf['selenium_port']),
                                               desired_capabilities=DesiredCapabilities.CHROME)
            self.log.info('selenium session started')
            self.driver.get("http://{0}:{1}".format(self.servers[0].ip,
                                                    self.servers[0].port))
            self.driver.maximize_window()
        except Exception as ex:
            BaseUITestCase.skip_setup_failed = True
            self.fail(ex)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(),
                                              self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(),
                                               self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def tearDown(self):
        try:
            path_screen = self.input.param('screenshots', 'logs/screens')
            full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
            self.log.info('screenshot is available: %s' % full_path)
            if not os.path.exists(path_screen):
                os.mkdir(path_screen)
            self.driver.get_screenshot_as_file(os.path.abspath(full_path))
            rest = RestConnection(self.servers[0])
            if rest._rebalance_progress_status() == 'running':
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            #for server in self.servers:
            #    ClusterOperationHelper.cleanup_cluster([server])
            #ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            self.driver.close()
        finally:
            self.cluster.shutdown()
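
    # Hypothetical test built on this base class (illustration only; the
    # assertion is an invented example): setUp has already pointed self.driver
    # at the console of self.servers[0], so a test just drives the browser.
    def test_console_loads_example(self):
        self._log_start(self)
        self.assertTrue(len(self.driver.title) > 0, "console page did not load")
        self._log_finish(self)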
Example n. 59
0
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.pre_warmup_stats = {}
        try:
            self.wait_timeout = self.input.param("wait_timeout", 60)
            # number of the test case run by testrunner (incremented each time)
            self.case_number = self.input.param("case_number", 0)
            self.default_bucket = self.input.param("default_bucket", True)
            if self.default_bucket:
                self.default_bucket_name = "default"
            self.standard_buckets = self.input.param("standard_buckets", 0)
            self.sasl_buckets = self.input.param("sasl_buckets", 0)
            self.memcached_buckets = self.input.param("memcached_buckets", 0)
            self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
            self.num_servers = self.input.param("servers", len(self.servers))
            # initial number of nodes in the cluster
            self.nodes_init = self.input.param("nodes_init", 1)
            self.nodes_in = self.input.param("nodes_in", 1)
            self.nodes_out = self.input.param("nodes_out", 1)

            self.num_replicas = self.input.param("replicas", 1)
            self.num_items = self.input.param("items", 1000)
            self.value_size = self.input.param("value_size", 512)
            self.dgm_run = self.input.param("dgm_run", False)
            # max number of items to verify in ValidateDataTask; None means verify all
            self.max_verify = self.input.param("max_verify", None)
            # we don't change consistent_view on server by default
            self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
            self.rebalanceIndexWaitingDisabled = self.input.param("rebalanceIndexWaitingDisabled", None)
            self.rebalanceIndexPausingDisabled = self.input.param("rebalanceIndexPausingDisabled", None)
            self.maxParallelIndexers = self.input.param("maxParallelIndexers", None)
            self.maxParallelReplicaIndexers = self.input.param("maxParallelReplicaIndexers", None)
            self.quota_percent = self.input.param("quota_percent", None)
            self.port = None
            if self.input.param("port", None):
                self.port = str(self.input.param("port", None))
            self.log.info(
                "==============  basetestcase setup was started for test #{0} {1}==============".format(
                    self.case_number, self._testMethodName
                )
            )
            # avoid any cluster operations in setup for new upgrade & upgradeXDCR tests
            if str(self.__class__).find("newupgradetests") != -1 or str(self.__class__).find("upgradeXDCR") != -1:
                self.log.info("any cluster operation in setup will be skipped")
                self.log.info(
                    "==============  basetestcase setup was finished for test #{0} {1} ==============".format(
                        self.case_number, self._testMethodName
                    )
                )
                return
            # avoid cleanup if the previous test has already been torn down
            if not self.input.param("skip_cleanup", True) or self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    self.log.warn("tearDown for previous test failed, will retry...")
                    self.case_number -= 1000
                self.tearDown()
                self.cluster = Cluster()

            self.quota = self._initialize_nodes(
                self.cluster,
                self.servers,
                self.disabled_consistent_view,
                self.rebalanceIndexWaitingDisabled,
                self.rebalanceIndexPausingDisabled,
                self.maxParallelIndexers,
                self.maxParallelReplicaIndexers,
                self.port,
            )

            if str(self.__class__).find("rebalanceout.RebalanceOutTests") != -1:
                # rebalance all nodes into the cluster before each test
                self.cluster.rebalance(self.servers[: self.num_servers], self.servers[1 : self.num_servers], [])
            elif self.nodes_init > 1:
                self.cluster.rebalance(self.servers[:1], self.servers[1 : self.nodes_init], [])
            elif str(self.__class__).find("ViewQueryTests") != -1 and not self.input.param("skip_rebalance", False):
                self.cluster.rebalance(self.servers, self.servers[1:], [])
            if self.dgm_run:
                self.quota = 256
            if self.total_buckets > 0:
                self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
            if str(self.__class__).find("newupgradetests") == -1:
                self._bucket_creation()
            self.log.info(
                "==============  basetestcase setup was finished for test #{0} {1} ==============".format(
                    self.case_number, self._testMethodName
                )
            )
            self._log_start(self)
        except Exception as e:
            self.cluster.shutdown()
            self.fail(e)