Example #1
    def common_setup(self):
        self.cluster_helper = Cluster()
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        if len(set([server.ip for server in self.servers])) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear any state left over from a previous failed run
            if rest._rebalance_progress_status() == "running":
                self.log.warning("rebalancing is still running, previous test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info(
                "==============  SwapRebalanceBase setup was started for test #{0} {1}==============".format(
                    self.case_number, self._testMethodName
                )
            )
            SwapRebalanceBase.reset(self)

            # Make sure the test is setup correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers, msg=msg.format(min_servers))

            self.log.info("picking server : {0} as the master".format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info(
                "==============  SwapRebalanceBase setup was finished for test #{0} {1} ==============".format(
                    self.case_number, self._testMethodName
                )
            )
            SwapRebalanceBase._log_start(self)
        except Exception as e:
            self.cluster_helper.shutdown()
            self.fail(e)
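
A minimal sketch of how a concrete test might drive this helper, assuming SwapRebalanceBase is mixed into a unittest.TestCase subclass and exposes a matching common_tearDown (the teardown name is assumed; it is not shown in this snippet):

import unittest

class SwapRebalanceTest(unittest.TestCase, SwapRebalanceBase):
    def setUp(self):
        SwapRebalanceBase.common_setup(self)

    def tearDown(self):
        # assumed counterpart to common_setup; shuts the Cluster helper down
        SwapRebalanceBase.common_tearDown(self)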
Example #2
class SauceTestCase(BaseTestCase):
    def setUp(self):
        super(SauceTestCase, self).setUp()

        class PO(Page):
            pass

        self.PO = PO

    @raises(exceptions.MissingSauceOptionError)
    def test_missing_sauce_apikey_should_raise_missing_sauce_option_error(
            self):
        self.set_baseurl_env(base_file=False,
                             arbitrary_base="http://www.ncbi.nlm.nih.gov")
        os.environ["PO_SAUCE_USERNAME"] = "******"
        self.PO.uri = "/foo"
        self.PO()

    @raises(exceptions.MissingSauceOptionError)
    def test_missing_sauce_username_should_raise_missing_sauce_option_error(
            self):
        self.set_baseurl_env(base_file=False,
                             arbitrary_base="http://www.ncbi.nlm.nih.gov")
        os.environ["PO_SAUCE_APIKEY"] = "abc"
        self.PO.uri = "/foo"
        self.PO()

    @raises(exceptions.SauceConnectionError)
    def test_sauce_connection_error(self):
        self.set_baseurl_env(base_file=False,
                             arbitrary_base="http://www.ncbi.nlm.nih.gov")
        os.environ["PO_BROWSER"] = "Firefox"
        os.environ["PO_SAUCE_BROWSERVERSION"] = "27"
        os.environ["PO_SAUCE_USERNAME"] = "******"
        os.environ["PO_SAUCE_APIKEY"] = "bar"
        os.environ["PO_SAUCE_PLATFORM"] = "Windows 8.1"
        self.PO.uri = "/foo"
        p = self.PO()
        p.open()

    @skipUnless(BaseTestCase.are_sauce_creds_set_for_testing(),
                "SAUCE_USERNAME and SAUCE_APIKEY env vars must be set to test")
    @raises(selenium.common.exceptions.WebDriverException)
    def test_sauce_invalid_browser(self):
        self.set_baseurl_env(base_file=False,
                             arbitrary_base="http://www.ncbi.nlm.nih.gov")
        os.environ["PO_BROWSER"] = "Firefox"
        os.environ["PO_SAUCE_BROWSERVERSION"] = "27"
        username, apikey = self.get_sauce_creds()
        os.environ["PO_SAUCE_USERNAME"] = username
        os.environ["PO_SAUCE_APIKEY"] = apikey
        os.environ["PO_SAUCE_PLATFORM"] = "Winows 8.1"
        self.PO.uri = "/foo"
        p = self.PO()
        p.open()
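
The @raises decorator above is nose-style. A roughly equivalent check with stock unittest, shown as a sketch against the same setUp (the placeholder username value here is arbitrary):

    def test_missing_sauce_apikey_raises(self):
        self.set_baseurl_env(base_file=False,
                             arbitrary_base="http://www.ncbi.nlm.nih.gov")
        os.environ["PO_SAUCE_USERNAME"] = "any-placeholder"
        self.PO.uri = "/foo"
        with self.assertRaises(exceptions.MissingSauceOptionError):
            self.PO()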
Example #3
 def setUp(self):
     self.input = TestInputSingleton.input
     self.input.test_params.update({"default_bucket":False})
     BaseTestCase.setUp(self)
     self.rest = RestConnection(self.servers[0])
     self.op_type = self.input.param("op_type", "create")
     self.tasks = []         # To have all tasks running in parallel.
     self._iter_count = 0    # To keep a check of how many items are deleted
     self.available_servers = list()
     self.available_servers = self.cluster.servers[self.nodes_init:]
     self.num_buckets = self.input.param("num_buckets", 1)
     self.mutate = 0
     self.doc_ops = self.input.param("doc_ops", None)
     if self.doc_ops:
         self.doc_ops = self.doc_ops.split(';')
     self.iterations = self.input.param("iterations", 2)
     self.vbucket_check = self.input.param("vbucket_check", True)
     self.new_num_writer_threads = self.input.param(
         "new_num_writer_threads", 6)
     self.new_num_reader_threads = self.input.param(
         "new_num_reader_threads", 8)
Example #4
 def setUp(self):
     self.input = TestInputSingleton.input
     self.input.test_params.update({"default_bucket": False})
     BaseTestCase.setUp(self)
     self.rest = RestConnection(self.servers[0])
     self.op_type = self.input.param("op_type", "create")
     self.available_servers = list()
     self.available_servers = self.cluster.servers[self.nodes_init:]
     self.num_buckets = self.input.param("num_buckets", 1)
     self.mutate = 0
     self.doc_ops = self.input.param("doc_ops", None)
     if self.doc_ops:
         self.doc_ops = self.doc_ops.split(';')
     self.iterations = self.input.param("iterations", 2)
     self.vbucket_check = self.input.param("vbucket_check", True)
     self.new_num_writer_threads = self.input.param(
         "new_num_writer_threads", 6)
     self.new_num_reader_threads = self.input.param(
         "new_num_reader_threads", 8)
     self.create_perc = 100
     self.update_perc = self.input.param("update_perc", 50)
     self.delete_perc = self.input.param("delete_perc", 50)
     self.expiry_perc = self.input.param("expiry_perc", 0)
     self.start = 0
     self.end = 0
     self.initial_items = self.start
     self.final_items = self.end
     self.create_end = 0
     self.create_start = 0
     self.update_end = 0
     self.update_start = 0
     self.delete_end = 0
     self.delete_start = 0
     self.expire_end = 0
     self.expire_start = 0
     self.num_collections = self.input.param("num_collections", 10)
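
The start/end and per-operation *_start/*_end fields above are key-range counters; the logic that advances them is not part of this snippet. A sketch of the presumed bookkeeping for the create range (pattern assumed, names taken from the fields above):

def advance_create_range(test, items_per_iteration):
    # presumed pattern: each iteration's creates begin where the last batch ended
    test.create_start = test.end
    test.create_end = test.end + items_per_iteration
    test.end = test.create_end
    test.final_items = test.end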
Example #5
 def setUp(self, add_defualt_cbas_node=True):
     self.input = TestInputSingleton.input
     self.input.test_params.update({"default_bucket": False})
     BaseTestCase.setUp(self)
     self.rest = RestConnection(self.master)
Example #6
class SauceTestCase(BaseTestCase):
    """
    Sauce exception tests are in the unit tests, not the
    functional tests.
    """
    def get_job_data(self, sid):
        username, apikey = self.get_sauce_creds()
        rest_url = "https://%s:%[email protected]/rest/v1/%s/jobs/%s" % (
            username, apikey, username, sid)
        resp = requests.get(rest_url)
        return json.loads(resp.content)

    def get_sid_from_log(self, is_robot=False):
        log_path = self.get_log_path(is_robot)
        try:
            with open(log_path) as f:
                content = f.read()
        except IOError:
            raise Exception("Couldn't open log file %s" % log_path)
        try:
            return re.search(r"session ID: (.{32})", content).group(1)
        except (AttributeError, IndexError):
            raise Exception("Couldn't get the session ID from the log %s" %
                            log_path)

    @unittest.skipUnless(BaseTestCase.are_sauce_creds_set_for_testing(),
                         "Must set SAUCE_USERNAME and SAUCE_APIKEY "
                         "(not PO_SAUCE_...) as env variables to run this test")
    def test_sauce_unittest(self):
        self.assertFalse(os.path.exists(self.get_log_path()))
        run = self.run_scenario("test_sauce.py")
        job_data = self.get_job_data(self.get_sid_from_log())

        # Just check an arbitrary entry in the job data returned from sauce.
        self.assertEqual(job_data["browser"], "firefox",
                         "The job ran in Sauce")

        # We expect this to fail, because the test makes a purposely false assertion
        # to test that we can assert against things going on in Sauce.
        self.assert_run(
            run,
            expected_returncode=1,
            search_output="Title should have been 'foo' but was 'Home - "
            "PubMed - NCBI")

    @unittest.skipUnless(BaseTestCase.are_sauce_creds_set_for_testing(),
                         "Must set SAUCE_USERNAME and SAUCE_APIKEY "
                         "(not PO_SAUCE_...) as env variables to run this test")
    def test_sauce_robot(self):
        self.assertFalse(os.path.exists(self.get_log_path(is_robot=True)))
        run = self.run_scenario("test_sauce.robot",
                                variablefile=os.path.join(
                                    self.test_dir, "sauce_vars.py"))

        job_data = self.get_job_data(self.get_sid_from_log(is_robot=True))

        # Just check an arbitrary entry in the job data returned from sauce.
        self.assertEqual(job_data["browser"], "firefox",
                         "The job ran in Sauce")
        self.assert_run(
            run,
            expected_returncode=1,
            search_output="Title should have been 'foo' but was 'Home - "
            "PubMed - NCBI")
Example #7
    def common_setup(self):
        self.cluster_helper = Cluster()
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        if len(set([server.ip for server in self.servers])) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param(
            "swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear any state left over from a previous failed run
            if rest._rebalance_progress_status() == 'running':
                self.log.warning(
                    "rebalancing is still running, previous test should be verified"
                )
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
            SwapRebalanceBase.reset(self)

            # Make sure the test is setup correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers,
                            msg=msg.format(min_servers))

            self.log.info(
                'picking server : {0} as the master'.format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
                self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username,
                              password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            SwapRebalanceBase.enable_diag_eval_on_non_local_hosts(
                self, serverInfo)
            # Add built-in user
            testuser = [{
                'id': 'cbadminbucket',
                'name': 'cbadminbucket',
                'password': '******'
            }]
            RbacBase().create_user_source(testuser, 'builtin', self.servers[0])

            # Assign user to role
            role_list = [{
                'id': 'cbadminbucket',
                'name': 'cbadminbucket',
                'roles': 'admin'
            }]
            RbacBase().add_user_role(role_list,
                                     RestConnection(self.servers[0]),
                                     'builtin')

            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info(
                "==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
                .format(self.case_number, self._testMethodName))
            SwapRebalanceBase._log_start(self)
        except Exception as e:
            self.cluster_helper.shutdown()
            self.fail(e)
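
The RBAC block that distinguishes this setup from Example #1 can be factored into a small helper. A sketch using the same RbacBase calls as above (the password is redacted in the original snippet, so it is a required argument here):

def create_builtin_admin(server, password, user_id='cbadminbucket'):
    # same two calls as in common_setup above: create the user, then grant admin
    testuser = [{'id': user_id, 'name': user_id, 'password': password}]
    RbacBase().create_user_source(testuser, 'builtin', server)
    role_list = [{'id': user_id, 'name': user_id, 'roles': 'admin'}]
    RbacBase().add_user_role(role_list, RestConnection(server), 'builtin')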
Example #8
    def _common_test_body_failed_swap_rebalance(self):
        master = self.servers[0]
        rest = RestConnection(master)
        num_initial_servers = self.num_initial_servers
        creds = self.input.membase_settings
        initial_servers = self.servers[:num_initial_servers]

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all starting set of servers
        self.log.info("INITIAL REBALANCE PHASE")
        RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)

        self.log.info("DATA LOAD PHASE")
        loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        current_nodes = RebalanceHelper.getOtpNodeIds(master)
        self.log.info("current nodes : {0}".format(current_nodes))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
        optNodesIds = [node.id for node in toBeEjectedNodes]
        if self.swap_orchestrator:
            status, content = ClusterHelper.find_orchestrator(master)
            self.assertTrue(status,
                            msg="Unable to find orchestrator: {0}:{1}".format(status, content))
            # When swapping all the nodes
            if self.num_swap == len(current_nodes):
                optNodesIds.append(content)
            else:
                optNodesIds[0] = content

        for node in optNodesIds:
            self.log.info("removing node {0} and rebalance afterwards".format(node))

        new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
        for server in new_swap_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        if self.swap_orchestrator:
            rest = RestConnection(new_swap_servers[0])
            master = new_swap_servers[0]

        self.log.info("DATA ACCESS PHASE")
        loaders = SwapRebalanceBase.start_access_phase(self, master)

        self.log.info("SWAP REBALANCE PHASE")
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
            ejectedNodes=optNodesIds)

        # Fail the rebalance at 20%, 40% and 60% completion
        for i in [1, 2, 3]:
            expected_progress = 20 * i
            self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(expected_progress))
            RestHelper(rest).rebalance_reached(expected_progress)
            bucket = rest.get_buckets()[0].name
            pid = StatsCommon.get_stats([master], bucket, "", "pid")[master]
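            # diag_eval evaluates Erlang on the node: os:cmd("kill -9 <pid>")
            # kills the bucket's memcached process, failing the in-flight
            # rebalance and forcing the node to warm up again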
            command = "os:cmd(\"kill -9 {0} \")".format(pid)
            self.log.info(command)
            killed = rest.diag_eval(command)
            self.log.info("killed {0}:{1}??  {2} ".format(master.ip, master.port, killed))
            BaseTestCase._wait_warmup_completed(self, [master], bucket, wait_time=600)
            time.sleep(5)

            rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                ejectedNodes=optNodesIds)

        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(toBeEjectedNodes))

        # Stop loaders
        SwapRebalanceBase.stop_load(loaders)

        self.log.info("DONE DATA ACCESS PHASE")
        #for bucket in rest.get_buckets():
        #    SwapRebalanceBase.verify_data(new_swap_servers[0], bucket_data[bucket.name].get('inserted_keys'),\
        #        bucket.name, self)
        #    RebalanceHelper.wait_for_persistence(master, bucket.name)

        self.log.info("VERIFICATION PHASE")
        SwapRebalanceBase.items_verification(master, self)
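
Stripped to its shape, the failure-injection loop above does the following; all callables in this sketch are hypothetical stand-ins for the helpers used in the test:

def run_failure_injection(wait_for_progress, kill_memcached, wait_for_warmup,
                          start_rebalance, rebalance_succeeded):
    for expected_progress in (20, 40, 60):
        wait_for_progress(expected_progress)  # let the rebalance get this far
        kill_memcached()                      # inject the failure
        wait_for_warmup()                     # node restarts and warms up
        start_rebalance()                     # try again
    assert rebalance_succeeded()              # the final attempt should finish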
Example #9
    def _common_test_body_failed_swap_rebalance(self):
        master = self.servers[0]
        rest = RestConnection(master)
        num_initial_servers = self.num_initial_servers
        creds = self.input.membase_settings
        initial_servers = self.servers[:num_initial_servers]

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all starting set of servers
        self.log.info("INITIAL REBALANCE PHASE")
        RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)

        self.log.info("DATA LOAD PHASE")
        loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        current_nodes = RebalanceHelper.getOtpNodeIds(master)
        self.log.info("current nodes : {0}".format(current_nodes))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master,
                                                      howmany=self.num_swap)
        optNodesIds = [node.id for node in toBeEjectedNodes]
        if self.swap_orchestrator:
            status, content = ClusterHelper.find_orchestrator(master)
            self.assertTrue(status,
                            msg="Unable to find orchestrator: {0}:{1}".format(
                                status, content))
            # When swapping all the nodes
            if self.num_swap == len(current_nodes):
                optNodesIds.append(content)
            else:
                optNodesIds[0] = content

        for node in optNodesIds:
            self.log.info(
                "removing node {0} and rebalance afterwards".format(node))

        new_swap_servers = self.servers[
            num_initial_servers:num_initial_servers + self.num_swap]
        for server in new_swap_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password,
                                    server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        if self.swap_orchestrator:
            rest = RestConnection(new_swap_servers[0])
            master = new_swap_servers[0]

        self.log.info("DATA ACCESS PHASE")
        loaders = SwapRebalanceBase.start_access_phase(self, master)

        self.log.info("SWAP REBALANCE PHASE")
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                       ejectedNodes=optNodesIds)

        # Fail the rebalance at 20%, 40% and 60% completion
        for i in [1, 2, 3]:
            expected_progress = 20 * i
            self.log.info(
                "FAIL SWAP REBALANCE PHASE @ {0}".format(expected_progress))
            RestHelper(rest).rebalance_reached(expected_progress)
            bucket = rest.get_buckets()[0].name
            pid = StatsCommon.get_stats([master], bucket, "", "pid")[master]
            command = "os:cmd(\"kill -9 {0} \")".format(pid)
            self.log.info(command)
            killed = rest.diag_eval(command)
            self.log.info("killed {0}:{1}??  {2} ".format(
                master.ip, master.port, killed))
            BaseTestCase._wait_warmup_completed(self, [master],
                                                bucket,
                                                wait_time=600)
            time.sleep(5)

            rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                           ejectedNodes=optNodesIds)

        self.assertTrue(
            rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(
                toBeEjectedNodes))

        # Stop loaders
        SwapRebalanceBase.stop_load(loaders)

        self.log.info("DONE DATA ACCESS PHASE")
        #for bucket in rest.get_buckets():
        #    SwapRebalanceBase.verify_data(new_swap_servers[0], bucket_data[bucket.name].get('inserted_keys'),\
        #        bucket.name, self)
        #    RebalanceHelper.wait_for_persistence(master, bucket.name)

        self.log.info("VERIFICATION PHASE")
        SwapRebalanceBase.items_verification(master, self)