Example #1
 def _verify_data(self, master, rest, inserted_keys):
     log = logger.Logger.get_logger()
     log.info("Verifying data")
     ready = RebalanceHelper.wait_for_stats_on_all(master, "default", "ep_queue_size", 0)
     self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
     ready = RebalanceHelper.wait_for_stats_on_all(master, "default", "ep_flusher_todo", 0)
     self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
     BucketOperationHelper.keys_exist_or_assert(keys=inserted_keys, server=master, bucket_name="default", test=self)
Example #2
 def _verify_data(self, master, rest, inserted_keys):
     log = logger.Logger.get_logger()
     log.info("Verifying data")
     ready = RebalanceHelper.wait_for_stats_on_all(master, 'default', 'ep_queue_size', 0)
     self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
     ready = RebalanceHelper.wait_for_stats_on_all(master, 'default', 'ep_flusher_todo', 0)
     self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
     BucketOperationHelper.keys_exist_or_assert(keys=inserted_keys, server=master, bucket_name='default',
                                                test=self)
Example #3
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        save_upgrade_config = False
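        # the config only needs to be carried across the upgrade when moving from
        # the 1.7.x membase packages to the renamed 1.8.x couchbase packages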
        if initial_version.startswith(
                "1.7") and input.test_params['version'].startswith("1.8"):
            save_upgrade_config = True
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith(
                "1.7"):
            product = 'membase-server-enterprise'
        else:
            product = 'couchbase-server-enterprise'
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        # check to see if we are installing from latestbuilds or releases
        # note: for newer releases (1.8.0) even release versions can have the
        #  form 1.8.0r-55
        if re.search('r', initial_version):
            builds, changes = BuildQuery().get_all_builds()
            older_build = BuildQuery().find_membase_build(
                builds,
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon)
        else:
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon)
        remote.stop_membase()
        remote.stop_couchbase()
        remote.download_build(older_build)
        #now let's install ?
        remote.membase_install(older_build)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self,
                                             server,
                                             bucket_data,
                                             howmany=2)
        version = input.test_params['version']

        appropriate_build = _get_build(servers[0],
                                       version,
                                       is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))

        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build,
                               save_upgrade_config=save_upgrade_config)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

        pools_info = rest.get_pools_info()

        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        #verify admin_creds still set

        self.assertTrue(pools_info['implementationVersion'],
                        appropriate_build.product_version)
        if initialize_cluster:
            #TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                    'bucket-0', rest),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], server,
                        bucket.name, self)
Example #4
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             create_buckets=False,
                             insert_data=False,
                             start_upgraded_first=True,
                             load_ratio=-1,
                             roll_upgrade=False,
                             upgrade_path=[],
                             do_new_rest=False):
        node_upgrade_path = []
        node_upgrade_path.extend(upgrade_path)
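        # work on a copy so the mutable default value of upgrade_path is never modified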
        #then start them in whatever order you want
        inserted_keys = []
        log = logger.Logger.get_logger()
        if roll_upgrade:
            log.info("performing an online upgrade")
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        save_upgrade_config = False
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith(
                "1.7"):
            product = 'membase-server-enterprise'
        else:
            product = 'couchbase-server-enterprise'
        # install older build on all nodes
        for server in servers:
            remote = RemoteMachineShellConnection(server)
            rest = RestConnection(server)
            info = remote.extract_remote_info()
            # check to see if we are installing from latestbuilds or releases
            # note: for newer releases (1.8.0) even release versions can have the
            #  form 1.8.0r-55
            if re.search('r', initial_version):
                builds, changes = BuildQuery().get_all_builds()
                older_build = BuildQuery().find_membase_build(
                    builds,
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=initial_version,
                    product=product,
                    is_amazon=is_amazon)

            else:
                older_build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=initial_version,
                    product=product,
                    is_amazon=is_amazon)

            remote.membase_uninstall()
            remote.couchbase_uninstall()
            remote.stop_membase()
            remote.stop_couchbase()
            remote.download_build(older_build)
            #now let's install ?
            remote.membase_install(older_build)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster_port(rest_settings.rest_username,
                                   rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()

        bucket_data = {}
        master = servers[0]
        if create_buckets:
            #let's create buckets
            #wait for the bucket
            #bucket port should also be configurable , pass it as the
            #parameter to this test ? later

            self._create_default_bucket(master)
            inserted_keys = self._load_data(master, load_ratio)
            _create_load_multiple_bucket(self, master, bucket_data, howmany=2)

        # cluster all the nodes together
        ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                       rest_settings, self)
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(
            rebalanceStarted,
            "unable to start rebalance on master node {0}".format(master.ip))
        log.info('started rebalance operation on master node {0}'.format(
            master.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(
            rebalanceSucceeded,
            "rebalance operation for nodes: {0} was not successful".format(
                otpNodeIds))

        if initial_version == "1.7.0" or initial_version == "1.7.1":
            self._save_config(rest_settings, master)

        input_version = input.test_params['version']
        node_upgrade_path.append(input_version)
        current_version = initial_version
        previous_version = current_version
        #if we dont want to do roll_upgrade ?
        log.info("Upgrade path: {0} -> {1}".format(initial_version,
                                                   node_upgrade_path))
        log.info("List of servers {0}".format(servers))
        if not roll_upgrade:
            for version in node_upgrade_path:
                previous_version = current_version
                current_version = version
                if version != initial_version:
                    log.info("Upgrading to version {0}".format(version))
                    self._stop_membase_servers(servers)
                    if previous_version.startswith(
                            "1.7") and current_version.startswith("1.8"):
                        save_upgrade_config = True
                    # No need to save the upgrade config from 180 to 181
                    if previous_version.startswith(
                            "1.8.0") and current_version.startswith("1.8.1"):
                        save_upgrade_config = False
                    appropriate_build = _get_build(servers[0],
                                                   version,
                                                   is_amazon=is_amazon)
                    self.assertTrue(
                        appropriate_build.url,
                        msg="unable to find build {0}".format(version))
                    for server in servers:
                        remote = RemoteMachineShellConnection(server)
                        remote.download_build(appropriate_build)
                        remote.membase_upgrade(
                            appropriate_build,
                            save_upgrade_config=save_upgrade_config)
                        RestHelper(
                            RestConnection(server)).is_ns_server_running(
                                testconstants.NS_SERVER_TIMEOUT)

                        #verify admin_creds still set
                        pools_info = RestConnection(server).get_pools_info()
                        self.assertTrue(pools_info['implementationVersion'],
                                        appropriate_build.product_version)

                        if start_upgraded_first:
                            log.info("Starting server {0} post upgrade".format(
                                server))
                            remote.start_membase()
                        else:
                            remote.stop_membase()

                        remote.disconnect()
                    if not start_upgraded_first:
                        log.info("Starting all servers together")
                        self._start_membase_servers(servers)
                    time.sleep(TIMEOUT_SECS)
                    if version == "1.7.0" or version == "1.7.1":
                        self._save_config(rest_settings, master)

                    if create_buckets:
                        self.assertTrue(
                            BucketOperationHelper.wait_for_bucket_creation(
                                'default', RestConnection(master)),
                            msg="bucket 'default' does not exist..")
                    if insert_data:
                        self._verify_data(master, rest, inserted_keys)

        # rolling upgrade
        else:
            version = input.test_params['version']
            appropriate_build = _get_build(servers[0],
                                           version,
                                           is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url,
                            msg="unable to find build {0}".format(version))
            # rebalance node out
            # remove membase from node
            # install destination version onto node
            # rebalance it back into the cluster
            for server_index in range(len(servers)):
                server = servers[server_index]
                master = servers[server_index - 1]
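                # servers[server_index - 1] wraps around to the last node for index 0,
                # so the rebalance is always driven from a node that stays in the cluster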
                log.info("current master is {0}, rolling node is {1}".format(
                    master, server))

                rest = RestConnection(master)
                nodes = rest.node_statuses()
                allNodes = []
                toBeEjectedNodes = []
                for node in nodes:
                    allNodes.append(node.id)
                    if "{0}:{1}".format(node.ip,
                                        node.port) == "{0}:{1}".format(
                                            server.ip, server.port):
                        toBeEjectedNodes.append(node.id)
                helper = RestHelper(rest)
                removed = helper.remove_nodes(knownNodes=allNodes,
                                              ejectedNodes=toBeEjectedNodes)
                self.assertTrue(
                    removed,
                    msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
                remote = RemoteMachineShellConnection(server)
                remote.download_build(appropriate_build)
                # if initial version is 180
                # Don't uninstall the server
                if not initial_version.startswith('1.8.0'):
                    remote.membase_uninstall()
                    remote.couchbase_uninstall()
                    remote.membase_install(appropriate_build)
                else:
                    remote.membase_upgrade(appropriate_build)

                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                log.info(
                    "sleep for 10 seconds to wait for membase-server to start..."
                )
                time.sleep(TIMEOUT_SECS)
                rest.init_cluster_port(rest_settings.rest_username,
                                       rest_settings.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

                #readd this to the cluster
                ClusterOperationHelper.add_all_nodes_or_assert(
                    master, [server], rest_settings, self)
                nodes = rest.node_statuses()
                otpNodeIds = []
                for node in nodes:
                    otpNodeIds.append(node.id)
                # Issue rest call to the newly added node
                # MB-5108
                if do_new_rest:
                    master = server
                    rest = RestConnection(master)
                rebalanceStarted = rest.rebalance(otpNodeIds, [])
                self.assertTrue(
                    rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(
                        master.ip))
                log.info(
                    'started rebalance operation on master node {0}'.format(
                        master.ip))
                rebalanceSucceeded = rest.monitorRebalance()
                self.assertTrue(
                    rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".
                    format(otpNodeIds))

            #TODO: how can i verify that the cluster init config is preserved
            # verify data on upgraded nodes
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                    'default', RestConnection(master)),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                self._verify_data(master, rest, inserted_keys)
                rest = RestConnection(master)
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], master,
                        bucket.name, self)
Example #5
    def _install_and_upgrade(self, initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        log = logger.Logger.get_logger()
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        save_upgrade_config = False
        if re.search('1.8', input.test_params['version']):
            save_upgrade_config = True
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        older_build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
                                                              os_architecture=info.architecture_type,
                                                              build_version=initial_version,
                                                              product='membase-server-enterprise', is_amazon=is_amazon)
        remote.execute_command('/etc/init.d/membase-server stop')
        remote.download_build(older_build)
        #now let's install ?
        remote.membase_install(older_build)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
        version = input.test_params['version']

        appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))

        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build, save_upgrade_config=save_upgrade_config)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

        pools_info = rest.get_pools_info()

        rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        #verify admin_creds still set

        self.assertTrue(pools_info['implementationVersion'], appropriate_build.product_version)
        if initialize_cluster:
            #TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('bucket-0', rest),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(bucket_data[bucket.name]["inserted_keys"],
                                                               server,
                                                               bucket.name, self)
Example #6
    def _install_and_upgrade(self, initial_version='1.6.5.3',
                             create_buckets=False,
                             insert_data=False,
                             start_upgraded_first=True,
                             load_ratio=-1,
                             roll_upgrade=False,
                             upgrade_path=[]):
        node_upgrade_path = []
        node_upgrade_path.extend(upgrade_path)
        #then start them in whatever order you want
        inserted_keys = []
        log = logger.Logger.get_logger()
        if roll_upgrade:
            log.info("performing a rolling upgrade")
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        save_upgrade_config = False
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        # install older build on all nodes
        for server in servers:
            remote = RemoteMachineShellConnection(server)
            rest = RestConnection(server)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product='membase-server-enterprise',
                is_amazon=is_amazon)

            remote.membase_uninstall()
            remote.couchbase_uninstall()
            remote.execute_command('/etc/init.d/membase-server stop')
            remote.download_build(older_build)
            #now let's install ?
            remote.membase_install(older_build)
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()

        bucket_data = {}
        master = servers[0]
        if create_buckets:
            #let's create buckets
            #wait for the bucket
            #bucket port should also be configurable , pass it as the
            #parameter to this test ? later

            self._create_default_bucket(master)
            inserted_keys = self._load_data(master, load_ratio)
            _create_load_multiple_bucket(self, master, bucket_data, howmany=2)

        # cluster all the nodes together
        ClusterOperationHelper.add_all_nodes_or_assert(master,
                                                       servers,
                                                       rest_settings, self)
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(rebalanceStarted,
                        "unable to start rebalance on master node {0}".format(master.ip))
        log.info('started rebalance operation on master node {0}'.format(master.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(rebalanceSucceeded,
                        "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))

        if initial_version == "1.7.0" or initial_version == "1.7.1":
            self._save_config(rest_settings, master)

        input_version = input.test_params['version']
        node_upgrade_path.append(input_version)
        #if we dont want to do roll_upgrade ?
        log.info("Upgrade path: {0} -> {1}".format(initial_version, node_upgrade_path))
        log.info("List of servers {0}".format(servers))
        if not roll_upgrade:
            for version in node_upgrade_path:
                if version != initial_version:
                    log.info("Upgrading to version {0}".format(version))
                    self._stop_membase_servers(servers)
                    if re.search('1.8', version):
                        save_upgrade_config = True

                    appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
                    self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
                    for server in servers:
                        remote = RemoteMachineShellConnection(server)
                        remote.download_build(appropriate_build)
                        remote.membase_upgrade(appropriate_build, save_upgrade_config=save_upgrade_config)
                        RestHelper(RestConnection(server)).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

                        #verify admin_creds still set
                        pools_info = RestConnection(server).get_pools_info()
                        self.assertTrue(pools_info['implementationVersion'], appropriate_build.product_version)

                        if start_upgraded_first:
                            log.info("Starting server {0} post upgrade".format(server))
                            remote.start_membase()
                        else:
                            remote.stop_membase()

                        remote.disconnect()
                    if not start_upgraded_first:
                        log.info("Starting all servers together")
                        self._start_membase_servers(servers)
                    time.sleep(TIMEOUT_SECS)
                    if version == "1.7.0" or version == "1.7.1":
                        self._save_config(rest_settings, master)

                    if create_buckets:
                        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('default', RestConnection(master)),
                                        msg="bucket 'default' does not exist..")
                    if insert_data:
                        self._verify_data(master, rest, inserted_keys)

        # rolling upgrade
        else:
            version = input.test_params['version']
            appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
            # rebalance node out
            # remove membase from node
            # install destination version onto node
            # rebalance it back into the cluster
            for server_index in range(len(servers)):
                server = servers[server_index]
                master = servers[server_index - 1]
                log.info("current master is {0}, rolling node is {1}".format(master, server))

                rest = RestConnection(master)
                nodes = rest.node_statuses()
                allNodes = []
                toBeEjectedNodes = []
                for node in nodes:
                    allNodes.append(node.id)
                    if "{0}:{1}".format(node.ip, node.port) == "{0}:{1}".format(server.ip, server.port):
                        toBeEjectedNodes.append(node.id)
                helper = RestHelper(rest)
                removed = helper.remove_nodes(knownNodes=allNodes, ejectedNodes=toBeEjectedNodes)
                self.assertTrue(removed, msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
                remote = RemoteMachineShellConnection(server)
                remote.membase_uninstall()
                remote.couchbase_uninstall()
                remote.download_build(appropriate_build)
                remote.membase_install(appropriate_build)
                RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                log.info("sleep for 10 seconds to wait for membase-server to start...")
                time.sleep(TIMEOUT_SECS)
                rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

                #readd this to the cluster
                ClusterOperationHelper.add_all_nodes_or_assert(master, [server], rest_settings, self)
                nodes = rest.node_statuses()
                otpNodeIds = []
                for node in nodes:
                    otpNodeIds.append(node.id)
                rebalanceStarted = rest.rebalance(otpNodeIds, [])
                self.assertTrue(rebalanceStarted,
                                "unable to start rebalance on master node {0}".format(master.ip))
                log.info('started rebalance operation on master node {0}'.format(master.ip))
                rebalanceSucceeded = rest.monitorRebalance()
                self.assertTrue(rebalanceSucceeded,
                                "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
                #ClusterOperationHelper.verify_persistence(servers, self)

            #TODO: how can i verify that the cluster init config is preserved
            # verify data on upgraded nodes
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('default', RestConnection(master)),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                self._verify_data(master, rest, inserted_keys)
                rest = RestConnection(master)
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(bucket_data[bucket.name]["inserted_keys"],
                                                               master,
                                                               bucket.name, self)
Example #7
    def _install_and_upgrade(self, initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        log = logger.Logger.get_logger()
        input = TestInputSingleton.input
        version = input.test_params['version']
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        #release_builds = BuildQuery().get_all_release_builds(initial_version)

        #if initial_version == "1.7.2":
         #   initial_version = "1.7.2r-20"
        older_build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
                                                              os_architecture=info.architecture_type,
                                                              build_version=initial_version,
                                                              product='membase-server-enterprise', is_amazon=is_amazon)
        if info.type.lower() == 'windows':
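            # the Windows installer uses an abbreviated product prefix:
            # "mb" for membase builds, "cb" for couchbase builds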
            if older_build.product_version.startswith("1.8"):
                abbr_product = "cb"
            else:
                abbr_product = "mb"
            remote.download_binary_in_win(older_build.url, abbr_product, initial_version)
            remote.membase_install_win(older_build, initial_version)
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
            bucket_data = {}
            if initialize_cluster:
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                if create_buckets:
                    _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
            if version.startswith("1.8"):
                abbr_product = "cb"
            appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
            remote.download_binary_in_win(appropriate_build.url, abbr_product, version)
            remote.stop_membase()
            log.info("###### START UPGRADE. #########")
            remote.membase_upgrade_win(info.architecture_type, info.windows_name, version, initial_version)
            remote.disconnect()
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

            pools_info = rest.get_pools_info()

            rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
            time.sleep(TIMEOUT_SECS)
            # verify admin_creds still set

            self.assertTrue(pools_info['implementationVersion'], appropriate_build.product_version)
            if initialize_cluster:
                #TODO: how can i verify that the cluster init config is preserved
                if create_buckets:
                    self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('bucket-0', rest),
                                    msg="bucket 'default' does not exist..")
                if insert_data:
                    buckets = rest.get_buckets()
                    for bucket in buckets:
                        BucketOperationHelper.keys_exist_or_assert(bucket_data[bucket.name]["inserted_keys"],
                                                                   server,
                                                                   bucket.name, self)
        else:
            log.error("This is not windows server!")
Example #8
    def _install_and_upgrade(
        self, initial_version="1.6.5.3", initialize_cluster=False, create_buckets=False, insert_data=False
    ):
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        save_upgrade_config = False
        if initial_version.startswith("1.7") and input.test_params["version"].startswith("1.8"):
            save_upgrade_config = True
        is_amazon = False
        if input.test_params.get("amazon", False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith("1.7"):
            product = "membase-server-enterprise"
        else:
            product = "couchbase-server-enterprise"
        remote = RemoteMachineShellConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        # check to see if we are installing from latestbuilds or releases
        # note: for newer releases (1.8.0) even release versions can have the
        #  form 1.8.0r-55
        if re.search("r", initial_version):
            builds, changes = BuildQuery().get_all_builds()
            older_build = BuildQuery().find_membase_build(
                builds,
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon,
            )
        else:
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon,
            )
        remote.stop_membase()
        remote.stop_couchbase()
        remote.download_build(older_build)
        # now let's install ?
        remote.install_server(older_build)
        rest = RestConnection(server)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster(rest_settings.rest_username, rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
        version = input.test_params["version"]

        appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))

        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build, save_upgrade_config=save_upgrade_config)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

        pools_info = rest.get_pools_info()

        rest.init_cluster(rest_settings.rest_username, rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        # verify admin_creds still set

        self.assertTrue(pools_info["implementationVersion"], appropriate_build.product_version)
        if initialize_cluster:
            # TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(
                    BucketOperationHelper.wait_for_bucket_creation("bucket-0", rest),
                    msg="bucket 'default' does not exist..",
                )
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], server, bucket.name, self
                    )
Example #9
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        log = logger.Logger.get_logger()
        input = TestInputSingleton.input
        version = input.test_params['version']
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        #release_builds = BuildQuery().get_all_release_builds(initial_version)

        #if initial_version == "1.7.2":
        #   initial_version = "1.7.2r-20"
        older_build = BuildQuery().find_membase_release_build(
            deliverable_type=info.deliverable_type,
            os_architecture=info.architecture_type,
            build_version=initial_version,
            product='membase-server-enterprise',
            is_amazon=is_amazon)
        if info.type.lower() == 'windows':
            if older_build.product_version.startswith("1.8"):
                abbr_product = "cb"
            else:
                abbr_product = "mb"
            remote.download_binary_in_win(older_build.url, abbr_product,
                                          initial_version)
            remote.install_server_win(older_build, initial_version)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(rest_settings.rest_username,
                              rest_settings.rest_password)
            bucket_data = {}
            if initialize_cluster:
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                if create_buckets:
                    _create_load_multiple_bucket(self,
                                                 server,
                                                 bucket_data,
                                                 howmany=2)
            if version.startswith("1.8"):
                abbr_product = "cb"
            appropriate_build = _get_build(servers[0],
                                           version,
                                           is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url,
                            msg="unable to find build {0}".format(version))
            remote.download_binary_in_win(appropriate_build.url, abbr_product,
                                          version)
            remote.stop_membase()
            log.info("###### START UPGRADE. #########")
            remote.membase_upgrade_win(info.architecture_type,
                                       info.windows_name, version,
                                       initial_version)
            remote.disconnect()
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)

            pools_info = rest.get_pools_info()

            rest.init_cluster(rest_settings.rest_username,
                              rest_settings.rest_password)
            time.sleep(TIMEOUT_SECS)
            # verify admin_creds still set

            self.assertTrue(pools_info['implementationVersion'],
                            appropriate_build.product_version)
            if initialize_cluster:
                #TODO: how can i verify that the cluster init config is preserved
                if create_buckets:
                    self.assertTrue(
                        BucketOperationHelper.wait_for_bucket_creation(
                            'bucket-0', rest),
                        msg="bucket 'default' does not exist..")
                if insert_data:
                    buckets = rest.get_buckets()
                    for bucket in buckets:
                        BucketOperationHelper.keys_exist_or_assert(
                            bucket_data[bucket.name]["inserted_keys"], server,
                            bucket.name, self)
        else:
            log.error("This is not windows server!")
Example #10
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             create_buckets=False,
                             insert_data=False,
                             start_upgraded_first=True,
                             load_ratio=-1,
                             roll_upgrade=False,
                             upgrade_path=[]):
        node_upgrade_path = []
        node_upgrade_path.extend(upgrade_path)
        #then start them in whatever order you want
        inserted_keys = []
        log = logger.Logger.get_logger()
        if roll_upgrade:
            log.info("performing a rolling upgrade")
        input = TestInputSingleton.input
        input_version = input.test_params['version']
        rest_settings = input.membase_settings
        servers = input.servers
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True

        # install older build on all nodes
        for server in servers:
            remote = RemoteMachineShellConnection(server)
            rest = RestConnection(server)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product='membase-server-enterprise',
                is_amazon=is_amazon)

            remote.membase_uninstall()
            remote.couchbase_uninstall()
            if older_build.product_version.startswith("1.8"):
                abbr_product = "cb"
            else:
                abbr_product = "mb"
            remote.download_binary_in_win(older_build.url, abbr_product,
                                          initial_version)
            #now let's install ?
            remote.install_server_win(older_build, initial_version)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(rest_settings.rest_username,
                              rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()

        bucket_data = {}
        master = servers[0]
        # cluster all the nodes together
        ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                       rest_settings, self)
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(
            rebalanceStarted,
            "unable to start rebalance on master node {0}".format(master.ip))
        log.info('started rebalance operation on master node {0}'.format(
            master.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(
            rebalanceSucceeded,
            "rebalance operation for nodes: {0} was not successful".format(
                otpNodeIds))

        if create_buckets:
            #let's create buckets
            #wait for the bucket
            #bucket port should also be configurable , pass it as the
            #parameter to this test ? later

            self._create_default_bucket(master)
            inserted_keys = self._load_data(master, load_ratio)
            _create_load_multiple_bucket(self, master, bucket_data, howmany=2)

        #if initial_version == "1.7.0" or initial_version == "1.7.1":
        #   self._save_config(rest_settings, master)

        node_upgrade_path.append(input_version)
        #if we dont want to do roll_upgrade ?
        log.info("Upgrade path: {0} -> {1}".format(initial_version,
                                                   node_upgrade_path))
        log.info("List of servers {0}".format(servers))
        if not roll_upgrade:
            for version in node_upgrade_path:
                if version != initial_version:
                    log.info(
                        "SHUTDOWN ALL CB OR MB SERVERS IN CLUSTER BEFORE DOING UPGRADE"
                    )
                    for server in servers:
                        shell = RemoteMachineShellConnection(server)
                        shell.stop_membase()
                        shell.disconnect()
                    log.info("Upgrading to version {0}".format(version))
                    appropriate_build = _get_build(servers[0],
                                                   version,
                                                   is_amazon=is_amazon)
                    self.assertTrue(
                        appropriate_build.url,
                        msg="unable to find build {0}".format(version))
                    for server in servers:
                        remote = RemoteMachineShellConnection(server)
                        if version.startswith("1.8"):
                            abbr_product = "cb"
                        remote.download_binary_in_win(appropriate_build.url,
                                                      abbr_product, version)
                        log.info("###### START UPGRADE. #########")
                        remote.membase_upgrade_win(info.architecture_type,
                                                   info.windows_name, version,
                                                   initial_version)
                        RestHelper(
                            RestConnection(server)).is_ns_server_running(
                                testconstants.NS_SERVER_TIMEOUT)

                        #verify admin_creds still set
                        pools_info = RestConnection(server).get_pools_info()
                        self.assertTrue(pools_info['implementationVersion'],
                                        appropriate_build.product_version)

                        if not start_upgraded_first:
                            remote.stop_membase()

                        remote.disconnect()
                    if not start_upgraded_first:
                        log.info("Starting all servers together")
                        self._start_membase_servers(servers)
                    time.sleep(TIMEOUT_SECS)

                    if create_buckets:
                        self.assertTrue(
                            BucketOperationHelper.wait_for_bucket_creation(
                                'default', RestConnection(master)),
                            msg="bucket 'default' does not exist..")
                    if insert_data:
                        self._verify_data(master, rest, inserted_keys)

        # rolling upgrade
        else:
            version = input.test_params['version']
            if version.startswith("1.8"):
                abbr_product = "cb"
            appropriate_build = _get_build(servers[0],
                                           version,
                                           is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url,
                            msg="unable to find build {0}".format(version))
            # rebalance node out
            # remove membase from node
            # install destination version onto node
            # rebalance it back into the cluster
            for server_index in range(len(servers)):
                server = servers[server_index]
                master = servers[server_index - 1]
                log.info("current master is {0}, rolling node is {1}".format(
                    master, server))

                rest = RestConnection(master)
                nodes = rest.node_statuses()
                allNodes = []
                toBeEjectedNodes = []
                for node in nodes:
                    allNodes.append(node.id)
                    if "{0}:{1}".format(node.ip,
                                        node.port) == "{0}:{1}".format(
                                            server.ip, server.port):
                        toBeEjectedNodes.append(node.id)
                helper = RestHelper(rest)
                removed = helper.remove_nodes(knownNodes=allNodes,
                                              ejectedNodes=toBeEjectedNodes)
                self.assertTrue(
                    removed,
                    msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
                remote = RemoteMachineShellConnection(server)
                remote.membase_uninstall()
                remote.couchbase_uninstall()
                if appropriate_build.product == 'membase-server-enterprise':
                    abbr_product = "mb"
                else:
                    abbr_product = "cb"
                remote.download_binary_in_win(appropriate_build.url,
                                              abbr_product, version)
                remote.install_server_win(appropriate_build, version)
                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                time.sleep(TIMEOUT_SECS)
                rest.init_cluster(rest_settings.rest_username,
                                  rest_settings.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

                #readd this to the cluster
                ClusterOperationHelper.add_all_nodes_or_assert(
                    master, [server], rest_settings, self)
                nodes = rest.node_statuses()
                log.info(
                    "wait 30 seconds before asking older node for start rebalance"
                )
                time.sleep(30)
                otpNodeIds = []
                for node in nodes:
                    otpNodeIds.append(node.id)
                rebalanceStarted = rest.rebalance(otpNodeIds, [])
                self.assertTrue(
                    rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(
                        master.ip))
                log.info(
                    'started rebalance operation on master node {0}'.format(
                        master.ip))
                rebalanceSucceeded = rest.monitorRebalance()
                self.assertTrue(
                    rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".
                    format(otpNodeIds))
                #ClusterOperationHelper.verify_persistence(servers, self)

            #TODO: how can i verify that the cluster init config is preserved
            # verify data on upgraded nodes
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                    'default', RestConnection(master)),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                self._verify_data(master, rest, inserted_keys)
                rest = RestConnection(master)
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], master,
                        bucket.name, self)