Example #1
    def uninstall(self, params):
        remote_client = RemoteMachineShellConnection(params["server"])
        #remote_client.membase_uninstall()

        self.nsis = 'nsis' in params and params['nsis'].lower() == 'true'
        remote_client.couchbase_uninstall(windows_nsis=self.nsis)
        remote_client.disconnect()
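
A hypothetical invocation of the uninstall step above; installer, servers, and the params dict are illustrative names only, with params["server"] standing in for a TestInput-style server object:

# Illustrative usage sketch (not part of the original test framework):
# 'installer' is an instance of the class that defines uninstall(), and
# servers[0] is assumed to be a TestInput server object.
params = {"server": servers[0], "nsis": "True"}  # "nsis" only matters for Windows NSIS installs
installer.uninstall(params)
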
Example #2
    def _test_install(self,serverInfo,version,builds):
        query = BuildQuery()
        info = self.machine_infos[serverInfo.ip]
        names = ['membase-server-enterprise',
                 'membase-server-community',
                 'couchbase-server-enterprise',
                 'couchbase-server-community']
        build = None
        for name in names:
            build = query.find_membase_build(builds,
                                             name,
                                             info.deliverable_type,
                                             info.architecture_type,
                                             version.strip())
            if build:
                break

        if not build:
            self.fail('unable to find any {0} build for arch : {1}, version : {2}'.format(info.distribution_type,
                                                                                           info.architecture_type,
                                                                                           version.strip()))
        self.log.info('for machine : {0} {1} relevant build : {2}'.format(
            info.architecture_type, info.distribution_type, build))
        remote_client = RemoteMachineShellConnection(serverInfo)
        remote_client.membase_uninstall()
        remote_client.couchbase_uninstall()
        if 'amazon' in self.input.test_params:
            build.url = build.url.replace("http://builds.hq.northscale.net/latestbuilds/",
                                          "http://packages.northscale.com/latestbuilds/")
            build.url = build.url.replace("enterprise", "community")
            build.name = build.name.replace("enterprise", "community")
        downloaded = remote_client.download_build(build)
        self.assertTrue(downloaded, 'unable to download binaries : {0}'.format(build.url))
        remote_client.membase_install(build)
        #TODO: we should poll the 8091 port until it is up and running
        self.log.info('wait 5 seconds for membase server to start')
        time.sleep(5)
        start_time = time.time()
        cluster_initialized = False
        while time.time() < (start_time + (10 * 60)):
            rest = RestConnection(serverInfo)
            try:
                if serverInfo.data_path:
                    self.log.info("setting data path to " + serverInfo.data_path)
                    rest.set_data_path(serverInfo.data_path)
                rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
                cluster_initialized = True
                break
            except ServerUnavailableException:
                self.log.error("error happened while initializing the cluster @ {0}".format(serverInfo.ip))
            self.log.info('sleep for 5 seconds before trying again ...')
            time.sleep(5)
        self.assertTrue(cluster_initialized,
                        "error happened while initializing the cluster @ {0}".format(serverInfo.ip))
        nodeinfo = rest.get_nodes_self()
        rest.init_cluster_memoryQuota(memoryQuota=nodeinfo.mcdMemoryReserved)
        rest.init_cluster_memoryQuota(256)
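
The TODO above notes that the fixed five-second sleep should really be a poll of port 8091. A minimal sketch of such a readiness check, assuming a plain TCP connect is an acceptable test; wait_for_port and its parameters are illustrative and not part of the test framework:

import socket
import time

def wait_for_port(host, port=8091, timeout_secs=600, poll_interval=5):
    # Poll until the Couchbase/Membase REST port accepts TCP connections
    # or the overall timeout expires.
    end_time = time.time() + timeout_secs
    while time.time() < end_time:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(5)
        try:
            sock.connect((host, port))
            return True
        except (socket.error, socket.timeout):
            time.sleep(poll_interval)
        finally:
            sock.close()
    return False

The retry loop in Example #2 could call wait_for_port(serverInfo.ip) before building the RestConnection instead of sleeping a fixed five seconds.
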
Example #3
    def test_backup_upgrade_restore_default(self):
        if len(self.servers) < 2:
            self.log.error("At least 2 servers required for this test ..")
            return
        original_set = copy.copy(self.servers)
        worker = self.servers[len(self.servers) - 1]
        self.servers = self.servers[:len(self.servers) - 1]
        shell = RemoteMachineShellConnection(self.master)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        fin = o[0]
        shell.disconnect()
        initial_version = self.input.param("initial_version", fin)
        final_version = self.input.param("final_version", fin)
        if initial_version == final_version:
            self.log.error("Same initial and final versions ..")
            return
        if not final_version.startswith('2.0'):
            self.log.error("Upgrade test not set to run from 1.8.1 -> 2.0 ..")
            return
        builds, changes = BuildQuery().get_all_builds()
        product = 'couchbase-server-enterprise'
        #CASE where the worker isn't a 2.0+
        worker_flag = 0
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        temp = o[0]
        if not temp.startswith('2.0'):
            worker_flag = 1
        if worker_flag == 1:
            self.log.info(
                "Loading version {0} on worker.. ".format(final_version))
            remote = RemoteMachineShellConnection(worker)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_build(builds, product,
                                                  info.deliverable_type,
                                                  info.architecture_type,
                                                  final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(older_build)
            remote.install_server(older_build)
            remote.disconnect()

        remote_tmp = "{1}/{0}".format("backup", "/root")
        perm_comm = "mkdir -p {0}".format(remote_tmp)
        if not initial_version == fin:
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info(
                    "Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product,
                                                      info.deliverable_type,
                                                      info.architecture_type,
                                                      initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

        self.common_setUp()
        bucket = "default"
        if len(self.servers) > 1:
            self.add_nodes_and_rebalance()
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
            servers=[self.master],
            name=bucket,
            ram_load_ratio=0.5,
            value_size_distribution=distribution,
            moxi=True,
            write_only=True,
            delete_ratio=0.1,
            number_of_threads=2)
        if len(self.servers) > 1:
            rest = RestConnection(self.master)
            self.assertTrue(RebalanceHelper.wait_for_replication(
                rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command(perm_comm)
        shell.log_command_output(o, r)
        shell.disconnect()

        #Backup
        #BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command(
            "/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
                self.master.ip, self.master.port, remote_tmp))
        shell.disconnect()
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        time.sleep(30)

        #Upgrade
        for server in self.servers:
            self.log.info(
                "Upgrading to current version {0}".format(final_version))
            remote = RemoteMachineShellConnection(server)
            info = remote.extract_remote_info()
            new_build = BuildQuery().find_build(builds, product,
                                                info.deliverable_type,
                                                info.architecture_type,
                                                final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(new_build)
            remote.install_server(new_build)
            rest = RestConnection(server)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(server.rest_username, server.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
        time.sleep(30)

        #Restore
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        #BackupHelper(self.master, self).restore(backup_location=remote_tmp, moxi_port=info.moxi)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command(
            "/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
                self.master.ip, self.master.port, remote_tmp, bucket))
        shell.disconnect()
        time.sleep(60)
        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(
            inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")
        time.sleep(30)
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        rest = RestConnection(self.master)
        helper = RestHelper(rest)
        nodes = rest.node_statuses()
        master_id = rest.get_nodes_self().id
        if len(self.servers) > 1:
            removed = helper.remove_nodes(
                knownNodes=[node.id for node in nodes],
                ejectedNodes=[
                    node.id for node in nodes if node.id != master_id
                ],
                wait_for_rebalance=True)

        shell = RemoteMachineShellConnection(worker)
        shell.remove_directory(remote_tmp)
        shell.disconnect()

        self.servers = copy.copy(original_set)
        if initial_version == fin:
            builds, changes = BuildQuery().get_all_builds()
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info(
                    "Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product,
                                                      info.deliverable_type,
                                                      info.architecture_type,
                                                      initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()
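
The backup and restore steps in Example #3 shell out to cbbackup and cbrestore on the worker node. A small helper mirroring those two calls might look like the sketch below; run_backup_and_restore is an illustrative name and simply reuses the RemoteMachineShellConnection API already shown above:

def run_backup_and_restore(worker, master, remote_tmp, bucket):
    # Mirrors the cbbackup/cbrestore invocations from the test above.
    shell = RemoteMachineShellConnection(worker)
    try:
        # Back up the cluster reachable through the master's REST endpoint.
        shell.execute_command(
            "/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
                master.ip, master.port, remote_tmp))
        # Restore only the named bucket from the same backup directory.
        shell.execute_command(
            "/opt/couchbase/bin/cbrestore {0} http://{1}:{2} -b {3}".format(
                remote_tmp, master.ip, master.port, bucket))
    finally:
        shell.disconnect()
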
Example #4
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        save_upgrade_config = False
        if initial_version.startswith(
                "1.7") and input.test_params['version'].startswith("1.8"):
            save_upgrade_config = True
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith(
                "1.7"):
            product = 'membase-server-enterprise'
        else:
            product = 'couchbase-server-enterprise'
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        # check to see if we are installing from latestbuilds or releases
        # note: for newer releases (1.8.0) even release versions can have the
        #  form 1.8.0r-55
        if re.search('r', initial_version):
            builds, changes = BuildQuery().get_all_builds()
            older_build = BuildQuery().find_membase_build(
                builds,
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon)
        else:
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon)
        remote.stop_membase()
        remote.stop_couchbase()
        remote.download_build(older_build)
        #now let's install ?
        remote.membase_install(older_build)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self,
                                             server,
                                             bucket_data,
                                             howmany=2)
        version = input.test_params['version']

        appropriate_build = _get_build(servers[0],
                                       version,
                                       is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))

        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build,
                               save_upgrade_config=save_upgrade_config)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

        pools_info = rest.get_pools_info()

        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        #verify admin_creds still set

        self.assertTrue(pools_info['implementationVersion'],
                        appropriate_build.product_version)
        if initialize_cluster:
            #TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                    'bucket-0', rest),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], server,
                        bucket.name, self)
Example #5
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             create_buckets=False,
                             insert_data=False,
                             start_upgraded_first=True,
                             load_ratio=-1,
                             roll_upgrade=False,
                             upgrade_path=[],
                             do_new_rest=False):
        node_upgrade_path = []
        node_upgrade_path.extend(upgrade_path)
        #then start them in whatever order you want
        inserted_keys = []
        log = logger.Logger.get_logger()
        if roll_upgrade:
            log.info("performing an online upgrade")
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        save_upgrade_config = False
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith(
                "1.7"):
            product = 'membase-server-enterprise'
        else:
            product = 'couchbase-server-enterprise'
        # install older build on all nodes
        for server in servers:
            remote = RemoteMachineShellConnection(server)
            rest = RestConnection(server)
            info = remote.extract_remote_info()
            # check to see if we are installing from latestbuilds or releases
            # note: for newer releases (1.8.0) even release versions can have the
            #  form 1.8.0r-55
            if re.search('r', initial_version):
                builds, changes = BuildQuery().get_all_builds()
                older_build = BuildQuery().find_membase_build(
                    builds,
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=initial_version,
                    product=product,
                    is_amazon=is_amazon)

            else:
                older_build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=initial_version,
                    product=product,
                    is_amazon=is_amazon)

            remote.membase_uninstall()
            remote.couchbase_uninstall()
            remote.stop_membase()
            remote.stop_couchbase()
            remote.download_build(older_build)
            #now let's install ?
            remote.membase_install(older_build)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster_port(rest_settings.rest_username,
                                   rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()

        bucket_data = {}
        master = servers[0]
        if create_buckets:
            #let's create buckets
            #wait for the bucket
            #bucket port should also be configurable , pass it as the
            #parameter to this test ? later

            self._create_default_bucket(master)
            inserted_keys = self._load_data(master, load_ratio)
            _create_load_multiple_bucket(self, master, bucket_data, howmany=2)

        # cluster all the nodes together
        ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                       rest_settings, self)
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(
            rebalanceStarted,
            "unable to start rebalance on master node {0}".format(master.ip))
        log.info('started rebalance operation on master node {0}'.format(
            master.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(
            rebalanceSucceeded,
            "rebalance operation for nodes: {0} was not successful".format(
                otpNodeIds))

        if initial_version == "1.7.0" or initial_version == "1.7.1":
            self._save_config(rest_settings, master)

        input_version = input.test_params['version']
        node_upgrade_path.append(input_version)
        current_version = initial_version
        previous_version = current_version
        # if we don't want to do a rolling upgrade
        log.info("Upgrade path: {0} -> {1}".format(initial_version,
                                                   node_upgrade_path))
        log.info("List of servers {0}".format(servers))
        if not roll_upgrade:
            for version in node_upgrade_path:
                previous_version = current_version
                current_version = version
                if version != initial_version:
                    log.info("Upgrading to version {0}".format(version))
                    self._stop_membase_servers(servers)
                    if previous_version.startswith(
                            "1.7") and current_version.startswith("1.8"):
                        save_upgrade_config = True
                    # No need to save the upgrade config from 180 to 181
                    if previous_version.startswith(
                            "1.8.0") and current_version.startswith("1.8.1"):
                        save_upgrade_config = False
                    appropriate_build = _get_build(servers[0],
                                                   version,
                                                   is_amazon=is_amazon)
                    self.assertTrue(
                        appropriate_build.url,
                        msg="unable to find build {0}".format(version))
                    for server in servers:
                        remote = RemoteMachineShellConnection(server)
                        remote.download_build(appropriate_build)
                        remote.membase_upgrade(
                            appropriate_build,
                            save_upgrade_config=save_upgrade_config)
                        RestHelper(
                            RestConnection(server)).is_ns_server_running(
                                testconstants.NS_SERVER_TIMEOUT)

                        #verify admin_creds still set
                        pools_info = RestConnection(server).get_pools_info()
                        self.assertTrue(pools_info['implementationVersion'],
                                        appropriate_build.product_version)

                        if start_upgraded_first:
                            log.info("Starting server {0} post upgrade".format(
                                server))
                            remote.start_membase()
                        else:
                            remote.stop_membase()

                        remote.disconnect()
                    if not start_upgraded_first:
                        log.info("Starting all servers together")
                        self._start_membase_servers(servers)
                    time.sleep(TIMEOUT_SECS)
                    if version == "1.7.0" or version == "1.7.1":
                        self._save_config(rest_settings, master)

                    if create_buckets:
                        self.assertTrue(
                            BucketOperationHelper.wait_for_bucket_creation(
                                'default', RestConnection(master)),
                            msg="bucket 'default' does not exist..")
                    if insert_data:
                        self._verify_data(master, rest, inserted_keys)

        # rolling upgrade
        else:
            version = input.test_params['version']
            appropriate_build = _get_build(servers[0],
                                           version,
                                           is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url,
                            msg="unable to find build {0}".format(version))
            # rebalance node out
            # remove membase from node
            # install destination version onto node
            # rebalance it back into the cluster
            for server_index in range(len(servers)):
                server = servers[server_index]
                master = servers[server_index - 1]
                log.info("current master is {0}, rolling node is {1}".format(
                    master, server))

                rest = RestConnection(master)
                nodes = rest.node_statuses()
                allNodes = []
                toBeEjectedNodes = []
                for node in nodes:
                    allNodes.append(node.id)
                    if "{0}:{1}".format(node.ip,
                                        node.port) == "{0}:{1}".format(
                                            server.ip, server.port):
                        toBeEjectedNodes.append(node.id)
                helper = RestHelper(rest)
                removed = helper.remove_nodes(knownNodes=allNodes,
                                              ejectedNodes=toBeEjectedNodes)
                self.assertTrue(
                    removed,
                    msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
                remote = RemoteMachineShellConnection(server)
                remote.download_build(appropriate_build)
                # if initial version is 180
                # Don't uninstall the server
                if not initial_version.startswith('1.8.0'):
                    remote.membase_uninstall()
                    remote.couchbase_uninstall()
                    remote.membase_install(appropriate_build)
                else:
                    remote.membase_upgrade(appropriate_build)

                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                log.info(
                    "sleep for {0} seconds to wait for membase-server to start...".format(
                        TIMEOUT_SECS))
                time.sleep(TIMEOUT_SECS)
                rest.init_cluster_port(rest_settings.rest_username,
                                       rest_settings.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

                #readd this to the cluster
                ClusterOperationHelper.add_all_nodes_or_assert(
                    master, [server], rest_settings, self)
                nodes = rest.node_statuses()
                otpNodeIds = []
                for node in nodes:
                    otpNodeIds.append(node.id)
                # Issue rest call to the newly added node
                # MB-5108
                if do_new_rest:
                    master = server
                    rest = RestConnection(master)
                rebalanceStarted = rest.rebalance(otpNodeIds, [])
                self.assertTrue(
                    rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(
                        master.ip))
                log.info(
                    'started rebalance operation on master node {0}'.format(
                        master.ip))
                rebalanceSucceeded = rest.monitorRebalance()
                self.assertTrue(
                    rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".
                    format(otpNodeIds))

            #TODO: how can i verify that the cluster init config is preserved
            # verify data on upgraded nodes
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                    'default', RestConnection(master)),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                self._verify_data(master, rest, inserted_keys)
                rest = RestConnection(master)
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], master,
                        bucket.name, self)
Example #6
    def uninstall(self, params):
        remote_client = RemoteMachineShellConnection(params["server"])
        remote_client.membase_uninstall()
        remote_client.couchbase_uninstall()
        remote_client.disconnect()
Example #7
    def _install_and_upgrade(self, initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        log = logger.Logger.get_logger()
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        save_upgrade_config = False
        if re.search('1.8',input.test_params['version']):
            save_upgrade_config = True
        is_amazon = False
        if input.test_params.get('amazon',False):
            is_amazon = True
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        older_build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
                                                              os_architecture=info.architecture_type,
                                                              build_version=initial_version,
                                                              product='membase-server-enterprise', is_amazon=is_amazon)
        remote.execute_command('/etc/init.d/membase-server stop')
        remote.download_build(older_build)
        #now let's install ?
        remote.membase_install(older_build)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
        version = input.test_params['version']

        appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))

        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build, save_upgrade_config=save_upgrade_config)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

        pools_info = rest.get_pools_info()

        rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        #verify admin_creds still set

        self.assertTrue(pools_info['implementationVersion'], appropriate_build.product_version)
        if initialize_cluster:
            #TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('bucket-0', rest),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(bucket_data[bucket.name]["inserted_keys"],
                                                               server,
                                                               bucket.name, self)
Example #8
    def _install_and_upgrade(self, initial_version='1.6.5.3',
                             create_buckets=False,
                             insert_data=False,
                             start_upgraded_first=True,
                             load_ratio=-1,
                             roll_upgrade=False,
                             upgrade_path=[]):
        node_upgrade_path = []
        node_upgrade_path.extend(upgrade_path)
        #then start them in whatever order you want
        inserted_keys = []
        log = logger.Logger.get_logger()
        if roll_upgrade:
            log.info("performing a rolling upgrade")
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        save_upgrade_config = False
        is_amazon = False
        if input.test_params.get('amazon',False):
            is_amazon = True
        # install older build on all nodes
        for server in servers:
            remote = RemoteMachineShellConnection(server)
            rest = RestConnection(server)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
                                                              os_architecture=info.architecture_type,
                                                              build_version=initial_version,
                                                              product='membase-server-enterprise', is_amazon=is_amazon)

            remote.membase_uninstall()
            remote.couchbase_uninstall()
            remote.execute_command('/etc/init.d/membase-server stop')
            remote.download_build(older_build)
            #now let's install ?
            remote.membase_install(older_build)
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()

        bucket_data = {}
        master = servers[0]
        if create_buckets:
            #let's create buckets
            #wait for the bucket
            #bucket port should also be configurable , pass it as the
            #parameter to this test ? later

            self._create_default_bucket(master)
            inserted_keys = self._load_data(master, load_ratio)
            _create_load_multiple_bucket(self, master, bucket_data, howmany=2)

        # cluster all the nodes together
        ClusterOperationHelper.add_all_nodes_or_assert(master,
                                                       servers,
                                                       rest_settings, self)
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(rebalanceStarted,
                        "unable to start rebalance on master node {0}".format(master.ip))
        log.info('started rebalance operation on master node {0}'.format(master.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(rebalanceSucceeded,
                        "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))

        if initial_version == "1.7.0" or initial_version == "1.7.1":
            self._save_config(rest_settings, master)

        input_version = input.test_params['version']
        node_upgrade_path.append(input_version)
        # if we don't want to do a rolling upgrade
        log.info("Upgrade path: {0} -> {1}".format(initial_version, node_upgrade_path))
        log.info("List of servers {0}".format(servers))
        if not roll_upgrade:
            for version in node_upgrade_path:
                if version != initial_version:
                    log.info("Upgrading to version {0}".format(version))
                    self._stop_membase_servers(servers)
                    if re.search('1.8', version):
                        save_upgrade_config = True

                    appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
                    self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
                    for server in servers:
                        remote = RemoteMachineShellConnection(server)
                        remote.download_build(appropriate_build)
                        remote.membase_upgrade(appropriate_build, save_upgrade_config=save_upgrade_config)
                        RestHelper(RestConnection(server)).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

                        #verify admin_creds still set
                        pools_info = RestConnection(server).get_pools_info()
                        self.assertTrue(pools_info['implementationVersion'], appropriate_build.product_version)

                        if start_upgraded_first:
                            log.info("Starting server {0} post upgrade".format(server))
                            remote.start_membase()
                        else:
                            remote.stop_membase()

                        remote.disconnect()
                    if not start_upgraded_first:
                        log.info("Starting all servers together")
                        self._start_membase_servers(servers)
                    time.sleep(TIMEOUT_SECS)
                    if version == "1.7.0" or version == "1.7.1":
                        self._save_config(rest_settings, master)

                    if create_buckets:
                        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('default', RestConnection(master)),
                                        msg="bucket 'default' does not exist..")
                    if insert_data:
                        self._verify_data(master, rest, inserted_keys)

        # rolling upgrade
        else:
            version = input.test_params['version']
            appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
            # rebalance node out
            # remove membase from node
            # install destination version onto node
            # rebalance it back into the cluster
            for server_index in range(len(servers)):
                server = servers[server_index]
                master = servers[server_index - 1]
                log.info("current master is {0}, rolling node is {1}".format(master, server))

                rest = RestConnection(master)
                nodes = rest.node_statuses()
                allNodes = []
                toBeEjectedNodes = []
                for node in nodes:
                    allNodes.append(node.id)
                    if "{0}:{1}".format(node.ip, node.port) == "{0}:{1}".format(server.ip, server.port):
                        toBeEjectedNodes.append(node.id)
                helper = RestHelper(rest)
                removed = helper.remove_nodes(knownNodes=allNodes, ejectedNodes=toBeEjectedNodes)
                self.assertTrue(removed, msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
                remote = RemoteMachineShellConnection(server)
                remote.membase_uninstall()
                remote.couchbase_uninstall()
                remote.download_build(appropriate_build)
                remote.membase_install(appropriate_build)
                RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                log.info("sleep for 10 seconds to wait for membase-server to start...")
                time.sleep(TIMEOUT_SECS)
                rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

                #readd this to the cluster
                ClusterOperationHelper.add_all_nodes_or_assert(master, [server], rest_settings, self)
                nodes = rest.node_statuses()
                otpNodeIds = []
                for node in nodes:
                    otpNodeIds.append(node.id)
                rebalanceStarted = rest.rebalance(otpNodeIds, [])
                self.assertTrue(rebalanceStarted,
                                "unable to start rebalance on master node {0}".format(master.ip))
                log.info('started rebalance operation on master node {0}'.format(master.ip))
                rebalanceSucceeded = rest.monitorRebalance()
                self.assertTrue(rebalanceSucceeded,
                                "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
                #ClusterOperationHelper.verify_persistence(servers, self)

            #TODO: how can i verify that the cluster init config is preserved
            # verify data on upgraded nodes
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('default', RestConnection(master)),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                self._verify_data(master, rest, inserted_keys)
                rest = RestConnection(master)
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(bucket_data[bucket.name]["inserted_keys"],
                                                               master,
                                                               bucket.name, self)
Example #9
    def _install_and_upgrade(self, initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        log = logger.Logger.get_logger()
        input = TestInputSingleton.input
        version = input.test_params['version']
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        is_amazon = False
        if input.test_params.get('amazon',False):
            is_amazon = True
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        #release_builds = BuildQuery().get_all_release_builds(initial_version)

        #if initial_version == "1.7.2":
         #   initial_version = "1.7.2r-20"
        older_build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
                                                              os_architecture=info.architecture_type,
                                                              build_version=initial_version,
                                                              product='membase-server-enterprise', is_amazon=is_amazon)
        if info.type.lower() == 'windows':
            if older_build.product_version.startswith("1.8"):
                abbr_product = "cb"
            else:
                abbr_product = "mb"
            remote.download_binary_in_win(older_build.url, abbr_product, initial_version)
            remote.membase_install_win(older_build, initial_version)
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
            bucket_data = {}
            if initialize_cluster:
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                if create_buckets:
                    _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
            if version.startswith("1.8"):
                abbr_product = "cb"
            appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
            remote.download_binary_in_win(appropriate_build.url, abbr_product, version)
            remote.stop_membase()
            log.info("###### START UPGRADE. #########")
            remote.membase_upgrade_win(info.architecture_type, info.windows_name, version, initial_version)
            remote.disconnect()
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

            pools_info = rest.get_pools_info()

            rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
            time.sleep(TIMEOUT_SECS)
            # verify admin_creds still set

            self.assertTrue(pools_info['implementationVersion'], appropriate_build.product_version)
            if initialize_cluster:
                #TODO: how can i verify that the cluster init config is preserved
                if create_buckets:
                    self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('bucket-0', rest),
                                    msg="bucket 'default' does not exist..")
                if insert_data:
                    buckets = rest.get_buckets()
                    for bucket in buckets:
                        BucketOperationHelper.keys_exist_or_assert(bucket_data[bucket.name]["inserted_keys"],
                                                                   server,
                                                                   bucket.name, self)
        else:
            log.error("This is not windows server!")
Example #10
    def _install_and_upgrade(
        self, initial_version="1.6.5.3", initialize_cluster=False, create_buckets=False, insert_data=False
    ):
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        save_upgrade_config = False
        if initial_version.startswith("1.7") and input.test_params["version"].startswith("1.8"):
            save_upgrade_config = True
        is_amazon = False
        if input.test_params.get("amazon", False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith("1.7"):
            product = "membase-server-enterprise"
        else:
            product = "couchbase-server-enterprise"
        remote = RemoteMachineShellConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        # check to see if we are installing from latestbuilds or releases
        # note: for newer releases (1.8.0) even release versions can have the
        #  form 1.8.0r-55
        if re.search("r", initial_version):
            builds, changes = BuildQuery().get_all_builds()
            older_build = BuildQuery().find_membase_build(
                builds,
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon,
            )
        else:
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon,
            )
        remote.stop_membase()
        remote.stop_couchbase()
        remote.download_build(older_build)
        # now let's install ?
        remote.install_server(older_build)
        rest = RestConnection(server)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster(rest_settings.rest_username, rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
        version = input.test_params["version"]

        appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))

        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build, save_upgrade_config=save_upgrade_config)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

        pools_info = rest.get_pools_info()

        rest.init_cluster(rest_settings.rest_username, rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        # verify admin_creds still set

        self.assertTrue(pools_info["implementationVersion"], appropriate_build.product_version)
        if initialize_cluster:
            # TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(
                    BucketOperationHelper.wait_for_bucket_creation("bucket-0", rest),
                    msg="bucket 'default' does not exist..",
                )
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], server, bucket.name, self
                    )
Example #11
    def test_backup_upgrade_restore_default(self):
        if len(self.servers) < 2:
            self.log.error("At least 2 servers required for this test ..")
            return
        original_set = copy.copy(self.servers)
        worker = self.servers[len(self.servers) - 1]
        self.servers = self.servers[:len(self.servers)-1]
        shell = RemoteMachineShellConnection(self.master)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        fin = o[0]
        shell.disconnect()
        initial_version = self.input.param("initial_version", fin)
        final_version = self.input.param("final_version", fin)
        if initial_version==final_version:
            self.log.error("Same initial and final versions ..")
            return
        if not final_version.startswith('2.0'):
            self.log.error("Upgrade test not set to run from 1.8.1 -> 2.0 ..")
            return
        builds, changes = BuildQuery().get_all_builds(version=final_version)
        product = 'couchbase-server-enterprise'
        #CASE where the worker isn't a 2.0+
        worker_flag = 0
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        temp = o[0]
        if not temp.startswith('2.0'):
            worker_flag = 1
        if worker_flag == 1:
            self.log.info("Loading version {0} on worker.. ".format(final_version))
            remote = RemoteMachineShellConnection(worker)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                  info.architecture_type, final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(older_build)
            remote.install_server(older_build)
            remote.disconnect()

        remote_tmp = "{1}/{0}".format("backup", "/root")
        perm_comm = "mkdir -p {0}".format(remote_tmp)
        if not initial_version == fin:
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info("Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                      info.architecture_type, initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

        self.common_setUp()
        bucket = "default"
        if len(self.servers) > 1:
            self.add_nodes_and_rebalance()
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket,
                                                                                             ram_load_ratio=0.5,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             delete_ratio=0.1,
                                                                                             number_of_threads=2)
        if len(self.servers) > 1:
            rest = RestConnection(self.master)
            self.assertTrue(RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command(perm_comm)
        shell.log_command_output(o, r)
        shell.disconnect()

        #Backup
        #BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command("/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
                                                            self.master.ip, self.master.port, remote_tmp))
        shell.disconnect()
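        # Hedged sketch (not part of the original test): before deleting the
        # bucket, one could confirm that cbbackup actually wrote files into
        # remote_tmp.  It assumes execute_command() returns (output, error)
        # lists, as it does elsewhere in this test.
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command("ls -A {0}".format(remote_tmp))
        shell.log_command_output(o, r)
        self.assertTrue(len(o) > 0,
                        "backup directory {0} appears to be empty".format(remote_tmp))
        shell.disconnect()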
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        time.sleep(30)

        #Upgrade
        for server in self.servers:
            self.log.info("Upgrading to current version {0}".format(final_version))
            remote = RemoteMachineShellConnection(server)
            info = remote.extract_remote_info()
            new_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                info.architecture_type, final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(new_build)
            remote.install_server(new_build)
            rest = RestConnection(server)
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(server.rest_username, server.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
        time.sleep(30)
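        # Hedged sketch: the fixed 30 second sleep above could be complemented
        # by polling every upgraded node until ns_server answers REST calls,
        # re-using RestHelper.is_ns_server_running() as before (its boolean
        # return value is an assumption based on how it is used here).
        for server in self.servers:
            ready = RestHelper(RestConnection(server)).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            self.assertTrue(ready,
                            "ns_server not responding on {0} after upgrade".format(server.ip))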

        #Restore
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        #BackupHelper(self.master, self).restore(backup_location=remote_tmp, moxi_port=info.moxi)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command("/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
                                                            self.master.ip, self.master.port, remote_tmp, bucket))
        shell.disconnect()
        time.sleep(60)
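        # Hedged sketch: instead of relying only on the fixed 60 second sleep,
        # the test could wait for the persistence queue to drain after the
        # restore, using the same stats helper as before the backup.
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed after restore")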
        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")
        time.sleep(30)
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        rest = RestConnection(self.master)
        helper = RestHelper(rest)
        nodes = rest.node_statuses()
        master_id = rest.get_nodes_self().id
        if len(self.servers) > 1:
            removed = helper.remove_nodes(knownNodes=[node.id for node in nodes],
                                          ejectedNodes=[node.id for node in nodes if node.id != master_id],
                                          wait_for_rebalance=True)

        shell = RemoteMachineShellConnection(worker)
        shell.remove_directory(remote_tmp)
        shell.disconnect()

        self.servers = copy.copy(original_set)
        if initial_version == fin:
            builds, changes = BuildQuery().get_all_builds(version=initial_version)
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info("Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                      info.architecture_type, initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()
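    # Hedged sketch (illustrative only): the stop/uninstall/download/install/
    # init sequence appears three times in this example and could be folded
    # into a helper like the one below.  The helper name is hypothetical;
    # every call it makes mirrors one already used above.
    def _reinstall_and_init(self, server, builds, product, target_version):
        self.log.info("Installing {0} on {1}".format(target_version, server.ip))
        remote = RemoteMachineShellConnection(server)
        info = remote.extract_remote_info()
        build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                        info.architecture_type, target_version)
        remote.stop_couchbase()
        remote.couchbase_uninstall()
        remote.download_build(build)
        remote.install_server(build)
        rest = RestConnection(server)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster(server.rest_username, server.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        remote.disconnect()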
Beispiel #12
0
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        log = logger.Logger.get_logger()
        input = TestInputSingleton.input
        version = input.test_params['version']
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        #release_builds = BuildQuery().get_all_release_builds(initial_version)

        #if initial_version == "1.7.2":
        #   initial_version = "1.7.2r-20"
        older_build = BuildQuery().find_membase_release_build(
            deliverable_type=info.deliverable_type,
            os_architecture=info.architecture_type,
            build_version=initial_version,
            product='membase-server-enterprise',
            is_amazon=is_amazon)
        if info.type.lower() == 'windows':
            if older_build.product_version.startswith("1.8"):
                abbr_product = "cb"
            else:
                abbr_product = "mb"
            remote.download_binary_in_win(older_build.url, abbr_product,
                                          initial_version)
            remote.install_server_win(older_build, initial_version)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(rest_settings.rest_username,
                              rest_settings.rest_password)
            bucket_data = {}
            if initialize_cluster:
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                if create_buckets:
                    _create_load_multiple_bucket(self,
                                                 server,
                                                 bucket_data,
                                                 howmany=2)
            if version.startswith("1.8"):
                abbr_product = "cb"
            appropriate_build = _get_build(servers[0],
                                           version,
                                           is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url,
                            msg="unable to find build {0}".format(version))
            remote.download_binary_in_win(appropriate_build.url, abbr_product,
                                          version)
            remote.stop_membase()
            log.info("###### START UPGRADE. #########")
            remote.membase_upgrade_win(info.architecture_type,
                                       info.windows_name, version,
                                       initial_version)
            remote.disconnect()
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)

            pools_info = rest.get_pools_info()

            rest.init_cluster(rest_settings.rest_username,
                              rest_settings.rest_password)
            time.sleep(TIMEOUT_SECS)
            # verify admin_creds still set

            self.assertTrue(pools_info['implementationVersion'],
                            appropriate_build.product_version)
            if initialize_cluster:
                #TODO: how can i verify that the cluster init config is preserved
                if create_buckets:
                    self.assertTrue(
                        BucketOperationHelper.wait_for_bucket_creation(
                            'bucket-0', rest),
                        msg="bucket 'default' does not exist..")
                if insert_data:
                    buckets = rest.get_buckets()
                    for bucket in buckets:
                        BucketOperationHelper.keys_exist_or_assert(
                            bucket_data[bucket.name]["inserted_keys"], server,
                            bucket.name, self)
        else:
            log.error("This is not windows server!")
Beispiel #13
0
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             create_buckets=False,
                             insert_data=False,
                             start_upgraded_first=True,
                             load_ratio=-1,
                             roll_upgrade=False,
                             upgrade_path=[]):
        node_upgrade_path = []
        node_upgrade_path.extend(upgrade_path)
        #then start them in whatever order you want
        inserted_keys = []
        log = logger.Logger.get_logger()
        if roll_upgrade:
            log.info("performing a rolling upgrade")
        input = TestInputSingleton.input
        input_version = input.test_params['version']
        rest_settings = input.membase_settings
        servers = input.servers
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True

        # install older build on all nodes
        for server in servers:
            remote = RemoteMachineShellConnection(server)
            rest = RestConnection(server)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product='membase-server-enterprise',
                is_amazon=is_amazon)

            remote.membase_uninstall()
            remote.couchbase_uninstall()
            if older_build.product_version.startswith("1.8"):
                abbr_product = "cb"
            else:
                abbr_product = "mb"
            remote.download_binary_in_win(older_build.url, abbr_product,
                                          initial_version)
            # now install the older build
            remote.install_server_win(older_build, initial_version)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(rest_settings.rest_username,
                              rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()

        bucket_data = {}
        master = servers[0]
        # cluster all the nodes together
        ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                       rest_settings, self)
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(
            rebalanceStarted,
            "unable to start rebalance on master node {0}".format(master.ip))
        log.info('started rebalance operation on master node {0}'.format(
            master.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(
            rebalanceSucceeded,
            "rebalance operation for nodes: {0} was not successful".format(
                otpNodeIds))

        if create_buckets:
            # create the buckets and wait for them to be ready; the bucket
            # port should eventually be configurable and passed in as a test
            # parameter

            self._create_default_bucket(master)
            inserted_keys = self._load_data(master, load_ratio)
            _create_load_multiple_bucket(self, master, bucket_data, howmany=2)

        #if initial_version == "1.7.0" or initial_version == "1.7.1":
        #   self._save_config(rest_settings, master)

        node_upgrade_path.append(input_version)
        # non-rolling upgrade: shut everything down, then upgrade in place
        log.info("Upgrade path: {0} -> {1}".format(initial_version,
                                                   node_upgrade_path))
        log.info("List of servers {0}".format(servers))
        if not roll_upgrade:
            for version in node_upgrade_path:
                if version != initial_version:
                    log.info(
                        "SHUTDOWN ALL CB OR MB SERVERS IN CLUSTER BEFORE DOING UPGRADE"
                    )
                    for server in servers:
                        shell = RemoteMachineShellConnection(server)
                        shell.stop_membase()
                        shell.disconnect()
                    log.info("Upgrading to version {0}".format(version))
                    appropriate_build = _get_build(servers[0],
                                                   version,
                                                   is_amazon=is_amazon)
                    self.assertTrue(
                        appropriate_build.url,
                        msg="unable to find build {0}".format(version))
                    for server in servers:
                        remote = RemoteMachineShellConnection(server)
                        if version.startswith("1.8"):
                            abbr_product = "cb"
                        remote.download_binary_in_win(appropriate_build.url,
                                                      abbr_product, version)
                        log.info("###### START UPGRADE. #########")
                        remote.membase_upgrade_win(info.architecture_type,
                                                   info.windows_name, version,
                                                   initial_version)
                        RestHelper(
                            RestConnection(server)).is_ns_server_running(
                                testconstants.NS_SERVER_TIMEOUT)

                        #verify admin_creds still set
                        pools_info = RestConnection(server).get_pools_info()
                        self.assertTrue(pools_info['implementationVersion'],
                                        appropriate_build.product_version)

                        if not start_upgraded_first:
                            remote.stop_membase()

                        remote.disconnect()
                    if not start_upgraded_first:
                        log.info("Starting all servers together")
                        self._start_membase_servers(servers)
                    time.sleep(TIMEOUT_SECS)

                    if create_buckets:
                        self.assertTrue(
                            BucketOperationHelper.wait_for_bucket_creation(
                                'default', RestConnection(master)),
                            msg="bucket 'default' does not exist..")
                    if insert_data:
                        self._verify_data(master, rest, inserted_keys)

        # rolling upgrade
        else:
            version = input.test_params['version']
            if version.startswith("1.8"):
                abbr_product = "cb"
            appropriate_build = _get_build(servers[0],
                                           version,
                                           is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url,
                            msg="unable to find build {0}".format(version))
            # for each node: rebalance it out of the cluster, remove membase,
            # install the destination version, then rebalance it back in
            # (a condensed helper sketch follows after this example)
            for server_index in range(len(servers)):
                server = servers[server_index]
                master = servers[server_index - 1]
                log.info("current master is {0}, rolling node is {1}".format(
                    master, server))

                rest = RestConnection(master)
                nodes = rest.node_statuses()
                allNodes = []
                toBeEjectedNodes = []
                for node in nodes:
                    allNodes.append(node.id)
                    if "{0}:{1}".format(node.ip,
                                        node.port) == "{0}:{1}".format(
                                            server.ip, server.port):
                        toBeEjectedNodes.append(node.id)
                helper = RestHelper(rest)
                removed = helper.remove_nodes(knownNodes=allNodes,
                                              ejectedNodes=toBeEjectedNodes)
                self.assertTrue(
                    removed,
                    msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
                remote = RemoteMachineShellConnection(server)
                remote.membase_uninstall()
                remote.couchbase_uninstall()
                if appropriate_build.product == 'membase-server-enterprise':
                    abbr_product = "mb"
                else:
                    abbr_product = "cb"
                remote.download_binary_in_win(appropriate_build.url,
                                              abbr_product, version)
                remote.install_server_win(appropriate_build, version)
                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                time.sleep(TIMEOUT_SECS)
                rest.init_cluster(rest_settings.rest_username,
                                  rest_settings.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

                # re-add this node to the cluster
                ClusterOperationHelper.add_all_nodes_or_assert(
                    master, [server], rest_settings, self)
                nodes = rest.node_statuses()
                log.info("wait 30 seconds before asking the older node to start the rebalance")
                time.sleep(30)
                otpNodeIds = []
                for node in nodes:
                    otpNodeIds.append(node.id)
                rebalanceStarted = rest.rebalance(otpNodeIds, [])
                self.assertTrue(
                    rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(
                        master.ip))
                log.info(
                    'started rebalance operation on master node {0}'.format(
                        master.ip))
                rebalanceSucceeded = rest.monitorRebalance()
                self.assertTrue(
                    rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".
                    format(otpNodeIds))
                #ClusterOperationHelper.verify_persistence(servers, self)

            #TODO: how can i verify that the cluster init config is preserved
            # verify data on upgraded nodes
            if create_buckets:
                self.assertTrue(
                    BucketOperationHelper.wait_for_bucket_creation(
                        'default', RestConnection(master)),
                    msg="bucket 'default' does not exist..")
            if insert_data:
                self._verify_data(master, rest, inserted_keys)
                rest = RestConnection(master)
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], master,
                        bucket.name, self)
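    # Hedged sketch: the per-node rolling-upgrade step above (rebalance the
    # node out, uninstall, install the target build, rebalance it back in)
    # could be wrapped in one helper so the loop body stays short.  The helper
    # name is hypothetical; every call mirrors one used in this example.
    def _roll_one_node(self, master, server, build, version, rest_settings):
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        ejected = [node.id for node in nodes
                   if "{0}:{1}".format(node.ip, node.port) ==
                      "{0}:{1}".format(server.ip, server.port)]
        self.assertTrue(RestHelper(rest).remove_nodes(knownNodes=[node.id for node in nodes],
                                                      ejectedNodes=ejected),
                        msg="Unable to remove nodes {0}".format(ejected))
        remote = RemoteMachineShellConnection(server)
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        abbr_product = "mb" if build.product == 'membase-server-enterprise' else "cb"
        remote.download_binary_in_win(build.url, abbr_product, version)
        remote.install_server_win(build, version)
        remote.disconnect()
        # re-initialise the freshly installed node before re-adding it
        node_rest = RestConnection(server)
        RestHelper(node_rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        node_rest.init_cluster(rest_settings.rest_username, rest_settings.rest_password)
        node_rest.init_cluster_memoryQuota(
            memoryQuota=node_rest.get_nodes_self().mcdMemoryReserved)
        ClusterOperationHelper.add_all_nodes_or_assert(master, [server], rest_settings, self)
        otpNodeIds = [node.id for node in rest.node_statuses()]
        self.assertTrue(rest.rebalance(otpNodeIds, []),
                        "unable to start rebalance on master node {0}".format(master.ip))
        self.assertTrue(rest.monitorRebalance(),
                        "rebalance for nodes {0} was not successful".format(otpNodeIds))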