def install(self, params):
    """Install membase-server on the machine described by ``params["server"]``.

    params: dict with at least "server", "product", "version"; optional
    "vbuckets" (list whose first element is the vbucket count).
    Windows hosts get the binary downloaded and installed via the
    win-specific helpers; other hosts download the build, install it under
    the server's data path, then wait for ns_server to come up.

    NOTE(review): relies on a module-level ``log`` logger — the original
    commented-out ``logger.new_logger("Installer")`` suggests one exists;
    confirm.
    """
    build = self.build_url(params)
    remote_client = RemoteMachineShellConnection(params["server"])
    info = remote_client.extract_remote_info()
    # renamed from `type` to avoid shadowing the builtin
    os_type = info.type.lower()
    server = params["server"]
    if "vbuckets" in params:
        vbuckets = int(params["vbuckets"][0])
    else:
        vbuckets = None
    if os_type == "windows":
        # `build` was already computed above; the original recomputed it here
        remote_client.download_binary_in_win(build.url, params["product"],
                                             params["version"])
        remote_client.membase_install_win(build, params["version"])
    else:
        downloaded = remote_client.download_build(build)
        if not downloaded:
            # fixed: original passed `downloaded` (a falsy value) as the
            # log format string and the message as its argument
            log.error("unable to download binaries : {0}".format(build.url))
        path = server.data_path or "/tmp"
        remote_client.membase_install(build, path=path, vbuckets=vbuckets)
        # give ns_server up to 60s to report itself running
        ready = RestHelper(RestConnection(params["server"])).is_ns_server_running(60)
        if not ready:
            log.error("membase-server did not start...")
        log.info("wait 5 seconds for membase server to start")
        time.sleep(5)
def install(self, params):
    """Install membase-server on ``params["server"]``.

    Variant that, after install, optionally pins the cluster vbucket count
    via REST when params contains "rest_vbuckets".

    params: dict with at least "server", "product", "version"; optional
    "vbuckets" (list whose first element is the vbucket count) and
    "rest_vbuckets" (int-convertible).

    NOTE(review): relies on a module-level ``log`` logger — confirm it exists.
    """
    build = self.build_url(params)
    remote_client = RemoteMachineShellConnection(params["server"])
    info = remote_client.extract_remote_info()
    # renamed from `type` to avoid shadowing the builtin
    os_type = info.type.lower()
    server = params["server"]
    if "vbuckets" in params:
        vbuckets = int(params["vbuckets"][0])
    else:
        vbuckets = None
    if os_type == "windows":
        # `build` was already computed above; the original recomputed it here
        remote_client.download_binary_in_win(build.url, params["product"],
                                             params["version"])
        remote_client.membase_install_win(build, params["version"])
    else:
        downloaded = remote_client.download_build(build)
        if not downloaded:
            # fixed: original passed `downloaded` (a falsy value) as the
            # log format string and the message as its argument
            log.error('unable to download binaries : {0}'.format(build.url))
        #TODO: need separate methods in remote_util for couchbase and membase install
        path = server.data_path or '/tmp'
        remote_client.membase_install(build, path=path, vbuckets=vbuckets)
        log.info('wait 5 seconds for membase server to start')
        time.sleep(5)
    if "rest_vbuckets" in params:
        rest_vbuckets = int(params["rest_vbuckets"])
        ClusterOperationHelper.set_vbuckets(server, rest_vbuckets)
def install(self, params):
    """Install membase-server on ``params["server"]``.

    params: dict with at least "server", "product", "version"; optional
    "vbuckets" (list whose first element is the vbucket count).
    Windows hosts use the win-specific download/install helpers; other
    hosts download the build, install under the server's data path, then
    wait for ns_server to come up.

    NOTE(review): relies on a module-level ``log`` logger — confirm it exists.
    """
    build = self.build_url(params)
    remote_client = RemoteMachineShellConnection(params["server"])
    info = remote_client.extract_remote_info()
    # renamed from `type` to avoid shadowing the builtin
    os_type = info.type.lower()
    server = params["server"]
    if "vbuckets" in params:
        vbuckets = int(params["vbuckets"][0])
    else:
        vbuckets = None
    if os_type == "windows":
        # `build` was already computed above; the original recomputed it here
        remote_client.download_binary_in_win(build.url, params["product"],
                                             params["version"])
        remote_client.membase_install_win(build, params["version"])
    else:
        downloaded = remote_client.download_build(build)
        if not downloaded:
            # fixed: original passed `downloaded` (a falsy value) as the
            # log format string and the message as its argument
            log.error('unable to download binaries : {0}'.format(build.url))
        path = server.data_path or '/tmp'
        remote_client.membase_install(build, path=path, vbuckets=vbuckets)
        # give ns_server up to 60s to report itself running
        ready = RestHelper(RestConnection(
            params["server"])).is_ns_server_running(60)
        if not ready:
            log.error("membase-server did not start...")
        log.info('wait 5 seconds for membase server to start')
        time.sleep(5)
def _install_and_upgrade(self, initial_version='1.6.5.3',
                         initialize_cluster=False,
                         create_buckets=False,
                         insert_data=False):
    """Install `initial_version` of membase on the first server, optionally
    initialize the cluster / create buckets / load data, then upgrade in
    place to the version from the test params and verify the upgrade stuck.

    Windows-only: logs an error and does nothing on other platforms.
    """
    log = logger.Logger.get_logger()
    # NOTE: `input` shadows the builtin; kept as-is (doc-only change)
    input = TestInputSingleton.input
    version = input.test_params['version']
    rest_settings = input.membase_settings
    servers = input.servers
    server = servers[0]
    is_amazon = False
    if input.test_params.get('amazon', False):
        is_amazon = True
    remote = RemoteMachineShellConnection(server)
    rest = RestConnection(server)
    info = remote.extract_remote_info()
    # start from a clean machine: remove any membase or couchbase install
    remote.membase_uninstall()
    remote.couchbase_uninstall()
    builds, changes = BuildQuery().get_all_builds()
    #release_builds = BuildQuery().get_all_release_builds(initial_version)
    #if initial_version == "1.7.2":
    #    initial_version = "1.7.2r-20"
    older_build = BuildQuery().find_membase_release_build(
        deliverable_type=info.deliverable_type,
        os_architecture=info.architecture_type,
        build_version=initial_version,
        product='membase-server-enterprise',
        is_amazon=is_amazon)
    if info.type.lower() == 'windows':
        # product abbreviation drives the downloaded binary's file name:
        # couchbase branding from 1.8 on, membase before
        if older_build.product_version.startswith("1.8"):
            abbr_product = "cb"
        else:
            abbr_product = "mb"
        remote.download_binary_in_win(older_build.url, abbr_product,
                                      initial_version)
        remote.membase_install_win(older_build, initial_version)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self, server, bucket_data,
                                             howmany=2)
        if version.startswith("1.8"):
            abbr_product = "cb"
        appropriate_build = _get_build(servers[0], version,
                                       is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))
        remote.download_binary_in_win(appropriate_build.url, abbr_product,
                                      version)
        # server must be stopped before the windows in-place upgrade runs
        remote.stop_membase()
        log.info("###### START UPGRADE. #########")
        remote.membase_upgrade_win(info.architecture_type, info.windows_name,
                                   version, initial_version)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        pools_info = rest.get_pools_info()
        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        # verify admin_creds still set
        # NOTE(review): assertTrue(x, msg) only checks truthiness of x —
        # an equality assertion against product_version may have been
        # intended here; confirm
        self.assertTrue(pools_info['implementationVersion'],
                        appropriate_build.product_version)
        if initialize_cluster:
            #TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(
                    BucketOperationHelper.wait_for_bucket_creation('bucket-0',
                                                                   rest),
                    msg="bucket 'default' does not exist..")
            if insert_data:
                # verify every key loaded before the upgrade is still there
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"],
                        server, bucket.name, self)
    else:
        log.error("This is not windows server!")
def _install_and_upgrade(self, initial_version='1.6.5.3',
                         create_buckets=False,
                         insert_data=False,
                         start_upgraded_first=True,
                         load_ratio=-1,
                         roll_upgrade=False,
                         upgrade_path=[]):
    """Install `initial_version` on every server, cluster and rebalance
    them, optionally load data, then upgrade either all nodes at once
    (following `upgrade_path`) or one node at a time (rolling upgrade:
    eject, reinstall, re-add, rebalance), verifying data afterwards.

    NOTE(review): `upgrade_path=[]` is a mutable default argument; the
    body only reads it (copied into node_upgrade_path), so it is not
    mutated across calls, but `upgrade_path=None` would be safer.
    """
    node_upgrade_path = []
    node_upgrade_path.extend(upgrade_path)
    #then start them in whatever order you want
    inserted_keys = []
    log = logger.Logger.get_logger()
    if roll_upgrade:
        log.info("performing a rolling upgrade")
    # NOTE: `input` shadows the builtin; kept as-is (doc-only change)
    input = TestInputSingleton.input
    input_version = input.test_params['version']
    rest_settings = input.membase_settings
    servers = input.servers
    is_amazon = False
    if input.test_params.get('amazon',False):
        is_amazon = True
    # install older build on all nodes
    for server in servers:
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        older_build = BuildQuery().find_membase_release_build(
            deliverable_type=info.deliverable_type,
            os_architecture=info.architecture_type,
            build_version=initial_version,
            product='membase-server-enterprise',
            is_amazon=is_amazon)
        # clean machine first: remove any membase or couchbase install
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        # binary file-name abbreviation: couchbase branding from 1.8 on
        if older_build.product_version.startswith("1.8"):
            abbr_product = "cb"
        else:
            abbr_product = "mb"
        remote.download_binary_in_win(older_build.url, abbr_product,
                                      initial_version)
        #now let's install ?
        remote.membase_install_win(older_build, initial_version)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        rest.init_cluster_memoryQuota(
            memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        remote.disconnect()
    bucket_data = {}
    master = servers[0]
    # cluster all the nodes together
    ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                   rest_settings, self)
    rest = RestConnection(master)
    nodes = rest.node_statuses()
    otpNodeIds = []
    for node in nodes:
        otpNodeIds.append(node.id)
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(master.ip))
    log.info('started rebalance operation on master node {0}'.format(master.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
    if create_buckets:
        #let's create buckets
        #wait for the bucket
        #bucket port should also be configurable , pass it as the
        #parameter to this test ? later
        self._create_default_bucket(master)
        inserted_keys = self._load_data(master, load_ratio)
        _create_load_multiple_bucket(self, master, bucket_data, howmany=2)
    #if initial_version == "1.7.0" or initial_version == "1.7.1":
    #    self._save_config(rest_settings, master)
    node_upgrade_path.append(input_version)
    #if we dont want to do roll_upgrade ?
    log.info("Upgrade path: {0} -> {1}".format(initial_version, node_upgrade_path))
    log.info("List of servers {0}".format(servers))
    if not roll_upgrade:
        # all-at-once upgrade: walk the path, stopping every server before
        # each hop and upgrading them all
        for version in node_upgrade_path:
            # NOTE(review): identity (`is not`) comparison of strings —
            # works only by CPython interning accident; `!=` was likely meant
            if version is not initial_version:
                log.info("SHUTDOWN ALL CB OR MB SERVERS IN CLUSTER BEFORE DOING UPGRADE")
                for server in servers:
                    shell = RemoteMachineShellConnection(server)
                    shell.stop_membase()
                    shell.disconnect()
                log.info("Upgrading to version {0}".format(version))
                appropriate_build = _get_build(servers[0], version,
                                               is_amazon=is_amazon)
                self.assertTrue(appropriate_build.url,
                                msg="unable to find build {0}".format(version))
                for server in servers:
                    remote = RemoteMachineShellConnection(server)
                    if version.startswith("1.8"):
                        abbr_product = "cb"
                    remote.download_binary_in_win(appropriate_build.url,
                                                  abbr_product, version)
                    log.info("###### START UPGRADE. #########")
                    remote.membase_upgrade_win(info.architecture_type,
                                               info.windows_name, version,
                                               initial_version)
                    RestHelper(RestConnection(server)).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                    #verify admin_creds still set
                    # NOTE(review): assertTrue(x, msg) only checks truthiness
                    # of x — equality may have been intended; confirm
                    pools_info = RestConnection(server).get_pools_info()
                    self.assertTrue(pools_info['implementationVersion'],
                                    appropriate_build.product_version)
                    if not start_upgraded_first:
                        remote.stop_membase()
                    remote.disconnect()
                if not start_upgraded_first:
                    log.info("Starting all servers together")
                    self._start_membase_servers(servers)
                time.sleep(TIMEOUT_SECS)
                if create_buckets:
                    self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('default', RestConnection(master)),
                                    msg="bucket 'default' does not exist..")
                if insert_data:
                    self._verify_data(master, rest, inserted_keys)
    # rolling upgrade
    else:
        version = input.test_params['version']
        if version.startswith("1.8"):
            abbr_product = "cb"
        appropriate_build = _get_build(servers[0], version,
                                       is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))
        # rebalance node out
        # remove membase from node
        # install destination version onto node
        # rebalance it back into the cluster
        for server_index in range(len(servers)):
            server = servers[server_index]
            # previous node in the list acts as master while this one rolls
            master = servers[server_index - 1]
            log.info("current master is {0}, rolling node is {1}".format(master, server))
            rest = RestConnection(master)
            nodes = rest.node_statuses()
            allNodes = []
            toBeEjectedNodes = []
            for node in nodes:
                allNodes.append(node.id)
                if "{0}:{1}".format(node.ip, node.port) == "{0}:{1}".format(server.ip, server.port):
                    toBeEjectedNodes.append(node.id)
            helper = RestHelper(rest)
            removed = helper.remove_nodes(knownNodes=allNodes,
                                          ejectedNodes=toBeEjectedNodes)
            self.assertTrue(removed,
                            msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
            remote = RemoteMachineShellConnection(server)
            remote.membase_uninstall()
            remote.couchbase_uninstall()
            if appropriate_build.product == 'membase-server-enterprise':
                abbr_product = "mb"
            else:
                abbr_product = "cb"
            remote.download_binary_in_win(appropriate_build.url,
                                          abbr_product, version)
            remote.membase_install_win(appropriate_build, version)
            # remote.membase_install_win(appropriate_build)
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
            time.sleep(TIMEOUT_SECS)
            rest.init_cluster_port(rest_settings.rest_username,
                                   rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
            #readd this to the cluster
            ClusterOperationHelper.add_all_nodes_or_assert(master, [server],
                                                           rest_settings, self)
            nodes = rest.node_statuses()
            log.info("wait 30 seconds before asking older node for start rebalance")
            time.sleep(30)
            otpNodeIds = []
            for node in nodes:
                otpNodeIds.append(node.id)
            rebalanceStarted = rest.rebalance(otpNodeIds, [])
            self.assertTrue(rebalanceStarted,
                            "unable to start rebalance on master node {0}".format(master.ip))
            log.info('started rebalance operation on master node {0}'.format(master.ip))
            rebalanceSucceeded = rest.monitorRebalance()
            self.assertTrue(rebalanceSucceeded,
                            "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
            #ClusterOperationHelper.verify_persistence(servers, self)
        #TODO: how can i verify that the cluster init config is preserved
        # verify data on upgraded nodes
        if create_buckets:
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('default', RestConnection(master)),
                            msg="bucket 'default' does not exist..")
        if insert_data:
            self._verify_data(master, rest, inserted_keys)
            rest = RestConnection(master)
            buckets = rest.get_buckets()
            for bucket in buckets:
                BucketOperationHelper.keys_exist_or_assert(
                    bucket_data[bucket.name]["inserted_keys"],
                    master, bucket.name, self)
def test_win_uninstall_standalone(self):
    """Uninstall Couchbase server/single-server from each Windows machine
    in self.servers via the remote 'removeme' scheduled task.

    Reads test params: 'win' (OS version), 'ostype' ('32'/'64'),
    'product' (cse/csse/csc/cssc).

    Fix: the original set ``ok = False`` on an unknown OS but never
    checked it, so execution continued and crashed with NameError on
    `Arch`; an unknown product similarly left `name`/`server_path`
    undefined. Now gated with `ok` like the sibling test_win_install.
    Also fixed the "Unknon" typo in the error message.
    """
    query = BuildQuery()
    builds, changes = query.get_all_builds()
    os_version = self.input.test_params['win']
    task = 'uninstall'
    ok = True
    ex_type = 'exe'
    bat_file = 'uninstall.bat'
    version_file = 'VERSION.txt'
    if self.input.test_params["ostype"] == '64':
        Arch = 'x86_64'
        os_type = '64'
    elif self.input.test_params["ostype"] == '32':
        Arch = 'x86'
        os_type = '32'
    else:
        ok = False
        self.log.error("Unknown os version.")
    product = self.input.test_params["product"]
    if product == 'cse':
        name = 'couchbase-server-enterprise'
    elif product == 'csse':
        name = 'couchbase-single-server-enterprise'
    elif product == 'csc':
        name = 'couchbase-server-community'
    elif product == 'cssc':
        name = 'couchbase-single-server-community'
    else:
        ok = False
        self.log.error("Unknown product type.")
    # no need later
    cb_server_alias = ['cse', 'csc']
    cb_single_alias = ['csse', 'cssc']
    if product in cb_server_alias:
        server_path = "/cygdrive/c/Program Files/Couchbase/Server/"
    elif product in cb_single_alias:
        server_path = "/cygdrive/c/Program Files (x86)/Couchbase/Server/"
    if not ok:
        # bail out early: with an unknown OS or product, Arch / name /
        # server_path are undefined and the loop below would crash
        self.log.error("Can not uninstall Couchbase Server.")
        return
    for serverInfo in self.servers:
        remote_client = RemoteMachineShellConnection(serverInfo)
        info = RemoteMachineShellConnection(serverInfo).extract_remote_info()
        exist = remote_client.file_exists(server_path, version_file)
        if exist:
            build_name, version = remote_client.find_build_version(server_path, version_file)
            self.log.info('build needed to do auto uninstall {0}'.format(build_name))
            # find installed build in tmp directory
            build_name = build_name.rstrip() + ".exe"
            self.log.info('Check if {0} is in tmp directory'.format(build_name))
            exist = remote_client.file_exists("/cygdrive/c/tmp/", build_name)
            if not exist:
                # the matching installer is needed to drive the uninstall;
                # download it if it is not already cached in tmp
                build = query.find_build(builds, name, ex_type, Arch, version)
                downloaded = remote_client.download_binary_in_win(build.url, product, version)
                if downloaded:
                    self.log.info('Successful download {0}_{1}.exe'.format(product, version))
                else:
                    self.log.error('Download {0}_{1}.exe failed'.format(product, version))
            # modify uninstall bat file to change build name.
            remote_client.modify_bat_file('/cygdrive/c/automation', bat_file,
                                          product, os_type, os_version, version, task)
            self.log.info('sleep for 5 seconds before running task schedule uninstall')
            time.sleep(5)
            # run task schedule to uninstall Couchbase Server
            self.log.info('Start to uninstall couchbase {0}_{1}'.format(product, version))
            output, error = remote_client.execute_command("cmd /c schtasks /run /tn removeme")
            remote_client.log_command_output(output, error)
            # uninstall is done when the VERSION file disappears
            remote_client.wait_till_file_deleted(server_path, version_file,
                                                 timeout_in_seconds=600)
            self.log.info('sleep 15 seconds before running the next job ...')
            time.sleep(15)
        else:
            self.log.info('Couchbase server may not install on this server')
def test_win_install(self):
    """Install Couchbase server/single-server on each Windows machine in
    self.servers via the remote 'installme' scheduled task, uninstalling
    any previous build first.

    Reads test params: 'version', 'win' (OS version), 'ostype'
    ('32'/'64'), 'product' (cse/csse/csc/cssc).

    Fix: the previous-install check hard-coded the *server* product path,
    so single-server products (csse/cssc) were checked in the wrong
    directory; it now uses `server_path`. Also fixed the "Unknon" typo.
    """
    query = BuildQuery()
    builds, changes = query.get_all_builds()
    version = self.input.test_params['version']
    os_version = self.input.test_params['win']
    task = 'install'
    ok = True
    ex_type = 'exe'
    bat_file = 'install.bat'
    version_file = 'VERSION.txt'
    if self.input.test_params["ostype"] == '64':
        Arch = 'x86_64'
        os_type = '64'
    elif self.input.test_params["ostype"] == '32':
        Arch = 'x86'
        os_type = '32'
    else:
        ok = False
        self.log.error("Unknown os version.")
    product = self.input.test_params["product"]
    if product == 'cse':
        name = 'couchbase-server-enterprise'
    elif product == 'csse':
        name = 'couchbase-single-server-enterprise'
    elif product == 'csc':
        name = 'couchbase-server-community'
    elif product == 'cssc':
        name = 'couchbase-single-server-community'
    else:
        ok = False
        self.log.error("Unknown product type.")
    cb_server_alias = ['cse', 'csc']
    cb_single_alias = ['csse', 'cssc']
    if product in cb_server_alias:
        server_path = "/cygdrive/c/Program Files/Couchbase/Server/"
    elif product in cb_single_alias:
        server_path = "/cygdrive/c/Program Files (x86)/Couchbase/Server/"
    if ok:
        for serverInfo in self.servers:
            remote_client = RemoteMachineShellConnection(serverInfo)
            info = RemoteMachineShellConnection(serverInfo).extract_remote_info()
            build = query.find_build(builds, name, ex_type, Arch, version)
            # check if previous couchbase server installed
            # (was hard-coded to the server product path; now uses the
            # product-specific server_path so single-server is detected too)
            exist = remote_client.file_exists(server_path, version_file)
            if exist:
                # call uninstall function to install couchbase server
                self.log.info("Start uninstall cb server on this server")
                self.test_win_uninstall(remote_client, product, os_type,
                                        os_version, version, server_path)
            else:
                self.log.info('I am free. You can install couchbase server now')
            # directory path in remote server used to create or delete directory
            dir_paths = ['/cygdrive/c/automation', '/cygdrive/c/tmp']
            remote_client.create_multiple_dir(dir_paths)
            # copy files from local server to remote server
            remote_client.copy_files_local_to_remote(
                'resources/windows/automation', '/cygdrive/c/automation')
            downloaded = remote_client.download_binary_in_win(build.url, product, version)
            if downloaded:
                self.log.info('Successful download {0}_{1}.exe'.format(product, version))
            else:
                self.log.error('Download {0}_{1}.exe failed'.format(product, version))
            remote_client.modify_bat_file('/cygdrive/c/automation', bat_file,
                                          product, os_type, os_version, version, task)
            self.log.info('sleep for 5 seconds before running task schedule install me')
            time.sleep(5)
            # run task schedule to install Couchbase Server
            output, error = remote_client.execute_command("cmd /c schtasks /run /tn installme")
            remote_client.log_command_output(output, error)
            # install is done when the VERSION file appears
            remote_client.wait_till_file_added(server_path, version_file,
                                               timeout_in_seconds=600)
            self.log.info('sleep 15 seconds before running the next job ...')
            time.sleep(15)
    else:
        self.log.error("Can not install Couchbase Server.")
def _install_and_upgrade(self, initial_version='1.6.5.3',
                         initialize_cluster=False,
                         create_buckets=False,
                         insert_data=False):
    """Install `initial_version` of membase on the first server, optionally
    initialize the cluster / create buckets / load data, then upgrade in
    place to the version from the test params and verify the upgrade stuck.

    Variant using install_server_win / init_cluster (cf. the
    membase_install_win / init_cluster_port variant elsewhere in this file).
    Windows-only: logs an error and does nothing on other platforms.
    """
    log = logger.Logger.get_logger()
    # NOTE: `input` shadows the builtin; kept as-is (doc-only change)
    input = TestInputSingleton.input
    version = input.test_params['version']
    rest_settings = input.membase_settings
    servers = input.servers
    server = servers[0]
    is_amazon = False
    if input.test_params.get('amazon', False):
        is_amazon = True
    remote = RemoteMachineShellConnection(server)
    rest = RestConnection(server)
    info = remote.extract_remote_info()
    # start from a clean machine: remove any membase or couchbase install
    remote.membase_uninstall()
    remote.couchbase_uninstall()
    builds, changes = BuildQuery().get_all_builds()
    #release_builds = BuildQuery().get_all_release_builds(initial_version)
    #if initial_version == "1.7.2":
    #    initial_version = "1.7.2r-20"
    older_build = BuildQuery().find_membase_release_build(
        deliverable_type=info.deliverable_type,
        os_architecture=info.architecture_type,
        build_version=initial_version,
        product='membase-server-enterprise',
        is_amazon=is_amazon)
    if info.type.lower() == 'windows':
        # product abbreviation drives the downloaded binary's file name:
        # couchbase branding from 1.8 on, membase before
        if older_build.product_version.startswith("1.8"):
            abbr_product = "cb"
        else:
            abbr_product = "mb"
        remote.download_binary_in_win(older_build.url, abbr_product,
                                      initial_version)
        remote.install_server_win(older_build, initial_version)
        RestHelper(rest).is_ns_server_running(
            testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster(rest_settings.rest_username,
                          rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self, server, bucket_data,
                                             howmany=2)
        if version.startswith("1.8"):
            abbr_product = "cb"
        appropriate_build = _get_build(servers[0], version,
                                       is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))
        remote.download_binary_in_win(appropriate_build.url, abbr_product,
                                      version)
        # server must be stopped before the windows in-place upgrade runs
        remote.stop_membase()
        log.info("###### START UPGRADE. #########")
        remote.membase_upgrade_win(info.architecture_type,
                                   info.windows_name, version,
                                   initial_version)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(
            testconstants.NS_SERVER_TIMEOUT)
        pools_info = rest.get_pools_info()
        rest.init_cluster(rest_settings.rest_username,
                          rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        # verify admin_creds still set
        # NOTE(review): assertTrue(x, msg) only checks truthiness of x —
        # an equality assertion may have been intended here; confirm
        self.assertTrue(pools_info['implementationVersion'],
                        appropriate_build.product_version)
        if initialize_cluster:
            #TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(
                    BucketOperationHelper.wait_for_bucket_creation(
                        'bucket-0', rest),
                    msg="bucket 'default' does not exist..")
            if insert_data:
                # verify every key loaded before the upgrade is still there
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"],
                        server, bucket.name, self)
    else:
        log.error("This is not windows server!")
def _install_and_upgrade(self, initial_version='1.6.5.3',
                         create_buckets=False,
                         insert_data=False,
                         start_upgraded_first=True,
                         load_ratio=-1,
                         roll_upgrade=False,
                         upgrade_path=[]):
    """Install `initial_version` on every server, cluster and rebalance
    them, optionally load data, then upgrade either all nodes at once
    (following `upgrade_path`) or one node at a time (rolling upgrade:
    eject, reinstall, re-add, rebalance), verifying data afterwards.

    Variant using install_server_win / init_cluster.

    NOTE(review): `upgrade_path=[]` is a mutable default argument; the
    body only reads it (copied into node_upgrade_path), so it is not
    mutated across calls, but `upgrade_path=None` would be safer.
    """
    node_upgrade_path = []
    node_upgrade_path.extend(upgrade_path)
    #then start them in whatever order you want
    inserted_keys = []
    log = logger.Logger.get_logger()
    if roll_upgrade:
        log.info("performing a rolling upgrade")
    # NOTE: `input` shadows the builtin; kept as-is (doc-only change)
    input = TestInputSingleton.input
    input_version = input.test_params['version']
    rest_settings = input.membase_settings
    servers = input.servers
    is_amazon = False
    if input.test_params.get('amazon', False):
        is_amazon = True
    # install older build on all nodes
    for server in servers:
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        older_build = BuildQuery().find_membase_release_build(
            deliverable_type=info.deliverable_type,
            os_architecture=info.architecture_type,
            build_version=initial_version,
            product='membase-server-enterprise',
            is_amazon=is_amazon)
        # clean machine first: remove any membase or couchbase install
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        # binary file-name abbreviation: couchbase branding from 1.8 on
        if older_build.product_version.startswith("1.8"):
            abbr_product = "cb"
        else:
            abbr_product = "mb"
        remote.download_binary_in_win(older_build.url, abbr_product,
                                      initial_version)
        #now let's install ?
        remote.install_server_win(older_build, initial_version)
        RestHelper(rest).is_ns_server_running(
            testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster(rest_settings.rest_username,
                          rest_settings.rest_password)
        rest.init_cluster_memoryQuota(
            memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        remote.disconnect()
    bucket_data = {}
    master = servers[0]
    # cluster all the nodes together
    ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                   rest_settings, self)
    rest = RestConnection(master)
    nodes = rest.node_statuses()
    otpNodeIds = []
    for node in nodes:
        otpNodeIds.append(node.id)
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(
        rebalanceStarted,
        "unable to start rebalance on master node {0}".format(master.ip))
    log.info('started rebalance operation on master node {0}'.format(
        master.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    self.assertTrue(
        rebalanceSucceeded,
        "rebalance operation for nodes: {0} was not successful".format(
            otpNodeIds))
    if create_buckets:
        #let's create buckets
        #wait for the bucket
        #bucket port should also be configurable , pass it as the
        #parameter to this test ? later
        self._create_default_bucket(master)
        inserted_keys = self._load_data(master, load_ratio)
        _create_load_multiple_bucket(self, master, bucket_data, howmany=2)
    #if initial_version == "1.7.0" or initial_version == "1.7.1":
    #    self._save_config(rest_settings, master)
    node_upgrade_path.append(input_version)
    #if we dont want to do roll_upgrade ?
    log.info("Upgrade path: {0} -> {1}".format(initial_version,
                                               node_upgrade_path))
    log.info("List of servers {0}".format(servers))
    if not roll_upgrade:
        # all-at-once upgrade: walk the path, stopping every server before
        # each hop and upgrading them all
        for version in node_upgrade_path:
            # NOTE(review): identity (`is not`) comparison of strings —
            # works only by CPython interning accident; `!=` was likely meant
            if version is not initial_version:
                log.info(
                    "SHUTDOWN ALL CB OR MB SERVERS IN CLUSTER BEFORE DOING UPGRADE"
                )
                for server in servers:
                    shell = RemoteMachineShellConnection(server)
                    shell.stop_membase()
                    shell.disconnect()
                log.info("Upgrading to version {0}".format(version))
                appropriate_build = _get_build(servers[0], version,
                                               is_amazon=is_amazon)
                self.assertTrue(
                    appropriate_build.url,
                    msg="unable to find build {0}".format(version))
                for server in servers:
                    remote = RemoteMachineShellConnection(server)
                    if version.startswith("1.8"):
                        abbr_product = "cb"
                    remote.download_binary_in_win(appropriate_build.url,
                                                  abbr_product, version)
                    log.info("###### START UPGRADE. #########")
                    remote.membase_upgrade_win(info.architecture_type,
                                               info.windows_name, version,
                                               initial_version)
                    RestHelper(
                        RestConnection(server)).is_ns_server_running(
                            testconstants.NS_SERVER_TIMEOUT)
                    #verify admin_creds still set
                    # NOTE(review): assertTrue(x, msg) only checks truthiness
                    # of x — equality may have been intended; confirm
                    pools_info = RestConnection(server).get_pools_info()
                    self.assertTrue(pools_info['implementationVersion'],
                                    appropriate_build.product_version)
                    if not start_upgraded_first:
                        remote.stop_membase()
                    remote.disconnect()
                if not start_upgraded_first:
                    log.info("Starting all servers together")
                    self._start_membase_servers(servers)
                time.sleep(TIMEOUT_SECS)
                if create_buckets:
                    self.assertTrue(
                        BucketOperationHelper.wait_for_bucket_creation(
                            'default', RestConnection(master)),
                        msg="bucket 'default' does not exist..")
                if insert_data:
                    self._verify_data(master, rest, inserted_keys)
    # rolling upgrade
    else:
        version = input.test_params['version']
        if version.startswith("1.8"):
            abbr_product = "cb"
        appropriate_build = _get_build(servers[0], version,
                                       is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))
        # rebalance node out
        # remove membase from node
        # install destination version onto node
        # rebalance it back into the cluster
        for server_index in range(len(servers)):
            server = servers[server_index]
            # previous node in the list acts as master while this one rolls
            master = servers[server_index - 1]
            log.info("current master is {0}, rolling node is {1}".format(
                master, server))
            rest = RestConnection(master)
            nodes = rest.node_statuses()
            allNodes = []
            toBeEjectedNodes = []
            for node in nodes:
                allNodes.append(node.id)
                if "{0}:{1}".format(node.ip, node.port) == "{0}:{1}".format(
                        server.ip, server.port):
                    toBeEjectedNodes.append(node.id)
            helper = RestHelper(rest)
            removed = helper.remove_nodes(knownNodes=allNodes,
                                          ejectedNodes=toBeEjectedNodes)
            self.assertTrue(
                removed,
                msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
            remote = RemoteMachineShellConnection(server)
            remote.membase_uninstall()
            remote.couchbase_uninstall()
            if appropriate_build.product == 'membase-server-enterprise':
                abbr_product = "mb"
            else:
                abbr_product = "cb"
            remote.download_binary_in_win(appropriate_build.url,
                                          abbr_product, version)
            remote.install_server_win(appropriate_build, version)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            time.sleep(TIMEOUT_SECS)
            rest.init_cluster(rest_settings.rest_username,
                              rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
            #readd this to the cluster
            ClusterOperationHelper.add_all_nodes_or_assert(
                master, [server], rest_settings, self)
            nodes = rest.node_statuses()
            log.info(
                "wait 30 seconds before asking older node for start rebalance"
            )
            time.sleep(30)
            otpNodeIds = []
            for node in nodes:
                otpNodeIds.append(node.id)
            rebalanceStarted = rest.rebalance(otpNodeIds, [])
            self.assertTrue(
                rebalanceStarted,
                "unable to start rebalance on master node {0}".format(
                    master.ip))
            log.info(
                'started rebalance operation on master node {0}'.format(
                    master.ip))
            rebalanceSucceeded = rest.monitorRebalance()
            self.assertTrue(
                rebalanceSucceeded,
                "rebalance operation for nodes: {0} was not successful".
                format(otpNodeIds))
            #ClusterOperationHelper.verify_persistence(servers, self)
        #TODO: how can i verify that the cluster init config is preserved
        # verify data on upgraded nodes
        if create_buckets:
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                'default', RestConnection(master)),
                msg="bucket 'default' does not exist..")
        if insert_data:
            self._verify_data(master, rest, inserted_keys)
            rest = RestConnection(master)
            buckets = rest.get_buckets()
            for bucket in buckets:
                BucketOperationHelper.keys_exist_or_assert(
                    bucket_data[bucket.name]["inserted_keys"],
                    master, bucket.name, self)