def _get_build(master, version, is_amazon=False):
    """Locate the build artifact matching *version* for the machine *master*.

    Queries the remote machine for its OS/architecture, then resolves the
    build either from the releases repository (plain release versions) or
    from the latest-builds list (versions containing 'r', and any 1.8.1.x).

    :param master: server descriptor whose platform determines the deliverable.
    :param version: version string, e.g. "1.8.0" or "1.8.0r-55".
    :param is_amazon: forwarded to BuildQuery to pick Amazon mirrors.
    :return: the build object found by BuildQuery (may be None).
    """
    log = logger.Logger.get_logger()
    remote = RemoteMachineShellConnection(master)
    info = remote.extract_remote_info()
    remote.disconnect()
    builds, changes = BuildQuery().get_all_builds()
    log.info("finding build {0} for machine {1}".format(version, master))
    # 'r' in the version string marks a latest-builds style version
    result = re.search('r', version)
    product = 'membase-server-enterprise'
    # FIX: escape the dot — the previous pattern '1.8' treated '.' as a
    # wildcard and matched strings such as "108" or "1x8".
    if re.search(r'1\.8', version):
        product = 'couchbase-server-enterprise'
    if result is None and not version.startswith("1.8.1"):
        # plain release version -> releases repository
        appropriate_build = BuildQuery().find_membase_release_build(
            product, info.deliverable_type, info.architecture_type,
            version.strip(), is_amazon=is_amazon)
    else:
        # build-numbered version -> search the full latest-builds list
        appropriate_build = BuildQuery().find_membase_build(
            builds, product, info.deliverable_type, info.architecture_type,
            version.strip(), is_amazon=is_amazon)
    return appropriate_build
def test_install(self):
    """Install the build under test on every configured server, serially."""
    # find the right deliverable for this os?
    build_query = BuildQuery()
    target_version = self.input.test_params['version']
    all_builds, _changes = build_query.get_all_builds()
    for server in self.servers:
        self._test_install(server, target_version, all_builds)
def _test_install(self, serverInfo, version, builds):
    """Uninstall any existing server, install *version* on *serverInfo*,
    then initialize the cluster (retrying for up to 10 minutes).

    :param serverInfo: target server descriptor.
    :param version: version string to install.
    :param builds: pre-fetched build list from BuildQuery.get_all_builds().
    """
    query = BuildQuery()
    info = self.machine_infos[serverInfo.ip]
    names = ['membase-server-enterprise', 'membase-server-community',
             'couchbase-server-enterprise', 'couchbase-server-community']
    # try each product flavour until one has a matching build
    build = None
    for name in names:
        build = query.find_membase_build(builds, name, info.deliverable_type,
                                         info.architecture_type, version.strip())
        if build:
            break
    if not build:
        self.fail('unable to find any {0} build for {1} for arch : {2} '.format(
            info.distribution_type, info.architecture_type, version.strip()))
    self.log.info('for machine : {0} {1} relevant build : {2}'.format(
        info.architecture_type, info.distribution_type, build))
    remote_client = RemoteMachineShellConnection(serverInfo)
    remote_client.membase_uninstall()
    remote_client.couchbase_uninstall()
    if 'amazon' in self.input.test_params:
        # rewrite build location/edition for Amazon test runs
        build.url = build.url.replace("http://builds.hq.northscale.net/latestbuilds/",
                                      "http://packages.northscale.com/latestbuilds/")
        build.url = build.url.replace("enterprise", "community")
        build.name = build.name.replace("enterprise", "community")
    downloaded = remote_client.download_build(build)
    # FIX: the message previously had no {0} placeholder, so the URL was
    # silently dropped from the assertion message.
    self.assertTrue(downloaded, 'unable to download binaries : {0}'.format(build.url))
    remote_client.membase_install(build)
    #TODO: we should poll the 8091 port until it is up and running
    self.log.info('wait 5 seconds for membase server to start')
    time.sleep(5)
    # retry cluster initialization for up to 10 minutes
    start_time = time.time()
    cluster_initialized = False
    while time.time() < (start_time + (10 * 60)):
        rest = RestConnection(serverInfo)
        try:
            if serverInfo.data_path:
                self.log.info("setting data path to " + serverInfo.data_path)
                rest.set_data_path(serverInfo.data_path)
            rest.init_cluster(username=serverInfo.rest_username,
                              password=serverInfo.rest_password)
            cluster_initialized = True
            break
        except ServerUnavailableException:
            self.log.error("error happened while initializing the cluster @ {0}".format(serverInfo.ip))
            self.log.info('sleep for 5 seconds before trying again ...')
            time.sleep(5)
    # assertTrue raises on failure, so the old duplicate
    # "if not cluster_initialized: raise" block was unreachable and removed.
    self.assertTrue(cluster_initialized,
                    "error happened while initializing the cluster @ {0}".format(serverInfo.ip))
    nodeinfo = rest.get_nodes_self()
    rest.init_cluster_memoryQuota(memoryQuota=nodeinfo.mcdMemoryReserved)
    # NOTE(review): this second call overrides the quota set just above with
    # a fixed 256 MB — confirm whether both calls are intentional.
    rest.init_cluster_memoryQuota(256)
def test_win_uninstall(self, remote_client, product, os_type, os_version, build_version, server_path):
    """Uninstall Couchbase/Couchbase-Single on a Windows machine via a
    scheduled task, re-downloading the installer binary if it is missing.

    :param remote_client: open RemoteMachineShellConnection to the target.
    :param product: short alias ('cse', 'csse', 'csc', 'cssc').
    :param os_type: '64' or '32' (architecture of the installed build).
    :param os_version: overridden below from test params ('win').
    :param build_version: unused here; version is read from VERSION.txt.
    :param server_path: overridden below based on *product*.
    """
    query = BuildQuery()
    builds, changes = query.get_all_builds()
    os_version = self.input.test_params['win']
    task = 'uninstall'
    ex_type = 'exe'
    bat_file = 'uninstall.bat'
    version_file = 'VERSION.txt'
    if os_type == '64':
        Arch = 'x86_64'
    elif os_type == '32':
        Arch = 'x86'
    if product == 'cse':
        name = 'couchbase-server-enterprise'
    elif product == 'csse':
        name = 'couchbase-single-server-enterprise'
    elif product == 'csc':
        name = 'couchbase-server-community'
    elif product == 'cssc':
        name = 'couchbase-single-server-community'
    else:
        # FIX: was a typo'd log ("Unknon") that then fell through and
        # crashed later with NameError on 'name'; fail fast instead.
        self.fail("Unknown product type.")
    # no need later
    cb_server_alias = ['cse', 'csc']
    cb_single_alias = ['csse', 'cssc']
    if product in cb_server_alias:
        server_path = "/cygdrive/c/Program Files/Couchbase/Server/"
    elif product in cb_single_alias:
        server_path = "/cygdrive/c/Program Files (x86)/Couchbase/Server/"
    info = remote_client.extract_remote_info()
    build_name, version = remote_client.find_build_version(server_path, version_file)
    self.log.info('build needed to do auto uninstall {0}'.format(build_name))
    # find installed build in tmp directory
    build_name = build_name.rstrip() + ".exe"
    self.log.info('Check if {0} is in tmp directory'.format(build_name))
    exist = remote_client.file_exists("/cygdrive/c/tmp/", build_name)
    if not exist:
        # installer binary is gone — download it again so uninstall.bat can run
        build = query.find_build(builds, name, ex_type, Arch, version)
        downloaded = remote_client.download_binary_in_win(build.url, product, version)
        if downloaded:
            self.log.info('Successful download {0}_{1}.exe'.format(product, version))
        else:
            self.log.error('Download {0}_{1}.exe failed'.format(product, version))
    # modify uninstall bat file to change build name.
    remote_client.modify_bat_file('/cygdrive/c/automation', bat_file, product,
                                  os_type, os_version, version, task)
    self.log.info('sleep for 5 seconds before running task schedule uninstall')
    time.sleep(5)
    # run task schedule to uninstall Couchbase Server
    self.log.info('Start to uninstall couchbase {0}_{1}'.format(product, version))
    output, error = remote_client.execute_command("cmd /c schtasks /run /tn removeme")
    remote_client.log_command_output(output, error)
    remote_client.wait_till_file_deleted(server_path, version_file, timeout_in_seconds=600)
    self.log.info('sleep 15 seconds before running the next job ...')
    time.sleep(15)
def _get_build(self, server, version, remote, is_amazon=False):
    """Resolve the build matching *version* for the machine behind *remote*.

    Release-style versions (no 'r') are looked up in the releases
    repository; versions containing 'r' are searched in the full
    latest-builds list.
    """
    machine_info = remote.extract_remote_info()
    all_builds, _changes = BuildQuery().get_all_builds()
    self.log.info("finding build %s for machine %s" % (version, server))
    product_name = '%s-enterprise' % (self.product)
    wanted = version.strip()
    if 'r' not in version:
        # plain release version
        found = BuildQuery().\
            find_membase_release_build(product_name,
                                       machine_info.deliverable_type,
                                       machine_info.architecture_type,
                                       wanted,
                                       is_amazon=is_amazon)
    else:
        # build-numbered version
        found = BuildQuery().\
            find_membase_build(all_builds, product_name,
                               machine_info.deliverable_type,
                               machine_info.architecture_type,
                               wanted,
                               is_amazon=is_amazon)
    return found
def test_install_parallel(self):
    """Install the requested build on all servers concurrently, one
    installer thread per server, and wait for every thread to finish."""
    # find the right deliverable for this os?
    query = BuildQuery()
    version = self.input.test_params['version']
    builds, changes = query.get_all_builds()
    threads = []
    for serverInfo in self.servers:
        new_thread = Thread(None, self._test_install, None,
                            (serverInfo, version, builds))
        new_thread.start()
        threads.append(new_thread)
    self.log.info("waiting for all installer threads to complete...")
    for t in threads:
        # FIX: previously "finished" was logged *before* join(), i.e. while
        # the thread could still be running; join first, then report.
        t.join()
        self.log.info("thread {0} finished".format(t))
def __get_build(self, version, remote, is_amazon=False, info=None):
    """Resolve the couchbase-server-enterprise build for *version*.

    Picks the repository based on the version family, queries all builds
    for the target platform, and returns the matching build from either
    the released-versions list or the fetched build list.

    :param info: cached remote machine info; extracted from *remote* if None.
    :raises Exception: when no matching build is found.
    """
    if info is None:
        info = remote.extract_remote_info()
    build_repo = CB_REPO
    if version[:5] in COUCHBASE_VERSIONS:
        if version[:3] in CB_VERSION_NAME:
            build_repo = CB_REPO + CB_VERSION_NAME[version[:3]] + "/"
        elif version[:5] in COUCHBASE_MP_VERSION:
            build_repo = MV_LATESTBUILD_REPO
    builds, changes = BuildQuery().get_all_builds(
        version=version,
        timeout=self.wait_timeout * 5,
        deliverable_type=info.deliverable_type,
        architecture_type=info.architecture_type,
        edition_type="couchbase-server-enterprise",
        repo=build_repo,
        distribution_version=info.distribution_version.lower())
    # FIX: escape the dots — the old pattern '[1-9].[0-9].[0-9]-[0-9]+$'
    # used '.' as a wildcard and matched non-version strings.
    if re.match(r'[1-9]\.[0-9]\.[0-9]-[0-9]+$', version):
        version = version + "-rel"
    if version[:5] in self.released_versions:
        appropriate_build = BuildQuery(). \
            find_couchbase_release_build('%s-enterprise' % self.product,
                                         info.deliverable_type,
                                         info.architecture_type,
                                         version.strip(),
                                         is_amazon=is_amazon,
                                         os_version=info.distribution_version)
    else:
        appropriate_build = BuildQuery(). \
            find_build(builds, '%s-enterprise' % self.product,
                       info.deliverable_type, info.architecture_type,
                       version.strip())
    if appropriate_build is None:
        self.log.info("Builds are: %s \n. Remote is %s, %s. Result is: %s"
                      % (builds, remote.ip, remote.username, version))
        raise Exception("Build %s not found" % version)
    return appropriate_build
def handle_command_line_u_or_v(option, argument):
    """Translate a -u (url) or -v (version) command-line option into a
    TestInputBuild; -u is currently a no-op placeholder."""
    chosen = TestInputBuild()
    if option == "-u":
        # let's check whether this url exists or not
        # let's extract version from this url
        pass
    if option == "-v":
        # NOTE(review): other call sites unpack get_all_builds() into
        # (builds, changes); confirm this really iterates build objects.
        for candidate in BuildQuery().get_all_builds():
            if candidate.product_version == argument:
                chosen.url = candidate.url
                chosen.version = argument
                break
    return chosen
def build_url(self, params):
    """Resolve a downloadable build from command-line style *params*.

    Supported keys (after alias expansion): version, product, server,
    toy, openssl, url, type, timeout, amazon.

    :return: the matching build object.
    :raises Exception: when no build can be found for any candidate name.
    """
    _errors = []
    version = ''
    server = ''
    openssl = ''
    names = []
    url = ''
    direct_build_url = None
    # replace "v" with version
    # replace p with product
    tmp = {}
    for k in params:
        value = params[k]
        if k == "v":
            tmp["version"] = value
        elif k == "p":
            # FIX: this previously wrote tmp["version"], so the "p"
            # (product) alias clobbered the version and lost the product.
            tmp["product"] = value
        else:
            tmp[k] = value
    params = tmp
    ok = True
    # FIX: was "and", which raised KeyError when "version" was absent and
    # never applied the length check when it was present.
    if "version" not in params or len(params["version"]) < 5:
        _errors.append(errors["INVALID-PARAMS"])
        ok = False
    else:
        version = params["version"]
    if ok:
        if not "product" in params:
            _errors.append(errors["INVALID-PARAMS"])
            ok = False
    if ok:
        if not "server" in params:
            _errors.append(errors["INVALID-PARAMS"])
            ok = False
        else:
            server = params["server"]
    if ok:
        if "toy" in params:
            toy = params["toy"]
        else:
            toy = ""
    if ok:
        if "openssl" in params:
            openssl = params["openssl"]
    if ok:
        if "url" in params and params["url"] != "":
            direct_build_url = params["url"]
    if ok:
        mb_alias = ["membase", "membase-server", "mbs", "mb"]
        cb_alias = ["couchbase", "couchbase-server", "cb"]
        css_alias = ["couchbase-single", "couchbase-single-server", "css"]
        moxi_alias = ["moxi", "moxi-server"]
        if params["product"] in mb_alias:
            names = ['membase-server-enterprise', 'membase-server-community']
        elif params["product"] in cb_alias:
            if "type" in params and params["type"].lower() in "couchbase-server-community":
                names = ['couchbase-server-community']
            elif "type" in params and params["type"].lower() in "couchbase-server-enterprise":
                names = ['couchbase-server-enterprise']
            else:
                names = ['couchbase-server-enterprise', 'couchbase-server-community']
        elif params["product"] in css_alias:
            names = ['couchbase-single-server-enterprise', 'couchbase-single-server-community']
        elif params["product"] in moxi_alias:
            names = ['moxi-server']
        else:
            ok = False
            _errors.append(errors["INVALID-PARAMS"])
        # openssl builds live under distro-specific names
        if "1" in openssl:
            names = ['couchbase-server-enterprise_centos6', 'couchbase-server-community_centos6',
                     'couchbase-server-enterprise_ubuntu_1204', 'couchbase-server-community_ubuntu_1204']
        if "toy" in params:
            if "1" in openssl:
                names = ['couchbase-server-community_cent64']
            else:
                names = ['couchbase-server-community_cent58']
    remote_client = RemoteMachineShellConnection(server)
    info = remote_client.extract_remote_info()
    remote_client.disconnect()
    if ok:
        timeout = 300
        if "timeout" in params:
            timeout = int(params["timeout"])
        releases_version = ["1.6.5.4", "1.7.0", "1.7.1", "1.7.1.1", "1.8.0"]
        cb_releases_version = ["1.8.1", "2.0.0", "2.0.1", "2.1.0", "2.1.1",
                               "2.2.0", "2.5.0", "2.5.1"]
        for name in names:
            if version in releases_version:
                build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=version,
                    product='membase-server-enterprise')
            elif version in cb_releases_version:
                build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=version,
                    product=name)
            else:
                builds, changes = BuildQuery().get_all_builds(
                    version=version, timeout=timeout,
                    direct_build_url=direct_build_url,
                    deliverable_type=info.deliverable_type,
                    architecture_type=info.architecture_type,
                    edition_type=name,
                    repo=MV_LATESTBUILD_REPO, toy=toy,
                    distribution_version=info.distribution_version.lower())
                build = BuildQuery().find_build(
                    builds, name, info.deliverable_type,
                    info.architecture_type, version, toy=toy,
                    openssl=openssl, direct_build_url=direct_build_url,
                    distribution_version=info.distribution_version.lower())
            if build:
                if 'amazon' in params:
                    type = info.type.lower()
                    if type == 'windows' and version in releases_version:
                        build.url = build.url.replace("http://builds.hq.northscale.net",
                                                      "https://s3.amazonaws.com/packages.couchbase")
                        build.url = build.url.replace("enterprise", "community")
                        build.name = build.name.replace("enterprise", "community")
                    else:
                        # since url in S3 insert version into it, we need to
                        # put version in like ..latestbuilds/3.0.0/...
                        cb_version = version[:5]
                        build.url = build.url.replace(
                            "http://builds.hq.northscale.net/latestbuilds",
                            "http://packages.northscale.com/latestbuilds/{0}".format(cb_version))
                        # test enterprise version
                        #build.url = build.url.replace("enterprise", "community")
                        #build.name = build.name.replace("enterprise", "community")
                return build
    _errors.append(errors["BUILD-NOT-FOUND"])
    msg = "unable to find a build for product {0} version {1} for package_type {2}"
    raise Exception(msg.format(names, version, info.deliverable_type))
def build_url(self, params):
    """Resolve a downloadable build from command-line style *params*,
    choosing the repository by version family and verifying the final
    URL is live before returning.

    :return: the matching build object (exits the process on a dead URL).
    :raises Exception: when no build can be found for any candidate name.
    """
    _errors = []
    version = ""
    server = ""
    openssl = ""
    names = []
    url = ""
    direct_build_url = None
    # replace "v" with version
    # replace p with product
    tmp = {}
    for k in params:
        value = params[k]
        if k == "v":
            tmp["version"] = value
        elif k == "p":
            # FIX: this previously wrote tmp["version"], so the "p"
            # (product) alias clobbered the version and lost the product.
            tmp["product"] = value
        else:
            tmp[k] = value
    params = tmp
    ok = True
    # FIX: was "and", which raised KeyError when "version" was absent and
    # never applied the length check when it was present.
    if "version" not in params or len(params["version"]) < 5:
        _errors.append(errors["INVALID-PARAMS"])
        ok = False
    else:
        version = params["version"]
    if ok:
        if not "product" in params:
            _errors.append(errors["INVALID-PARAMS"])
            ok = False
    if ok:
        if not "server" in params:
            _errors.append(errors["INVALID-PARAMS"])
            ok = False
        else:
            server = params["server"]
    if ok:
        if "toy" in params:
            toy = params["toy"]
        else:
            toy = ""
    if ok:
        if "openssl" in params:
            openssl = params["openssl"]
    if ok:
        if "url" in params and params["url"] != "":
            direct_build_url = params["url"]
    if ok:
        mb_alias = ["membase", "membase-server", "mbs", "mb"]
        cb_alias = ["couchbase", "couchbase-server", "cb"]
        css_alias = ["couchbase-single", "couchbase-single-server", "css"]
        moxi_alias = ["moxi", "moxi-server"]
        if params["product"] in mb_alias:
            names = ["membase-server-enterprise", "membase-server-community"]
        elif params["product"] in cb_alias:
            if "type" in params and params["type"].lower() in "couchbase-server-community":
                names = ["couchbase-server-community"]
            elif "type" in params and params["type"].lower() in "couchbase-server-enterprise":
                names = ["couchbase-server-enterprise"]
            else:
                names = ["couchbase-server-enterprise", "couchbase-server-community"]
        elif params["product"] in css_alias:
            names = ["couchbase-single-server-enterprise", "couchbase-single-server-community"]
        elif params["product"] in moxi_alias:
            names = ["moxi-server"]
        else:
            ok = False
            _errors.append(errors["INVALID-PARAMS"])
        # openssl builds live under distro-specific names
        if "1" in openssl:
            names = [
                "couchbase-server-enterprise_centos6",
                "couchbase-server-community_centos6",
                "couchbase-server-enterprise_ubuntu_1204",
                "couchbase-server-community_ubuntu_1204",
            ]
        if "toy" in params:
            names = ["couchbase-server-enterprise"]
    remote_client = RemoteMachineShellConnection(server)
    info = remote_client.extract_remote_info()
    remote_client.disconnect()
    if ok:
        timeout = 300
        if "timeout" in params:
            timeout = int(params["timeout"])
        releases_version = ["1.6.5.4", "1.7.0", "1.7.1", "1.7.1.1", "1.8.0"]
        cb_releases_version = [
            "1.8.1", "2.0.0", "2.0.1", "2.1.0", "2.1.1", "2.2.0", "2.5.0",
            "2.5.1", "2.5.2", "3.0.0", "3.0.1", "3.0.2", "3.0.3", "3.1.0",
        ]
        build_repo = MV_LATESTBUILD_REPO
        # FIX: was "toy is not ''" — identity comparison against a string
        # literal; use inequality.
        if toy != "":
            build_repo = CB_REPO
        elif version[:5] not in COUCHBASE_VERSION_2 and version[:5] not in COUCHBASE_VERSION_3:
            if version[:3] in CB_VERSION_NAME:
                build_repo = CB_REPO + CB_VERSION_NAME[version[:3]] + "/"
            else:
                sys.exit("version is not support yet")
        for name in names:
            if version[:5] in releases_version:
                build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=version,
                    product="membase-server-enterprise",
                )
            elif version[:5] in cb_releases_version:
                build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=version,
                    product=name,
                    os_version=info.distribution_version,
                )
            else:
                builds, changes = BuildQuery().get_all_builds(
                    version=version,
                    timeout=timeout,
                    direct_build_url=direct_build_url,
                    deliverable_type=info.deliverable_type,
                    architecture_type=info.architecture_type,
                    edition_type=name,
                    repo=build_repo,
                    toy=toy,
                    distribution_version=info.distribution_version.lower(),
                    distribution_type=info.distribution_type.lower(),
                )
                build = BuildQuery().find_build(
                    builds,
                    name,
                    info.deliverable_type,
                    info.architecture_type,
                    version,
                    toy=toy,
                    openssl=openssl,
                    direct_build_url=direct_build_url,
                    distribution_version=info.distribution_version.lower(),
                    distribution_type=info.distribution_type.lower(),
                )
            if build:
                if "amazon" in params:
                    type = info.type.lower()
                    if type == "windows" and version in releases_version:
                        build.url = build.url.replace(
                            "http://builds.hq.northscale.net",
                            "https://s3.amazonaws.com/packages.couchbase")
                        build.url = build.url.replace("enterprise", "community")
                        build.name = build.name.replace("enterprise", "community")
                    else:
                        # since url in S3 insert version into it, we need to
                        # put version in like ..latestbuilds/3.0.0/...
                        cb_version = version[:5]
                        build.url = build.url.replace(
                            "http://builds.hq.northscale.net/latestbuilds",
                            "http://packages.northscale.com/latestbuilds/{0}".format(cb_version),
                        )
                        # test enterprise version
                        # build.url = build.url.replace("enterprise", "community")
                        # build.name = build.name.replace("enterprise", "community")
                # check if URL is live before handing the build back
                remote_client = RemoteMachineShellConnection(server)
                if remote_client.is_url_live(build.url):
                    return build
                else:
                    sys.exit("ERROR: URL is not good. Check URL again")
    _errors.append(errors["BUILD-NOT-FOUND"])
    msg = "unable to find a build for product {0} version {1} for package_type {2}"
    raise Exception(msg.format(names, version, info.deliverable_type))
def _install_and_upgrade(self, initial_version='1.6.5.3',
                         initialize_cluster=False,
                         create_buckets=False,
                         insert_data=False):
    """Install *initial_version* on the first server, optionally
    initialize the cluster / create buckets / load data, then upgrade in
    place to the version from the test params and verify the result.

    :param initial_version: version installed before the upgrade.
    :param initialize_cluster: set memory quota before the upgrade.
    :param create_buckets: create+load buckets (implies initialize_cluster).
    :param insert_data: after upgrade, verify previously inserted keys.
    """
    input = TestInputSingleton.input
    rest_settings = input.membase_settings
    servers = input.servers
    server = servers[0]
    # 1.7 -> 1.8 upgrades must carry the old config across
    save_upgrade_config = False
    if initial_version.startswith("1.7") and \
            input.test_params['version'].startswith("1.8"):
        save_upgrade_config = True
    is_amazon = False
    if input.test_params.get('amazon', False):
        is_amazon = True
    if initial_version.startswith("1.6") or initial_version.startswith("1.7"):
        product = 'membase-server-enterprise'
    else:
        product = 'couchbase-server-enterprise'
    remote = RemoteMachineShellConnection(server)
    rest = RestConnection(server)
    info = remote.extract_remote_info()
    remote.membase_uninstall()
    remote.couchbase_uninstall()
    # check to see if we are installing from latestbuilds or releases
    # note: for newer releases (1.8.0) even release versions can have the
    # form 1.8.0r-55
    # FIX: removed a redundant unconditional get_all_builds() call here —
    # the result was only used in (and is re-fetched by) the branch below.
    if re.search('r', initial_version):
        builds, changes = BuildQuery().get_all_builds()
        older_build = BuildQuery().find_membase_build(
            builds,
            deliverable_type=info.deliverable_type,
            os_architecture=info.architecture_type,
            build_version=initial_version,
            product=product, is_amazon=is_amazon)
    else:
        older_build = BuildQuery().find_membase_release_build(
            deliverable_type=info.deliverable_type,
            os_architecture=info.architecture_type,
            build_version=initial_version,
            product=product, is_amazon=is_amazon)
    remote.stop_membase()
    remote.stop_couchbase()
    remote.download_build(older_build)
    # now let's install ?
    remote.membase_install(older_build)
    RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
    rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
    bucket_data = {}
    if initialize_cluster:
        rest.init_cluster_memoryQuota(
            memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        if create_buckets:
            _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
    version = input.test_params['version']
    appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
    self.assertTrue(appropriate_build.url,
                    msg="unable to find build {0}".format(version))
    remote.download_build(appropriate_build)
    remote.membase_upgrade(appropriate_build,
                           save_upgrade_config=save_upgrade_config)
    remote.disconnect()
    RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
    pools_info = rest.get_pools_info()
    rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
    time.sleep(TIMEOUT_SECS)
    # verify admin_creds still set
    self.assertTrue(pools_info['implementationVersion'],
                    appropriate_build.product_version)
    if initialize_cluster:
        #TODO: how can i verify that the cluster init config is preserved
        if create_buckets:
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                'bucket-0', rest), msg="bucket 'default' does not exist..")
        if insert_data:
            buckets = rest.get_buckets()
            for bucket in buckets:
                BucketOperationHelper.keys_exist_or_assert(
                    bucket_data[bucket.name]["inserted_keys"],
                    server, bucket.name, self)
def test_win_install(self):
    """Install Couchbase on every Windows server via a scheduled task,
    uninstalling any previous installation first.

    Reads ostype/product/version/win from the test params; skips the whole
    run with an error log when either ostype or product is unrecognized.
    """
    query = BuildQuery()
    builds, changes = query.get_all_builds()
    version = self.input.test_params['version']
    os_version = self.input.test_params['win']
    task = 'install'
    ok = True
    ex_type = 'exe'
    bat_file = 'install.bat'
    version_file = 'VERSION.txt'
    if self.input.test_params["ostype"] == '64':
        Arch = 'x86_64'
        os_type = '64'
    elif self.input.test_params["ostype"] == '32':
        Arch = 'x86'
        os_type = '32'
    else:
        ok = False
        self.log.error("Unknown os version.")
    product = self.input.test_params["product"]
    if product == 'cse':
        name = 'couchbase-server-enterprise'
    elif product == 'csse':
        name = 'couchbase-single-server-enterprise'
    elif product == 'csc':
        name = 'couchbase-server-community'
    elif product == 'cssc':
        name = 'couchbase-single-server-community'
    else:
        ok = False
        # FIX: typo "Unknon" -> "Unknown"
        self.log.error("Unknown product type.")
    cb_server_alias = ['cse', 'csc']
    cb_single_alias = ['csse', 'cssc']
    if product in cb_server_alias:
        server_path = "/cygdrive/c/Program Files/Couchbase/Server/"
    elif product in cb_single_alias:
        server_path = "/cygdrive/c/Program Files (x86)/Couchbase/Server/"
    if ok:
        for serverInfo in self.servers:
            remote_client = RemoteMachineShellConnection(serverInfo)
            # FIX: previously a second RemoteMachineShellConnection was
            # created just for extract_remote_info() and never closed;
            # reuse the existing connection.
            info = remote_client.extract_remote_info()
            build = query.find_build(builds, name, ex_type, Arch, version)
            #self.log.info("what is this {0}".format(build.url))
            # check if previous couchbase server installed
            exist = remote_client.file_exists(
                "/cygdrive/c/Program Files/Couchbase/Server/", version_file)
            if exist:
                # call uninstall function to remove couchbase server first
                self.log.info("Start uninstall cb server on this server")
                self.test_win_uninstall(remote_client, product, os_type,
                                        os_version, version, server_path)
            else:
                self.log.info('I am free. You can install couchbase server now')
            # directory path in remote server used to create or delete directory
            dir_paths = ['/cygdrive/c/automation', '/cygdrive/c/tmp']
            remote_client.create_multiple_dir(dir_paths)
            # copy files from local server to remote server
            remote_client.copy_files_local_to_remote(
                'resources/windows/automation', '/cygdrive/c/automation')
            downloaded = remote_client.download_binary_in_win(build.url, product, version)
            if downloaded:
                self.log.info('Successful download {0}_{1}.exe'.format(product, version))
            else:
                self.log.error('Download {0}_{1}.exe failed'.format(product, version))
            remote_client.modify_bat_file('/cygdrive/c/automation', bat_file,
                                          product, os_type, os_version, version, task)
            self.log.info('sleep for 5 seconds before running task schedule install me')
            time.sleep(5)
            # run task schedule to install Couchbase Server
            output, error = remote_client.execute_command("cmd /c schtasks /run /tn installme")
            remote_client.log_command_output(output, error)
            remote_client.wait_till_file_added(server_path, version_file,
                                               timeout_in_seconds=600)
            self.log.info('sleep 15 seconds before running the next job ...')
            time.sleep(15)
    else:
        self.log.error("Can not install Couchbase Server.")
def _install_and_upgrade(self, initial_version='1.6.5.3',
                         initialize_cluster=False,
                         create_buckets=False,
                         insert_data=False):
    """Windows-only upgrade test: install *initial_version* on the first
    server, optionally initialize the cluster / create buckets, then
    upgrade in place to the version from the test params and verify.

    Logs an error and does nothing beyond the initial install steps when
    the target machine is not Windows.

    :param initial_version: version to install before upgrading.
    :param initialize_cluster: set memory quota before the upgrade.
    :param create_buckets: create and load buckets (requires initialize_cluster).
    :param insert_data: after upgrade, verify previously inserted keys.
    """
    log = logger.Logger.get_logger()
    input = TestInputSingleton.input
    version = input.test_params['version']
    rest_settings = input.membase_settings
    servers = input.servers
    server = servers[0]
    is_amazon = False
    if input.test_params.get('amazon', False):
        is_amazon = True
    remote = RemoteMachineShellConnection(server)
    rest = RestConnection(server)
    info = remote.extract_remote_info()
    remote.membase_uninstall()
    remote.couchbase_uninstall()
    builds, changes = BuildQuery().get_all_builds()
    #release_builds = BuildQuery().get_all_release_builds(initial_version)
    #if initial_version == "1.7.2":
    #   initial_version = "1.7.2r-20"
    older_build = BuildQuery().find_membase_release_build(
        deliverable_type=info.deliverable_type,
        os_architecture=info.architecture_type,
        build_version=initial_version,
        product='membase-server-enterprise', is_amazon=is_amazon)
    if info.type.lower() == 'windows':
        # product abbreviation drives the downloaded file name:
        # 1.8+ ships as couchbase ("cb"), older as membase ("mb")
        if older_build.product_version.startswith("1.8"):
            abbr_product = "cb"
        else:
            abbr_product = "mb"
        remote.download_binary_in_win(older_build.url, abbr_product,
                                      initial_version)
        remote.install_server_win(older_build, initial_version)
        RestHelper(rest).is_ns_server_running(
            testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster(rest_settings.rest_username,
                          rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self, server,
                                             bucket_data, howmany=2)
        # target version may switch the product family to couchbase
        if version.startswith("1.8"):
            abbr_product = "cb"
        appropriate_build = _get_build(servers[0], version,
                                       is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))
        remote.download_binary_in_win(appropriate_build.url, abbr_product,
                                      version)
        remote.stop_membase()
        log.info("###### START UPGRADE. #########")
        remote.membase_upgrade_win(info.architecture_type,
                                   info.windows_name, version,
                                   initial_version)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(
            testconstants.NS_SERVER_TIMEOUT)
        pools_info = rest.get_pools_info()
        rest.init_cluster(rest_settings.rest_username,
                          rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        # verify admin_creds still set
        self.assertTrue(pools_info['implementationVersion'],
                        appropriate_build.product_version)
        if initialize_cluster:
            #TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(
                    BucketOperationHelper.wait_for_bucket_creation(
                        'bucket-0', rest),
                    msg="bucket 'default' does not exist..")
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"],
                        server, bucket.name, self)
    else:
        log.error("This is not windows server!")
def _install_and_upgrade(self, initial_version='1.6.5.3',
                         create_buckets=False,
                         insert_data=False,
                         start_upgraded_first=True,
                         load_ratio=-1,
                         roll_upgrade=False,
                         upgrade_path=[]):
    """Windows multi-node upgrade test.

    Installs *initial_version* on every server, clusters and rebalances
    them, then upgrades either all nodes at once (default) or one node at
    a time via rebalance-out / reinstall / rebalance-in (rolling).

    :param initial_version: version installed on all nodes first.
    :param create_buckets: create default + extra buckets and load data.
    :param insert_data: verify inserted keys after the upgrade.
    :param start_upgraded_first: if False, stop each upgraded node and
        restart all servers together at the end (non-rolling path only).
    :param load_ratio: forwarded to self._load_data.
    :param roll_upgrade: choose the rolling-upgrade path.
    :param upgrade_path: intermediate versions to step through
        (non-rolling path). NOTE(review): mutable default argument — it is
        copied via extend() and never mutated here, but fragile.
    """
    node_upgrade_path = []
    node_upgrade_path.extend(upgrade_path)
    #then start them in whatever order you want
    inserted_keys = []
    log = logger.Logger.get_logger()
    if roll_upgrade:
        log.info("performing a rolling upgrade")
    input = TestInputSingleton.input
    input_version = input.test_params['version']
    rest_settings = input.membase_settings
    servers = input.servers
    is_amazon = False
    if input.test_params.get('amazon', False):
        is_amazon = True
    # install older build on all nodes
    for server in servers:
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        older_build = BuildQuery().find_membase_release_build(
            deliverable_type=info.deliverable_type,
            os_architecture=info.architecture_type,
            build_version=initial_version,
            product='membase-server-enterprise', is_amazon=is_amazon)
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        # product abbreviation drives the downloaded file name:
        # 1.8+ ships as couchbase ("cb"), older as membase ("mb")
        if older_build.product_version.startswith("1.8"):
            abbr_product = "cb"
        else:
            abbr_product = "mb"
        remote.download_binary_in_win(older_build.url, abbr_product,
                                      initial_version)
        #now let's install ?
        remote.install_server_win(older_build, initial_version)
        RestHelper(rest).is_ns_server_running(
            testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster(rest_settings.rest_username,
                          rest_settings.rest_password)
        rest.init_cluster_memoryQuota(
            memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        remote.disconnect()
    bucket_data = {}
    master = servers[0]
    # cluster all the nodes together and rebalance them in
    ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                   rest_settings, self)
    rest = RestConnection(master)
    nodes = rest.node_statuses()
    otpNodeIds = []
    for node in nodes:
        otpNodeIds.append(node.id)
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(
                        master.ip))
    log.info('started rebalance operation on master node {0}'.format(
        master.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(
                        otpNodeIds))
    if create_buckets:
        #let's create buckets
        #wait for the bucket
        #bucket port should also be configurable , pass it as the
        #parameter to this test ? later
        self._create_default_bucket(master)
        inserted_keys = self._load_data(master, load_ratio)
        _create_load_multiple_bucket(self, master, bucket_data, howmany=2)
    #if initial_version == "1.7.0" or initial_version == "1.7.1":
    #    self._save_config(rest_settings, master)
    node_upgrade_path.append(input_version)
    #if we dont want to do roll_upgrade ?
    log.info("Upgrade path: {0} -> {1}".format(initial_version,
                                               node_upgrade_path))
    log.info("List of servers {0}".format(servers))
    if not roll_upgrade:
        # upgrade every node in lock-step, one version at a time
        for version in node_upgrade_path:
            # NOTE(review): identity comparison ('is not') between
            # strings — relies on interning; confirm '!=' was intended.
            if version is not initial_version:
                log.info("SHUTDOWN ALL CB OR MB SERVERS IN CLUSTER BEFORE DOING UPGRADE")
                for server in servers:
                    shell = RemoteMachineShellConnection(server)
                    shell.stop_membase()
                    shell.disconnect()
                log.info("Upgrading to version {0}".format(version))
                appropriate_build = _get_build(servers[0], version,
                                               is_amazon=is_amazon)
                self.assertTrue(appropriate_build.url,
                                msg="unable to find build {0}".format(version))
                for server in servers:
                    remote = RemoteMachineShellConnection(server)
                    if version.startswith("1.8"):
                        abbr_product = "cb"
                    remote.download_binary_in_win(appropriate_build.url,
                                                  abbr_product, version)
                    log.info("###### START UPGRADE. #########")
                    # NOTE(review): 'info' here is whatever the last
                    # iteration of the install loop left behind — assumes
                    # all nodes share the same platform; confirm.
                    remote.membase_upgrade_win(info.architecture_type,
                                               info.windows_name, version,
                                               initial_version)
                    RestHelper(RestConnection(server)).is_ns_server_running(
                        testconstants.NS_SERVER_TIMEOUT)
                    #verify admin_creds still set
                    pools_info = RestConnection(server).get_pools_info()
                    self.assertTrue(pools_info['implementationVersion'],
                                    appropriate_build.product_version)
                    if not start_upgraded_first:
                        remote.stop_membase()
                    remote.disconnect()
                if not start_upgraded_first:
                    log.info("Starting all servers together")
                    self._start_membase_servers(servers)
                time.sleep(TIMEOUT_SECS)
                if create_buckets:
                    self.assertTrue(
                        BucketOperationHelper.wait_for_bucket_creation(
                            'default', RestConnection(master)),
                        msg="bucket 'default' does not exist..")
                if insert_data:
                    self._verify_data(master, rest, inserted_keys)
    # rolling upgrade
    else:
        version = input.test_params['version']
        if version.startswith("1.8"):
            abbr_product = "cb"
        appropriate_build = _get_build(servers[0], version,
                                       is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))
        # rebalance node out
        # remove membase from node
        # install destination version onto node
        # rebalance it back into the cluster
        for server_index in range(len(servers)):
            server = servers[server_index]
            # previous node in the list acts as master while this one rolls
            master = servers[server_index - 1]
            log.info("current master is {0}, rolling node is {1}".format(
                master, server))
            rest = RestConnection(master)
            nodes = rest.node_statuses()
            allNodes = []
            toBeEjectedNodes = []
            for node in nodes:
                allNodes.append(node.id)
                if "{0}:{1}".format(node.ip, node.port) == \
                        "{0}:{1}".format(server.ip, server.port):
                    toBeEjectedNodes.append(node.id)
            helper = RestHelper(rest)
            removed = helper.remove_nodes(knownNodes=allNodes,
                                          ejectedNodes=toBeEjectedNodes)
            self.assertTrue(removed,
                            msg="Unable to remove nodes {0}".format(
                                toBeEjectedNodes))
            remote = RemoteMachineShellConnection(server)
            remote.membase_uninstall()
            remote.couchbase_uninstall()
            if appropriate_build.product == 'membase-server-enterprise':
                abbr_product = "mb"
            else:
                abbr_product = "cb"
            remote.download_binary_in_win(appropriate_build.url,
                                          abbr_product, version)
            remote.install_server_win(appropriate_build, version)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            time.sleep(TIMEOUT_SECS)
            rest.init_cluster(rest_settings.rest_username,
                              rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
            #readd this to the cluster
            ClusterOperationHelper.add_all_nodes_or_assert(
                master, [server], rest_settings, self)
            nodes = rest.node_statuses()
            log.info("wait 30 seconds before asking older node for start rebalance")
            time.sleep(30)
            otpNodeIds = []
            for node in nodes:
                otpNodeIds.append(node.id)
            rebalanceStarted = rest.rebalance(otpNodeIds, [])
            self.assertTrue(rebalanceStarted,
                            "unable to start rebalance on master node {0}".format(
                                master.ip))
            log.info('started rebalance operation on master node {0}'.format(
                master.ip))
            rebalanceSucceeded = rest.monitorRebalance()
            self.assertTrue(rebalanceSucceeded,
                            "rebalance operation for nodes: {0} was not successful".
                            format(otpNodeIds))
        #ClusterOperationHelper.verify_persistence(servers, self)
    #TODO: how can i verify that the cluster init config is preserved
    # verify data on upgraded nodes
    if create_buckets:
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            'default', RestConnection(master)),
            msg="bucket 'default' does not exist..")
    if insert_data:
        self._verify_data(master, rest, inserted_keys)
        rest = RestConnection(master)
        buckets = rest.get_buckets()
        for bucket in buckets:
            BucketOperationHelper.keys_exist_or_assert(
                bucket_data[bucket.name]["inserted_keys"],
                master, bucket.name, self)
def build_url(self, params):
    """Resolve a downloadable build for the server described in *params*.

    Recognized keys (with short aliases): "v"/"version", "p"/"product",
    "server", and optionally "toy", "type" and "amazon".  Returns the first
    matching build object, rewriting its URL for the Amazon mirror when
    "amazon" is present; raises Exception when no build can be found.
    """
    _errors = []
    version = ''
    server = ''
    names = []
    # Expand the short aliases: "v" -> "version", "p" -> "product".
    tmp = {}
    for k in params:
        value = params[k]
        if k == "v":
            tmp["version"] = value
        elif k == "p":
            # BUG FIX: "p" must map to "product" (the old code wrote it into
            # "version", clobbering the version and leaving product unset, so
            # the "p" shortcut always failed the product check below).
            tmp["product"] = value
        else:
            tmp[k] = value
    params = tmp
    ok = True
    if "version" not in params:
        _errors.append(errors["INVALID-PARAMS"])
        ok = False
    else:
        version = params["version"]
    if ok:
        if "product" not in params:
            _errors.append(errors["INVALID-PARAMS"])
            ok = False
    if ok:
        if "server" not in params:
            _errors.append(errors["INVALID-PARAMS"])
            ok = False
        else:
            server = params["server"]
    if ok:
        # toy builds are optional; default to the empty string
        if "toy" in params:
            toy = params["toy"]
        else:
            toy = ""
    if ok:
        # Map the product alias onto the candidate package names to try.
        mb_alias = ["membase", "membase-server", "mbs", "mb"]
        cb_alias = ["couchbase", "couchbase-server", "cb"]
        css_alias = ["couchbase-single", "couchbase-single-server", "css"]
        if params["product"] in mb_alias:
            names = ['membase-server-enterprise', 'membase-server-community']
        elif params["product"] in cb_alias:
            if "type" in params and params["type"].lower() in "couchbase-server-community":
                names = ['couchbase-server-community']
            elif "type" in params and params["type"].lower() in "couchbase-server-enterprise":
                names = ['couchbase-server-enterprise']
            else:
                names = ['couchbase-server-enterprise', 'couchbase-server-community']
        elif params["product"] in css_alias:
            names = ['couchbase-single-server-enterprise',
                     'couchbase-single-server-community']
        else:
            ok = False
            _errors.append(errors["INVALID-PARAMS"])
    # OS / architecture of the target machine decide the deliverable type.
    remote_client = RemoteMachineShellConnection(server)
    info = remote_client.extract_remote_info()
    remote_client.disconnect()
    if ok:
        builds, changes = BuildQuery().get_all_builds()
        # released versions live in the releases repo, not latestbuilds
        releases_version = ["1.6.5.4", "1.7.0", "1.7.1", "1.7.1.1"]
        for name in names:
            if version in releases_version:
                build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=version,
                    product='membase-server-enterprise')
            else:
                build = BuildQuery().find_build(builds, name,
                                                info.deliverable_type,
                                                info.architecture_type,
                                                version, toy=toy)
            if build:
                if 'amazon' in params:
                    # Inside EC2 the northscale build host is unreachable;
                    # rewrite the URL to the public mirrors (community only).
                    os_type = info.type.lower()
                    if os_type == 'windows' and version in releases_version:
                        build.url = build.url.replace("http://builds.hq.northscale.net",
                                                      "https://s3.amazonaws.com/packages.couchbase")
                        build.url = build.url.replace("enterprise", "community")
                        build.name = build.name.replace("enterprise", "community")
                    else:
                        build.url = build.url.replace("http://builds.hq.northscale.net",
                                                      "http://packages.northscale.com")
                        build.url = build.url.replace("enterprise", "community")
                        build.name = build.name.replace("enterprise", "community")
                return build
    _errors.append(errors["BUILD-NOT-FOUND"])
    msg = "unable to find a build for product {0} version {1} for package_type {2}"
    raise Exception(msg.format(names, version, info.deliverable_type))
def _install_and_upgrade(self, initial_version='1.6.5.3',
                         create_buckets=False,
                         insert_data=False,
                         start_upgraded_first=True,
                         load_ratio=-1,
                         roll_upgrade=False,
                         upgrade_path=None,
                         do_new_rest=False):
    """Install *initial_version* on every node, cluster them, then upgrade.

    When ``roll_upgrade`` is False every node is upgraded in lock-step
    through each version in ``upgrade_path``; when True the nodes are
    upgraded one at a time (rebalance out, reinstall, rebalance back in).
    ``create_buckets``/``insert_data`` optionally seed and later verify
    data; ``do_new_rest`` directs post-upgrade REST calls at the freshly
    upgraded node (MB-5108).
    """
    # FIX: mutable default argument ([]) replaced with None sentinel so the
    # default list is not shared across calls.
    if upgrade_path is None:
        upgrade_path = []
    node_upgrade_path = []
    node_upgrade_path.extend(upgrade_path)
    #then start them in whatever order you want
    inserted_keys = []
    log = logger.Logger.get_logger()
    if roll_upgrade:
        log.info("performing an online upgrade")
    # renamed from 'input' to avoid shadowing the builtin
    test_input = TestInputSingleton.input
    rest_settings = test_input.membase_settings
    servers = test_input.servers
    save_upgrade_config = False
    is_amazon = False
    if test_input.test_params.get('amazon', False):
        is_amazon = True
    if initial_version.startswith("1.6") or initial_version.startswith("1.7"):
        product = 'membase-server-enterprise'
    else:
        product = 'couchbase-server-enterprise'
    # install older build on all nodes
    for server in servers:
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        # check to see if we are installing from latestbuilds or releases
        # note: for newer releases (1.8.0) even release versions can have the
        # form 1.8.0r-55
        if re.search('r', initial_version):
            builds, changes = BuildQuery().get_all_builds()
            older_build = BuildQuery().find_membase_build(
                builds,
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product, is_amazon=is_amazon)
        else:
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product, is_amazon=is_amazon)
        # start from a clean machine before installing the old build
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        remote.stop_membase()
        remote.stop_couchbase()
        remote.download_build(older_build)
        #now let's install ?
        remote.membase_install(older_build)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        rest.init_cluster_memoryQuota(
            memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        remote.disconnect()
    bucket_data = {}
    master = servers[0]
    if create_buckets:
        #let's create buckets
        #wait for the bucket
        #bucket port should also be configurable , pass it as the
        #parameter to this test ? later
        self._create_default_bucket(master)
        inserted_keys = self._load_data(master, load_ratio)
        _create_load_multiple_bucket(self, master, bucket_data, howmany=2)
    # cluster all the nodes together
    ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                   rest_settings, self)
    rest = RestConnection(master)
    nodes = rest.node_statuses()
    otpNodeIds = [node.id for node in nodes]
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(master.ip))
    log.info('started rebalance operation on master node {0}'.format(master.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
    if initial_version == "1.7.0" or initial_version == "1.7.1":
        self._save_config(rest_settings, master)
    input_version = test_input.test_params['version']
    node_upgrade_path.append(input_version)
    current_version = initial_version
    previous_version = current_version
    #if we dont want to do roll_upgrade ?
    log.info("Upgrade path: {0} -> {1}".format(initial_version, node_upgrade_path))
    log.info("List of servers {0}".format(servers))
    if not roll_upgrade:
        # lock-step upgrade: stop everything, upgrade every node, restart
        for version in node_upgrade_path:
            previous_version = current_version
            current_version = version
            if version != initial_version:
                log.info("Upgrading to version {0}".format(version))
                self._stop_membase_servers(servers)
                if previous_version.startswith("1.7") and current_version.startswith("1.8"):
                    save_upgrade_config = True
                # No need to save the upgrade config from 180 to 181
                if previous_version.startswith("1.8.0") and current_version.startswith("1.8.1"):
                    save_upgrade_config = False
                appropriate_build = _get_build(servers[0], version,
                                               is_amazon=is_amazon)
                self.assertTrue(appropriate_build.url,
                                msg="unable to find build {0}".format(version))
                for server in servers:
                    remote = RemoteMachineShellConnection(server)
                    remote.download_build(appropriate_build)
                    remote.membase_upgrade(appropriate_build,
                                           save_upgrade_config=save_upgrade_config)
                    RestHelper(RestConnection(server)).is_ns_server_running(
                        testconstants.NS_SERVER_TIMEOUT)
                    #verify admin_creds still set
                    pools_info = RestConnection(server).get_pools_info()
                    self.assertTrue(pools_info['implementationVersion'],
                                    appropriate_build.product_version)
                    if start_upgraded_first:
                        log.info("Starting server {0} post upgrade".format(server))
                        remote.start_membase()
                    else:
                        remote.stop_membase()
                    remote.disconnect()
                if not start_upgraded_first:
                    log.info("Starting all servers together")
                    self._start_membase_servers(servers)
                time.sleep(TIMEOUT_SECS)
                if version == "1.7.0" or version == "1.7.1":
                    self._save_config(rest_settings, master)
                if create_buckets:
                    self.assertTrue(
                        BucketOperationHelper.wait_for_bucket_creation(
                            'default', RestConnection(master)),
                        msg="bucket 'default' does not exist..")
                if insert_data:
                    self._verify_data(master, rest, inserted_keys)
    # rolling upgrade
    else:
        version = test_input.test_params['version']
        appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))
        # rebalance node out
        # remove membase from node
        # install destination version onto node
        # rebalance it back into the cluster
        for server_index in range(len(servers)):
            server = servers[server_index]
            # drive the rebalance from the previous node in the ring
            master = servers[server_index - 1]
            log.info("current master is {0}, rolling node is {1}".format(master, server))
            rest = RestConnection(master)
            nodes = rest.node_statuses()
            allNodes = []
            toBeEjectedNodes = []
            for node in nodes:
                allNodes.append(node.id)
                if "{0}:{1}".format(node.ip, node.port) == "{0}:{1}".format(server.ip, server.port):
                    toBeEjectedNodes.append(node.id)
            helper = RestHelper(rest)
            removed = helper.remove_nodes(knownNodes=allNodes,
                                          ejectedNodes=toBeEjectedNodes)
            self.assertTrue(removed,
                            msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
            remote = RemoteMachineShellConnection(server)
            remote.download_build(appropriate_build)
            # if initial version is 180
            # Don't uninstall the server
            if not initial_version.startswith('1.8.0'):
                remote.membase_uninstall()
                remote.couchbase_uninstall()
                remote.membase_install(appropriate_build)
            else:
                remote.membase_upgrade(appropriate_build)
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
            log.info("sleep for 10 seconds to wait for membase-server to start...")
            time.sleep(TIMEOUT_SECS)
            rest.init_cluster_port(rest_settings.rest_username,
                                   rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
            #readd this to the cluster
            ClusterOperationHelper.add_all_nodes_or_assert(master, [server],
                                                           rest_settings, self)
            nodes = rest.node_statuses()
            otpNodeIds = [node.id for node in nodes]
            # Issue rest call to the newly added node
            # MB-5108
            if do_new_rest:
                master = server
                rest = RestConnection(master)
            rebalanceStarted = rest.rebalance(otpNodeIds, [])
            self.assertTrue(rebalanceStarted,
                            "unable to start rebalance on master node {0}".format(master.ip))
            log.info('started rebalance operation on master node {0}'.format(master.ip))
            rebalanceSucceeded = rest.monitorRebalance()
            self.assertTrue(rebalanceSucceeded,
                            "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
        #TODO: how can i verify that the cluster init config is preserved
        # verify data on upgraded nodes
        if create_buckets:
            self.assertTrue(
                BucketOperationHelper.wait_for_bucket_creation(
                    'default', RestConnection(master)),
                msg="bucket 'default' does not exist..")
        if insert_data:
            self._verify_data(master, rest, inserted_keys)
            rest = RestConnection(master)
            buckets = rest.get_buckets()
            for bucket in buckets:
                BucketOperationHelper.keys_exist_or_assert(
                    bucket_data[bucket.name]["inserted_keys"],
                    master, bucket.name, self)
# Standalone helper script (Python 2): given a build version on the command
# line, look up that build and save its change-log to ./changes.txt.
import sys
import urllib2
# make the project's lib/ package importable before importing from it
sys.path.append('lib')
from builds.build_query import BuildQuery
if __name__ == "__main__":
    args = sys.argv
    # expects exactly one argument: the build version string
    if len(args) == 2:
        version = args[1]
        builds, changes = BuildQuery().get_all_builds(version=version)
        build = BuildQuery().find_membase_build_with_version(builds, version)
        # only builds that carry a change-log URL can be fetched
        if build.change and build.change.url:
            try:
                print "downloading {0}".format(build.change.url)
                page = urllib2.urlopen(build.change.url)
                # NOTE(review): rebinds 'changes' (previously the list from
                # get_all_builds) to a file handle, and the handle is not
                # closed if write() raises -- consider a 'with' block.
                changes = open('changes.txt', 'w')
                changes.write('{0}'.format(page.read()))
                changes.close()
            except urllib2.HTTPError as error:
                # best-effort: report the failure and exit normally
                print 'unable to download {0}'.format(build.change.url)
def test_backup_upgrade_restore_default(self):
    """Backup on the old version, upgrade the cluster, restore, verify keys.

    Requires >= 2 servers; the last server is reserved as the "worker" that
    runs cbbackup/cbrestore against the cluster formed from the rest.
    Flow: (optionally) downgrade nodes to initial_version -> load data ->
    cbbackup -> delete bucket -> upgrade all nodes to final_version ->
    recreate bucket -> cbrestore -> assert all inserted keys exist ->
    tear down and (optionally) reinstall the original version.
    """
    if len(self.servers) < 2:
        self.log.error("At least 2 servers required for this test ..")
        return
    # keep the full server list so it can be restored at the end
    original_set = copy.copy(self.servers)
    # last server acts as the backup/restore worker; the rest form the cluster
    worker = self.servers[len(self.servers) - 1]
    self.servers = self.servers[:len(self.servers) - 1]
    # 'fin' = version currently installed on the master (from VERSION.txt)
    shell = RemoteMachineShellConnection(self.master)
    o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
    fin = o[0]
    shell.disconnect()
    initial_version = self.input.param("initial_version", fin)
    final_version = self.input.param("final_version", fin)
    if initial_version == final_version:
        self.log.error("Same initial and final versions ..")
        return
    if not final_version.startswith('2.0'):
        self.log.error("Upgrade test not set to run from 1.8.1 -> 2.0 ..")
        return
    builds, changes = BuildQuery().get_all_builds()
    product = 'couchbase-server-enterprise'
    #CASE where the worker isn't a 2.0+
    worker_flag = 0
    shell = RemoteMachineShellConnection(worker)
    o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
    temp = o[0]
    if not temp.startswith('2.0'):
        worker_flag = 1
    if worker_flag == 1:
        # worker must run final_version tooling (cbbackup/cbrestore)
        self.log.info(
            "Loading version {0} on worker.. ".format(final_version))
        remote = RemoteMachineShellConnection(worker)
        info = remote.extract_remote_info()
        older_build = BuildQuery().find_build(builds, product,
                                              info.deliverable_type,
                                              info.architecture_type,
                                              final_version)
        remote.stop_couchbase()
        remote.couchbase_uninstall()
        remote.download_build(older_build)
        remote.install_server(older_build)
        remote.disconnect()
    # remote directory on the worker that holds the backup files
    remote_tmp = "{1}/{0}".format("backup", "/root")
    perm_comm = "mkdir -p {0}".format(remote_tmp)
    if not initial_version == fin:
        # downgrade/align all cluster nodes to initial_version first
        for server in self.servers:
            remote = RemoteMachineShellConnection(server)
            info = remote.extract_remote_info()
            self.log.info(
                "Loading version .. {0}".format(initial_version))
            older_build = BuildQuery().find_build(builds, product,
                                                  info.deliverable_type,
                                                  info.architecture_type,
                                                  initial_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(older_build)
            remote.install_server(older_build)
            rest = RestConnection(server)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(server.rest_username, server.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
    self.common_setUp()
    bucket = "default"
    if len(self.servers) > 1:
        self.add_nodes_and_rebalance()
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    # bucket quota = 2/3 of the node's memory quota
    size = int(info.memoryQuota * 2.0 / 3.0)
    rest.create_bucket(bucket, ramQuotaMB=size)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached_failed")
    # value-size distribution: size-in-bytes -> fraction of keys
    distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
    inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(
        servers=[self.master],
        name=bucket,
        ram_load_ratio=0.5,
        value_size_distribution=distribution,
        moxi=True,
        write_only=True,
        delete_ratio=0.1,
        number_of_threads=2)
    if len(self.servers) > 1:
        rest = RestConnection(self.master)
        self.assertTrue(RebalanceHelper.wait_for_replication(
            rest.get_nodes(), timeout=180),
            msg="replication did not complete")
    # wait until all mutations are persisted before backing up
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                  'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket,
                                                  'ep_flusher_todo', 0)
    # NOTE(review): message copy-pasted from the ep_queue_size check above;
    # this one actually waits on ep_flusher_todo
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    node = RestConnection(self.master).get_nodes_self()
    shell = RemoteMachineShellConnection(worker)
    o, r = shell.execute_command(perm_comm)
    shell.log_command_output(o, r)
    shell.disconnect()
    #Backup
    #BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
    shell = RemoteMachineShellConnection(worker)
    shell.execute_command(
        "/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
            self.master.ip, self.master.port, remote_tmp))
    shell.disconnect()
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
    time.sleep(30)
    #Upgrade
    for server in self.servers:
        self.log.info(
            "Upgrading to current version {0}".format(final_version))
        remote = RemoteMachineShellConnection(server)
        info = remote.extract_remote_info()
        new_build = BuildQuery().find_build(builds, product,
                                            info.deliverable_type,
                                            info.architecture_type,
                                            final_version)
        remote.stop_couchbase()
        remote.couchbase_uninstall()
        remote.download_build(new_build)
        remote.install_server(new_build)
        rest = RestConnection(server)
        RestHelper(rest).is_ns_server_running(
            testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster(server.rest_username, server.rest_password)
        rest.init_cluster_memoryQuota(
            memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        remote.disconnect()
    time.sleep(30)
    #Restore
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    size = int(info.memoryQuota * 2.0 / 3.0)
    rest.create_bucket(bucket, ramQuotaMB=size)
    # NOTE(review): 'server' here is the last server from the upgrade loop
    # above, not self.master -- presumably intended to be the master; verify
    ready = BucketOperationHelper.wait_for_memcached(server, bucket)
    self.assertTrue(ready, "wait_for_memcached_failed")
    #BackupHelper(self.master, self).restore(backup_location=remote_tmp, moxi_port=info.moxi)
    shell = RemoteMachineShellConnection(worker)
    shell.execute_command(
        "/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
            self.master.ip, self.master.port, remote_tmp, bucket))
    shell.disconnect()
    time.sleep(60)
    # every key written before the backup must exist after the restore
    keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(
        inserted_keys, self.master, bucket, self, concurrency=4)
    self.assertTrue(keys_exist, msg="unable to verify keys after restore")
    time.sleep(30)
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
    rest = RestConnection(self.master)
    helper = RestHelper(rest)
    nodes = rest.node_statuses()
    master_id = rest.get_nodes_self().id
    if len(self.servers) > 1:
        # eject every node except the master to tear the cluster down
        removed = helper.remove_nodes(
            knownNodes=[node.id for node in nodes],
            ejectedNodes=[
                node.id for node in nodes if node.id != master_id
            ],
            wait_for_rebalance=True)
    shell = RemoteMachineShellConnection(worker)
    shell.remove_directory(remote_tmp)
    shell.disconnect()
    # restore the original server list for subsequent tests
    self.servers = copy.copy(original_set)
    if initial_version == fin:
        # put the originally-installed version back on every node
        builds, changes = BuildQuery().get_all_builds()
        for server in self.servers:
            remote = RemoteMachineShellConnection(server)
            info = remote.extract_remote_info()
            self.log.info(
                "Loading version .. {0}".format(initial_version))
            older_build = BuildQuery().find_build(builds, product,
                                                  info.deliverable_type,
                                                  info.architecture_type,
                                                  initial_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(older_build)
            remote.install_server(older_build)
            rest = RestConnection(server)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(server.rest_username, server.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()
def build_url(self, params):
    """Resolve a downloadable build for the server described in *params*.

    Recognized keys (with short aliases): "v"/"version", "p"/"product",
    "server", plus optional "toy", "type", "openssl", "url" (direct build
    URL), "timeout" and "amazon".  The first live matching build is
    returned; raises Exception when no build can be found, exits when the
    resolved URL is dead.
    """
    _errors = []
    version = ''
    server = ''
    openssl = ''
    names = []
    direct_build_url = None
    # Expand the short aliases: "v" -> "version", "p" -> "product".
    tmp = {}
    for k in params:
        value = params[k]
        if k == "v":
            tmp["version"] = value
        elif k == "p":
            # BUG FIX: "p" must map to "product" (the old code wrote it into
            # "version", clobbering the version and leaving product unset).
            tmp["product"] = value
        else:
            tmp[k] = value
    params = tmp
    ok = True
    # BUG FIX: was 'not "version" in params and len(params["version"]) < 5',
    # which raised KeyError when version was missing and accepted too-short
    # version strings; 'or' rejects both cases as intended.
    if "version" not in params or len(params["version"]) < 5:
        _errors.append(errors["INVALID-PARAMS"])
        ok = False
    else:
        version = params["version"]
    if ok:
        if "product" not in params:
            _errors.append(errors["INVALID-PARAMS"])
            ok = False
    if ok:
        if "server" not in params:
            _errors.append(errors["INVALID-PARAMS"])
            ok = False
        else:
            server = params["server"]
    if ok:
        # toy builds are optional; default to the empty string
        if "toy" in params:
            toy = params["toy"]
        else:
            toy = ""
    if ok:
        if "openssl" in params:
            openssl = params["openssl"]
    if ok:
        if "url" in params and params["url"] != "":
            direct_build_url = params["url"]
    if ok:
        # Map the product alias onto the candidate package names to try.
        mb_alias = ["membase", "membase-server", "mbs", "mb"]
        cb_alias = ["couchbase", "couchbase-server", "cb"]
        css_alias = ["couchbase-single", "couchbase-single-server", "css"]
        moxi_alias = ["moxi", "moxi-server"]
        if params["product"] in mb_alias:
            names = ['membase-server-enterprise', 'membase-server-community']
        elif params["product"] in cb_alias:
            if "type" in params and params["type"].lower() in "couchbase-server-community":
                names = ['couchbase-server-community']
            elif "type" in params and params["type"].lower() in "couchbase-server-enterprise":
                names = ['couchbase-server-enterprise']
            else:
                names = ['couchbase-server-enterprise', 'couchbase-server-community']
        elif params["product"] in css_alias:
            names = ['couchbase-single-server-enterprise',
                     'couchbase-single-server-community']
        elif params["product"] in moxi_alias:
            names = ['moxi-server']
        else:
            ok = False
            _errors.append(errors["INVALID-PARAMS"])
        # openssl 1.x builds ship under distro-specific package names
        if "1" in openssl:
            names = ['couchbase-server-enterprise_centos6',
                     'couchbase-server-community_centos6',
                     'couchbase-server-enterprise_ubuntu_1204',
                     'couchbase-server-community_ubuntu_1204']
        if "toy" in params:
            if "1" in openssl:
                names = ['couchbase-server-community_cent64']
            else:
                names = ['couchbase-server-community_cent58']
    # OS / architecture of the target machine decide the deliverable type.
    remote_client = RemoteMachineShellConnection(server)
    info = remote_client.extract_remote_info()
    remote_client.disconnect()
    if ok:
        timeout = 300
        if "timeout" in params:
            timeout = int(params["timeout"])
        # released versions live in the releases repo, not latestbuilds
        releases_version = ["1.6.5.4", "1.7.0", "1.7.1", "1.7.1.1", "1.8.0"]
        cb_releases_version = ["1.8.1", "2.0.0", "2.0.1", "2.1.0", "2.1.1",
                               "2.2.0", "2.5.0", "2.5.1"]
        build_repo = MV_LATESTBUILD_REPO
        if version[:3] == "3.5":
            build_repo = SHERLOCK_BUILD_REPO
        for name in names:
            if version in releases_version:
                build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=version,
                    product='membase-server-enterprise')
            elif version in cb_releases_version:
                build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=version,
                    product=name)
            else:
                builds, changes = BuildQuery().get_all_builds(
                    version=version, timeout=timeout,
                    direct_build_url=direct_build_url,
                    deliverable_type=info.deliverable_type,
                    architecture_type=info.architecture_type,
                    edition_type=name,
                    repo=build_repo, toy=toy,
                    distribution_version=info.distribution_version.lower(),
                    distribution_type=info.distribution_type.lower())
                build = BuildQuery().find_build(
                    builds, name, info.deliverable_type,
                    info.architecture_type, version, toy=toy,
                    openssl=openssl, direct_build_url=direct_build_url,
                    distribution_version=info.distribution_version.lower(),
                    distribution_type=info.distribution_type.lower())
            if build:
                if 'amazon' in params:
                    # Inside EC2 the northscale build host is unreachable;
                    # rewrite the URL to the public mirrors.
                    os_type = info.type.lower()
                    if os_type == 'windows' and version in releases_version:
                        build.url = build.url.replace("http://builds.hq.northscale.net",
                                                      "https://s3.amazonaws.com/packages.couchbase")
                        build.url = build.url.replace("enterprise", "community")
                        build.name = build.name.replace("enterprise", "community")
                    else:
                        """ since url in S3 insert version into it, we need to put
                            version in like ..latestbuilds/3.0.0/... """
                        cb_version = version[:5]
                        build.url = build.url.replace(
                            "http://builds.hq.northscale.net/latestbuilds",
                            "http://packages.northscale.com/latestbuilds/{0}".format(cb_version))
                        """ test enterprise version """
                        #build.url = build.url.replace("enterprise", "community")
                        #build.name = build.name.replace("enterprise", "community")
                """ check if URL is live """
                remote_client = RemoteMachineShellConnection(server)
                if remote_client.is_url_live(build.url):
                    return build
                else:
                    sys.exit("ERROR: URL is not good. Check URL again")
    _errors.append(errors["BUILD-NOT-FOUND"])
    msg = "unable to find a build for product {0} version {1} for package_type {2}"
    raise Exception(msg.format(names, version, info.deliverable_type))