def incremental_offline_upgrade(self):
    """Offline-upgrade both XDCR clusters one node at a time.

    The ``upgrade_seq`` test input selects the node ordering:
    "src>dest" (all source nodes first), "src<dest" (all destination
    nodes first) or "src><dest" (alternating source/destination pairs).
    Any other value leaves the list empty, so nothing is upgraded.
    After every single-node upgrade a fresh, uniquely-prefixed data set
    is loaded and replication is re-verified.
    """
    sequence = self.input.param("upgrade_seq", "src>dest")
    self._install(self.servers[:self.src_init + self.dest_init])
    PauseResumeXDCRBaseTest.setUp(self)
    # A short restart interval lets replication recover quickly after a
    # node goes offline for its upgrade.
    self.set_xdcr_param('xdcrFailureRestartInterval', 1)
    self.sleep(60)
    # Seed both source-side buckets with the default creation generator.
    for src_bucket_name in ('default', 'bucket0'):
        seeded = self._get_bucket(self, src_bucket_name, self.src_master)
        self._load_bucket(seeded, self.src_master, self.gen_create, 'create', exp=0)
    # Bi-directional traffic: a distinct 'loadTwo' key set goes into
    # bucket0 on the destination cluster.
    remote_bucket = self._get_bucket(self, 'bucket0', self.dest_master)
    remote_gen = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
    self._load_bucket(remote_bucket, self.dest_master, remote_gen, 'create', exp=0)
    self.sleep(self.wait_timeout)
    self._wait_for_replication_to_catchup()
    # Decide the node upgrade order from the test parameter.
    pending = []
    if sequence == "src>dest":
        pending = copy.copy(self.src_nodes)
        pending.extend(self.dest_nodes)
    elif sequence == "src<dest":
        pending = copy.copy(self.dest_nodes)
        pending.extend(self.src_nodes)
    elif sequence == "src><dest":
        # Alternate one source node with one destination node; zip
        # truncates at the smaller of the two clusters.
        for src_node, dest_node in zip(self.src_nodes, self.dest_nodes):
            pending.append(src_node)
            pending.append(dest_node)
    for step, upgrading in enumerate(pending):
        self._offline_upgrade([upgrading])
        self.set_xdcr_param('xdcrFailureRestartInterval', 1)
        self.sleep(60)
        # Per-iteration unique prefixes ("loadThree", "loadThreea", ...)
        # keep the key sets of different rounds disjoint.
        loaded = self._get_bucket(self, 'bucket0', self.src_master)
        prefix = "loadThree" + step * 'a'
        self._load_bucket(loaded, self.src_master,
                          BlobGenerator(prefix, prefix, self._value_size, end=self.num_items),
                          'create', exp=0)
        loaded = self._get_bucket(self, 'default', self.src_master)
        prefix = "loadFour" + step * 'a'
        self._load_bucket(loaded, self.src_master,
                          BlobGenerator(prefix, prefix, self._value_size, end=self.num_items),
                          'create', exp=0)
        self.sleep(60)
        # Merge client-side expectations and verify replication stats.
        merged = self._get_bucket(self, 'bucket0', self.src_master)
        self.do_merge_bucket(self.src_master, self.dest_master, True, merged)
        merged = self._get_bucket(self, 'default', self.src_master)
        self.do_merge_bucket(self.src_master, self.dest_master, False, merged)
        self.verify_xdcr_stats(self.src_nodes, self.dest_nodes, True)
    self.sleep(self.wait_timeout * 5, "Let clusters work for some time")
def online_cluster_upgrade(self):
    """Online (swap-rebalance) upgrade of the source cluster, then the
    destination cluster, with loads/deletes/updates and XDCR
    verification around each phase.

    Spare servers beyond ``src_init + dest_init`` are pre-installed with
    the upgrade version and used as swap targets (see _online_upgrade).
    """
    # Initial version on the clusters under test ...
    self._install(self.servers[:self.src_init + self.dest_init])
    # ... upgrade version on the spare swap nodes (initial_version is
    # temporarily switched so _install picks the upgrade build).
    prev_initial_version = self.initial_version
    self.initial_version = self.upgrade_versions[0]
    self._install(self.servers[self.src_init + self.dest_init:])
    PauseResumeXDCRBaseTest.setUp(self)
    # XDCR pause/resume is only available from 3.0.0; disable it when the
    # clusters start on an older version.  NOTE(review): lexicographic
    # string compare — breaks for two-digit majors ("10.0.0" < "3.0.0").
    if prev_initial_version < "3.0.0":
        self.pause_xdcr_cluster = ""
    bucket_default = self._get_bucket(self, 'default', self.src_master)
    bucket_sasl = self._get_bucket(self, 'bucket0', self.src_master)
    bucket_standard = self._get_bucket(self, 'standard_bucket0', self.dest_master)
    self._load_bucket(bucket_default, self.src_master, self.gen_create, 'create', exp=0)
    self._load_bucket(bucket_sasl, self.src_master, self.gen_create, 'create', exp=0)
    self._load_bucket(bucket_standard, self.dest_master, self.gen_create, 'create', exp=0)
    # Bi-directional traffic: distinct 'loadTwo-' keys into bucket0 on
    # the destination cluster.
    gen_create2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size, end=self.num_items)
    self._load_bucket(bucket_sasl, self.dest_master, gen_create2, 'create', exp=0)
    if self.pause_xdcr_cluster != "":
        self.pause_xdcr()
    # Upgrade the source cluster: move it onto the pre-upgraded spares,
    # reinstall the upgrade build on the original nodes, then move back.
    self._online_upgrade(self.src_nodes, self.servers[self.src_init + self.dest_init:])
    self._install(self.src_nodes)
    self._online_upgrade(self.servers[self.src_init + self.dest_init:], self.src_nodes, False)
    # Mutations while only the source side is upgraded.
    self._load_bucket(bucket_default, self.src_master, self.gen_delete, 'delete', exp=0)
    self._load_bucket(bucket_default, self.src_master, self.gen_update, 'create', exp=self._expires)
    self._load_bucket(bucket_sasl, self.src_master, self.gen_delete, 'delete', exp=0)
    self._load_bucket(bucket_sasl, self.src_master, self.gen_update, 'create', exp=self._expires)
    self.sleep(120)
    # Same swap dance for the destination cluster.
    self._install(self.servers[self.src_init + self.dest_init:])
    self._online_upgrade(self.dest_nodes, self.servers[self.src_init + self.dest_init:])
    self._install(self.dest_nodes)
    self._online_upgrade(self.servers[self.src_init + self.dest_init:], self.dest_nodes, False)
    self._load_bucket(bucket_standard, self.dest_master, self.gen_delete, 'delete', exp=0)
    self._load_bucket(bucket_standard, self.dest_master, self.gen_update, 'create', exp=self._expires)
    if self.pause_xdcr_cluster != "":
        self.resume_xdcr()
    self.do_merge_bucket(self.src_master, self.dest_master, True, bucket_sasl)
    # Re-fetch bucket0 from the destination side for the reverse-direction
    # mutations and merges below.
    bucket_sasl = self._get_bucket(self, 'bucket0', self.dest_master)
    # Delete the tail and update the head of the 'loadTwo-' key range.
    gen_delete2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size,
                                start=int((self.num_items) * (float)(100 - self._percent_delete) / 100),
                                end=self.num_items)
    gen_update2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size, start=0,
                                end=int(self.num_items * (float)(self._percent_update) / 100))
    self._load_bucket(bucket_sasl, self.dest_master, gen_delete2, 'delete', exp=0)
    self._load_bucket(bucket_sasl, self.dest_master, gen_update2, 'create', exp=self._expires)
    self.do_merge_bucket(self.dest_master, self.src_master, False, bucket_sasl)
    self.do_merge_bucket(self.src_master, self.dest_master, False, bucket_default)
    self.do_merge_bucket(self.dest_master, self.src_master, False, bucket_standard)
    self.sleep(120)
    self._post_upgrade_ops()
    self.sleep(120)
    self.verify_xdcr_stats(self.src_nodes, self.dest_nodes, True)
    self.max_verify = None
    # Check design-doc/view row counts against the client-side kvstores
    # on whichever side(s) created ddocs.
    if self.ddocs_src:
        for bucket_name in self.buckets_on_src:
            bucket = self._get_bucket(self, bucket_name, self.src_master)
            expected_rows = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_src, self.src_master)
    if self.ddocs_dest:
        for bucket_name in self.buckets_on_dest:
            bucket = self._get_bucket(self, bucket_name, self.dest_master)
            expected_rows = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_dest, self.dest_master)
def offline_cluster_upgrade(self):
    """Take entire cluster(s) offline for upgrade — source, destination
    or both, per the 'upgrade_nodes' test input ("src;dest") — then
    reload data and verify XDCR replication afterwards."""
    self._install(self.servers[:self.src_init + self.dest_init])
    upgrade_nodes = self.input.param('upgrade_nodes', "src").split(";")
    PauseResumeXDCRBaseTest.setUp(self)
    self.set_xdcr_param('xdcrFailureRestartInterval', 1)
    # XDCR pause/resume is only available from 3.0.0; disable it for
    # older initial versions.  NOTE(review): lexicographic string
    # compare — breaks for two-digit majors ("10.0.0" < "3.0.0").
    if self.initial_version < "3.0.0":
        self.pause_xdcr_cluster = ""
    self.sleep(60)
    # Seed the source-side buckets.
    bucket = self._get_bucket(self, 'default', self.src_master)
    self._operations()
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    bucket = self._get_bucket(self, 'bucket0', self.src_master)
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    # Bi-directional traffic: distinct 'loadTwo' keys on the destination
    # side of bucket0.
    bucket = self._get_bucket(self, 'bucket0', self.dest_master)
    gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.dest_master, gen_create2, 'create', exp=0)
    # Collect every node of the cluster(s) selected for upgrade.
    nodes_to_upgrade = []
    if "src" in upgrade_nodes:
        nodes_to_upgrade += self.src_nodes
    if "dest" in upgrade_nodes:
        nodes_to_upgrade += self.dest_nodes
    self.sleep(60)
    self._wait_for_replication_to_catchup()
    if self.pause_xdcr_cluster != "":
        self.pause_xdcr()
    # All selected nodes go down together and come back upgraded.
    self._offline_upgrade(nodes_to_upgrade)
    # Optionally require encryption on the remote-cluster references
    # after the upgrade; only attempted when BOTH sides were upgraded and
    # the target version supports it (>= 2.5.0; same lexicographic
    # version-compare caveat as above).
    if self._use_encryption_after_upgrade and "src" in upgrade_nodes and "dest" in upgrade_nodes and self.upgrade_versions[0] >= "2.5.0":
        if "src" in self._use_encryption_after_upgrade:
            src_remote_clusters = RestConnection(self.src_master).get_remote_clusters()
            for remote_cluster in src_remote_clusters:
                self._modify_clusters(None, self.src_master, remote_cluster['name'],
                                      self.dest_master, require_encryption=1)
        if "dest" in self._use_encryption_after_upgrade:
            dest_remote_clusters = RestConnection(self.dest_master).get_remote_clusters()
            for remote_cluster in dest_remote_clusters:
                self._modify_clusters(None, self.dest_master, remote_cluster['name'],
                                      self.src_master, require_encryption=1)
    self.set_xdcr_param('xdcrFailureRestartInterval', 1)
    self.sleep(60)
    # Post-upgrade mutations on both sides of bucket0.
    bucket = self._get_bucket(self, 'bucket0', self.src_master)
    gen_create3 = BlobGenerator('loadThree', 'loadThree', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.src_master, gen_create3, 'create', exp=0)
    bucket = self._get_bucket(self, 'bucket0', self.dest_master)
    gen_create4 = BlobGenerator('loadFour', 'loadFour', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.dest_master, gen_create4, 'create', exp=0)
    if self.pause_xdcr_cluster != "":
        self.resume_xdcr()
    # NOTE(review): `bucket` still refers to the destination's 'bucket0'
    # at this point, so that object is handed to the bidirectional merge.
    self.do_merge_bucket(self.src_master, self.dest_master, True, bucket)
    bucket = self._get_bucket(self, 'default', self.src_master)
    self._load_bucket(bucket, self.src_master, gen_create2, 'create', exp=0)
    self.do_merge_bucket(self.src_master, self.dest_master, False, bucket)
    self.sleep(60)
    self._post_upgrade_ops()
    self.sleep(60)
    self.verify_xdcr_stats(self.src_nodes, self.dest_nodes, True)
    self.max_verify = None
    # Check design-doc/view row counts against the client-side kvstores
    # on whichever side(s) created ddocs.
    if self.ddocs_src:
        for bucket_name in self.buckets_on_src:
            bucket = self._get_bucket(self, bucket_name, self.src_master)
            expected_rows = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_src, self.src_master)
    if self.ddocs_dest:
        for bucket_name in self.buckets_on_dest:
            bucket = self._get_bucket(self, bucket_name, self.dest_master)
            expected_rows = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_dest, self.dest_master)
def incremental_offline_upgrade(self):
    """Offline-upgrade the XDCR setup a single node at a time, pushing a
    fresh, uniquely-prefixed data set and re-checking replication after
    every node.  The 'upgrade_seq' test input controls the ordering:
    "src>dest", "src<dest" or "src><dest" (alternating); anything else
    upgrades nothing."""
    seq_param = self.input.param("upgrade_seq", "src>dest")
    self._install(self.servers[:self.src_init + self.dest_init])
    PauseResumeXDCRBaseTest.setUp(self)
    # Keep the replicator restart interval short so replication recovers
    # quickly after each node outage.
    self.set_xdcr_param('xdcrFailureRestartInterval', 1)
    self.sleep(60)
    default_bucket = self._get_bucket(self, 'default', self.src_master)
    self._load_bucket(default_bucket, self.src_master, self.gen_create, 'create', exp=0)
    sasl_bucket = self._get_bucket(self, 'bucket0', self.src_master)
    self._load_bucket(sasl_bucket, self.src_master, self.gen_create, 'create', exp=0)
    # Bi-directional traffic: separate 'loadTwo' keys on the destination.
    sasl_bucket = self._get_bucket(self, 'bucket0', self.dest_master)
    two_gen = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
    self._load_bucket(sasl_bucket, self.dest_master, two_gen, 'create', exp=0)
    self.sleep(self.wait_timeout)
    self._wait_for_replication_to_catchup()
    # Build the ordered upgrade queue requested by the test parameter.
    if seq_param == "src>dest":
        queue = copy.copy(self.src_nodes)
        queue.extend(self.dest_nodes)
    elif seq_param == "src<dest":
        queue = copy.copy(self.dest_nodes)
        queue.extend(self.src_nodes)
    elif seq_param == "src><dest":
        # Alternate sides, limited by the smaller cluster.
        queue = []
        pair_count = min(len(self.src_nodes), len(self.dest_nodes))
        for idx in xrange(pair_count):
            queue.append(self.src_nodes[idx])
            queue.append(self.dest_nodes[idx])
    else:
        queue = []
    for round_no, victim in enumerate(queue):
        self._offline_upgrade([victim])
        self.set_xdcr_param('xdcrFailureRestartInterval', 1)
        self.sleep(60)
        # Round-unique prefixes ("loadThree", "loadThreea", ...) keep the
        # key sets of different rounds disjoint.
        work_bucket = self._get_bucket(self, 'bucket0', self.src_master)
        tag = "loadThree" + round_no * 'a'
        self._load_bucket(work_bucket, self.src_master,
                          BlobGenerator(tag, tag, self._value_size, end=self.num_items),
                          'create', exp=0)
        work_bucket = self._get_bucket(self, 'default', self.src_master)
        tag = "loadFour" + round_no * 'a'
        self._load_bucket(work_bucket, self.src_master,
                          BlobGenerator(tag, tag, self._value_size, end=self.num_items),
                          'create', exp=0)
        self.sleep(60)
        # Merge client-side expectations and verify replication stats.
        work_bucket = self._get_bucket(self, 'bucket0', self.src_master)
        self.do_merge_bucket(self.src_master, self.dest_master, True, work_bucket)
        work_bucket = self._get_bucket(self, 'default', self.src_master)
        self.do_merge_bucket(self.src_master, self.dest_master, False, work_bucket)
        self.verify_xdcr_stats(self.src_nodes, self.dest_nodes)
    self.sleep(self.wait_timeout * 5, "Let clusters work for some time")
def online_cluster_upgrade(self):
    """Online (swap-rebalance) upgrade of the source cluster, then the
    destination cluster, with loads/deletes/updates and XDCR
    verification around each phase.

    Spare servers beyond ``src_init + dest_init`` are pre-installed with
    the upgrade version and used as swap targets (see _online_upgrade).
    """
    # Initial version on the clusters under test ...
    self._install(self.servers[:self.src_init + self.dest_init])
    # ... upgrade version on the spare swap nodes (initial_version is
    # temporarily switched so _install picks the upgrade build).
    prev_initial_version = self.initial_version
    self.initial_version = self.upgrade_versions[0]
    self._install(self.servers[self.src_init + self.dest_init:])
    PauseResumeXDCRBaseTest.setUp(self)
    # XDCR pause/resume is only available from 3.0.0; disable it when the
    # clusters start on an older version.  NOTE(review): lexicographic
    # string compare — breaks for two-digit majors ("10.0.0" < "3.0.0").
    if prev_initial_version < "3.0.0":
        self.pause_xdcr_cluster = ""
    bucket_default = self._get_bucket(self, 'default', self.src_master)
    bucket_sasl = self._get_bucket(self, 'bucket0', self.src_master)
    bucket_standard = self._get_bucket(self, 'standard_bucket0', self.dest_master)
    self._load_bucket(bucket_default, self.src_master, self.gen_create, 'create', exp=0)
    self._load_bucket(bucket_sasl, self.src_master, self.gen_create, 'create', exp=0)
    self._load_bucket(bucket_standard, self.dest_master, self.gen_create, 'create', exp=0)
    # Bi-directional traffic: distinct 'loadTwo-' keys into bucket0 on
    # the destination cluster.
    gen_create2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size, end=self.num_items)
    self._load_bucket(bucket_sasl, self.dest_master, gen_create2, 'create', exp=0)
    if self.pause_xdcr_cluster != "":
        self.pause_xdcr()
    # Upgrade the source cluster: move it onto the pre-upgraded spares,
    # reinstall the upgrade build on the original nodes, then move back.
    self._online_upgrade(self.src_nodes, self.servers[self.src_init + self.dest_init:])
    self._install(self.src_nodes)
    self._online_upgrade(self.servers[self.src_init + self.dest_init:], self.src_nodes, False)
    # Mutations while only the source side is upgraded.
    self._load_bucket(bucket_default, self.src_master, self.gen_delete, 'delete', exp=0)
    self._load_bucket(bucket_default, self.src_master, self.gen_update, 'create', exp=self._expires)
    self._load_bucket(bucket_sasl, self.src_master, self.gen_delete, 'delete', exp=0)
    self._load_bucket(bucket_sasl, self.src_master, self.gen_update, 'create', exp=self._expires)
    self.sleep(120)
    # Same swap dance for the destination cluster (with settle time
    # between the steps).
    self._install(self.servers[self.src_init + self.dest_init:])
    self.sleep(60)
    self._online_upgrade(self.dest_nodes, self.servers[self.src_init + self.dest_init:])
    self._install(self.dest_nodes)
    self.sleep(60)
    self._online_upgrade(self.servers[self.src_init + self.dest_init:], self.dest_nodes, False)
    self._load_bucket(bucket_standard, self.dest_master, self.gen_delete, 'delete', exp=0)
    self._load_bucket(bucket_standard, self.dest_master, self.gen_update, 'create', exp=self._expires)
    if self.pause_xdcr_cluster != "":
        self.resume_xdcr()
    self.do_merge_bucket(self.src_master, self.dest_master, True, bucket_sasl)
    # Re-fetch bucket0 from the destination side for the reverse-direction
    # mutations and merges below.
    bucket_sasl = self._get_bucket(self, 'bucket0', self.dest_master)
    # Delete the tail and update the head of the 'loadTwo-' key range.
    gen_delete2 = BlobGenerator(
        'loadTwo', 'loadTwo-', self._value_size,
        start=int((self.num_items) * (float)(100 - self._percent_delete) / 100),
        end=self.num_items)
    gen_update2 = BlobGenerator(
        'loadTwo', 'loadTwo-', self._value_size, start=0,
        end=int(self.num_items * (float)(self._percent_update) / 100))
    self._load_bucket(bucket_sasl, self.dest_master, gen_delete2, 'delete', exp=0)
    self._load_bucket(bucket_sasl, self.dest_master, gen_update2, 'create', exp=self._expires)
    self.do_merge_bucket(self.dest_master, self.src_master, False, bucket_sasl)
    self.do_merge_bucket(self.src_master, self.dest_master, False, bucket_default)
    self.do_merge_bucket(self.dest_master, self.src_master, False, bucket_standard)
    self.sleep(120)
    self._post_upgrade_ops()
    self.sleep(120)
    self.verify_xdcr_stats(self.src_nodes, self.dest_nodes)
    self.max_verify = None
    # Check design-doc/view row counts against the client-side kvstores
    # on whichever side(s) created ddocs.
    if self.ddocs_src:
        for bucket_name in self.buckets_on_src:
            bucket = self._get_bucket(self, bucket_name, self.src_master)
            expected_rows = sum(
                [len(kv_store) for kv_store in bucket.kvs.values()])
            self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_src, self.src_master)
    if self.ddocs_dest:
        for bucket_name in self.buckets_on_dest:
            bucket = self._get_bucket(self, bucket_name, self.dest_master)
            expected_rows = sum(
                [len(kv_store) for kv_store in bucket.kvs.values()])
            self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_dest, self.dest_master)
def offline_cluster_upgrade(self):
    """Take entire cluster(s) offline for upgrade — source, destination
    or both, per the 'upgrade_nodes' test input ("src;dest") — then
    reload data and verify XDCR replication afterwards."""
    self._install(self.servers[:self.src_init + self.dest_init])
    upgrade_nodes = self.input.param('upgrade_nodes', "src").split(";")
    PauseResumeXDCRBaseTest.setUp(self)
    self.set_xdcr_param('xdcrFailureRestartInterval', 1)
    # XDCR pause/resume is only available from 3.0.0; disable it for
    # older initial versions.  NOTE(review): lexicographic string
    # compare — breaks for two-digit majors ("10.0.0" < "3.0.0").
    if self.initial_version < "3.0.0":
        self.pause_xdcr_cluster = ""
    self.sleep(60)
    # Seed the source-side buckets.
    bucket = self._get_bucket(self, 'default', self.src_master)
    self._operations()
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    bucket = self._get_bucket(self, 'bucket0', self.src_master)
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    # Bi-directional traffic: distinct 'loadTwo' keys on the destination
    # side of bucket0.
    bucket = self._get_bucket(self, 'bucket0', self.dest_master)
    gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.dest_master, gen_create2, 'create', exp=0)
    # Collect every node of the cluster(s) selected for upgrade.
    nodes_to_upgrade = []
    if "src" in upgrade_nodes:
        nodes_to_upgrade += self.src_nodes
    if "dest" in upgrade_nodes:
        nodes_to_upgrade += self.dest_nodes
    self.sleep(60)
    self._wait_for_replication_to_catchup()
    if self.pause_xdcr_cluster != "":
        self.pause_xdcr()
    # All selected nodes go down together and come back upgraded.
    self._offline_upgrade(nodes_to_upgrade)
    # Optionally require encryption on the remote-cluster references
    # after the upgrade; only attempted when BOTH sides were upgraded and
    # the target version supports it (>= 2.5.0; same lexicographic
    # version-compare caveat as above).
    if self._use_encryption_after_upgrade and "src" in upgrade_nodes and "dest" in upgrade_nodes and self.upgrade_versions[
            0] >= "2.5.0":
        if "src" in self._use_encryption_after_upgrade:
            src_remote_clusters = RestConnection(
                self.src_master).get_remote_clusters()
            for remote_cluster in src_remote_clusters:
                self._modify_clusters(None, self.src_master, remote_cluster['name'],
                                      self.dest_master, require_encryption=1)
        if "dest" in self._use_encryption_after_upgrade:
            dest_remote_clusters = RestConnection(
                self.dest_master).get_remote_clusters()
            for remote_cluster in dest_remote_clusters:
                self._modify_clusters(None, self.dest_master, remote_cluster['name'],
                                      self.src_master, require_encryption=1)
    self.set_xdcr_param('xdcrFailureRestartInterval', 1)
    self.sleep(60)
    # Post-upgrade mutations on both sides of bucket0.
    bucket = self._get_bucket(self, 'bucket0', self.src_master)
    gen_create3 = BlobGenerator('loadThree', 'loadThree', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.src_master, gen_create3, 'create', exp=0)
    bucket = self._get_bucket(self, 'bucket0', self.dest_master)
    gen_create4 = BlobGenerator('loadFour', 'loadFour', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.dest_master, gen_create4, 'create', exp=0)
    if self.pause_xdcr_cluster != "":
        self.resume_xdcr()
    # NOTE(review): `bucket` still refers to the destination's 'bucket0'
    # at this point, so that object is handed to the bidirectional merge.
    self.do_merge_bucket(self.src_master, self.dest_master, True, bucket)
    bucket = self._get_bucket(self, 'default', self.src_master)
    self._load_bucket(bucket, self.src_master, gen_create2, 'create', exp=0)
    self.do_merge_bucket(self.src_master, self.dest_master, False, bucket)
    self.sleep(60)
    self._post_upgrade_ops()
    self.sleep(60)
    self.verify_xdcr_stats(self.src_nodes, self.dest_nodes)
    self.max_verify = None
    # Check design-doc/view row counts against the client-side kvstores
    # on whichever side(s) created ddocs.
    if self.ddocs_src:
        for bucket_name in self.buckets_on_src:
            bucket = self._get_bucket(self, bucket_name, self.src_master)
            expected_rows = sum(
                [len(kv_store) for kv_store in bucket.kvs.values()])
            self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_src, self.src_master)
    if self.ddocs_dest:
        for bucket_name in self.buckets_on_dest:
            bucket = self._get_bucket(self, bucket_name, self.dest_master)
            expected_rows = sum(
                [len(kv_store) for kv_store in bucket.kvs.values()])
            self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_dest, self.dest_master)