def _getr_items(self, item_count, replica_count, prefix, vprefix=""):
    """Read back replica copies of previously stored items via getr.

    For each replica index r and each item i, reads key
    ``prefix + "_key_" + str(i)`` from replica r and asserts the value is
    ``vprefix + "_value_" + str(i)``. On a "not my vbucket" error
    (status 7) the vbucket map is refreshed and the key retried; any
    other error is counted and the key skipped.

    Returns the number of successful replica reads.
    """
    time_start = time.time()
    get_count = 0
    last_error = ""
    error_count = 0
    awareness = VBucketAwareMemcached(self.rest, self.default_bucket_name)
    for r in range(replica_count):
        for i in range(item_count):
            retry = True
            key = prefix + "_key_" + str(i)
            while retry:
                client = awareness.memcached(key, r)
                try:
                    # getr returns (flags, cas, value); take the value.
                    value = client.getr(key)[2]
                    assert(value == vprefix + "_value_" + str(i))
                    get_count += 1
                    retry = False
                except mc_bin_client.MemcachedError as e:
                    last_error = "failed to getr key {0}, error: {1}".format(key, e)
                    error_count += 1
                    if e.status == 7:
                        # NOT_MY_VBUCKET: refresh the map and retry this key.
                        # FIX: the original log string had a {0} placeholder
                        # with no argument and logged the literal "{0}".
                        self.log.info("getting new vbucket map for key {0}".format(key))
                        awareness.reset(self.rest)
                    else:
                        retry = False
                except Exception as e:
                    last_error = "failed to getr key {0}, error: {1}".format(key, e)
                    error_count += 1
                    retry = False
    if error_count > 0:
        self.log.error("got {0} errors, last error: {1}".format(error_count, last_error))
    self.log.info("got {0} replica items in {1} seconds".format(get_count, time.time() - time_start))
    awareness.done()
    return get_count
def _reader_thread(self, inserted_keys, bucket_data, moxi=False):
    """Read every inserted key from every bucket, collecting read errors.

    When ``moxi`` is truthy a moxi proxy client is used; otherwise a
    vbucket-aware client is created (and torn down) per key, which is
    kept from the original to exercise repeated client setup.

    Returns the list of {"error": exception, "key": key} dicts.
    (FIX: the original built this list but never returned it; it also
    rebound the ``moxi`` parameter to the client object and leaked one
    extra VBucketAwareMemcached created right after ``done()``.)
    """
    errors = []
    rest = RestConnection(self._servers[0])
    for name in bucket_data:
        for key in inserted_keys:
            if moxi:
                client = MemcachedClientHelper.proxy_client(self._servers[0], name)
                getter = client
            else:
                client = VBucketAwareMemcached(rest, name)
                getter = client.memcached(key)
            try:
                getter.get(key)
            except Exception as ex:
                errors.append({"error": ex, "key": key})
                self.log.info(ex)
            if not moxi:
                # release the per-key smart client's connections
                client.done()
    return errors
if __name__ == "__main__":
    # Load-generation driver: creates, mutates, and deletes keys on the
    # cluster while mirroring every operation into a local KVStore, then
    # saves the KVStore so a later pass can verify cluster contents.
    config = Config(sys.argv[1:])
    kv = KVStore()
    rest = RestConnection(config.master)
    awareness = VBucketAwareMemcached(rest, config.bucket)
    # Create config.sets keys with random uuid values.
    for i in range(config.sets):
        key = config.prefix + str(i)
        value = str(uuid.uuid4())
        kv.set(key, 0, 0, value)
        set_aware(awareness, rest, key, 0, 0, value)
    # Mutate random existing keys. FIX: random.randint is inclusive on
    # both ends, so the upper bound must be config.sets - 1 — the
    # original could pick index config.sets, a key never created above.
    for i in range(config.mutations):
        key = config.prefix + str(random.randint(0, config.sets - 1))
        value = str(uuid.uuid4())
        kv.set(key, 0, 0, value)
        set_aware(awareness, rest, key, 0, 0, value)
    # Delete the first config.deletes keys.
    for i in range(config.deletes):
        key = config.prefix + str(i)
        kv.delete(key)
        delete_aware(awareness, rest, key)
    awareness.done()
    kv.save(config.filename)
class SyncReplicationTest(unittest.TestCase):
    """Tests for memcached sync_replication against a freshly rebalanced
    cluster with a single 'default' bucket."""
    awareness = None

    def common_setup(self, replica):
        """Reset the cluster, create a 'default' bucket with ``replica``
        replicas, rebalance all nodes in, and cache a vbucket-aware
        memcached client on self.awareness."""
        self._input = TestInputSingleton.input
        self._servers = self._input.servers
        first = self._servers[0]
        self.log = logger.Logger().get_logger()
        self.log.info(self._input)
        rest = RestConnection(first)
        for server in self._servers:
            RestHelper(RestConnection(server)).is_ns_server_running()
        ClusterOperationHelper.cleanup_cluster(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.add_all_nodes_or_assert(
            self._servers[0], self._servers, self._input.membase_settings, self)
        otpNodeIds = [node.id for node in rest.node_statuses()]
        info = rest.get_nodes_self()
        bucket_ram = info.mcdMemoryReserved * 3 / 4
        rest.create_bucket(bucket="default",
                           ramQuotaMB=int(bucket_ram),
                           replicaNumber=replica,
                           proxyPort=rest.get_nodes_self().moxi)
        msg = "wait_for_memcached fails"
        # FIX: the original line ended with a stray comma, making `ready`
        # a one-element tuple that is always truthy, so the assertion
        # below could never fail.
        ready = BucketOperationHelper.wait_for_memcached(first, "default")
        self.assertTrue(ready, msg)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(
            rebalanceStarted,
            "unable to start rebalance on master node {0}".format(first.ip))
        self.log.info('started rebalance operation on master node {0}'.format(first.ip))
        # without a bucket this seems to fail
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(
            rebalanceSucceeded,
            "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
        self.awareness = VBucketAwareMemcached(rest, "default")

    def tearDown(self):
        # Only clean up when common_setup actually ran (self._servers is
        # set there too).
        if self.awareness:
            self.awareness.done()
            ClusterOperationHelper.cleanup_cluster(self._servers)
            BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)

    def test_one_replica(self):
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 1024)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            mc.sync_replication([{"key": k, "vbucket": vBucket}], 1)
        for k in keys:
            mc = self.awareness.memcached(k)
            mc.get(k)

    def test_one_replica_one_node(self):
        pass

    def test_one_replica_multiple_nodes(self):
        pass

    def test_one_replica_bucket_replica_one(self):
        pass

    def test_two_replica(self):
        self._unsupported_replicas(2)

    def test_three_replica(self):
        # FIX: was self._unsupported_replicas(1); a "three replica" test
        # should exercise replica count 3.
        self._unsupported_replicas(3)

    def _unsupported_replicas(self, replica):
        """sync_replication with more replicas than the bucket has must
        raise a MemcachedError, and data must remain readable."""
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 102400)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            mc.get(k)
            try:
                mc.sync_replication([{"key": k, "vbucket": vBucket}], replica)
                msg = "server did not raise an error when running sync_replication with {0} replicas"
                self.fail(msg.format(replica))
            except MemcachedError as error:
                self.log.info("error {0} {1} as expected".format(error.status, error.msg))
        for k in keys:
            mc = self.awareness.memcached(k)
            mc.get(k)

    def test_invalid_key(self):
        pass

    def test_not_your_vbucket(self):
        """sync_replication sent to a node that does not own the vbucket
        should report an error event for every attempt."""
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 1024)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            not_your_vbucket_mc = self.awareness.not_my_vbucket_memcached(k)
            try:
                count = 0
                expected_error = 0
                while count < 100:
                    a, b, response = not_your_vbucket_mc.sync_replication(
                        [{"key": k, "vbucket": vBucket}], 1)
                    count += 1
                    self.log.info("response : {0}".format(response))
                    if response and response[0]["event"] != "invalid key":
                        expected_error += 1
                # FIX: was `is not 100` — identity comparison between ints
                # is implementation-dependent; use != for value equality.
                if expected_error != 100:
                    self.fail(msg="server did not raise an error when running "
                                  "sync_replication with invalid vbucket")
            except MemcachedError as error:
                self.log.error(error)

    def test_some_invalid_keys(self):
        pass

    # NOTE(review): name looks like a typo for test_some_not_your_vbucket;
    # with the "stest_" prefix unittest never discovers it. Kept as-is to
    # avoid changing the class's external interface.
    def stest_ome_not_your_vbucket(self):
        pass

    def test_some_large_values(self):
        pass

    def test_too_many_keys(self):
        pass

    def test_singlenode(self):
        pass
# NOTE(review): this is the second definition of SyncReplicationTest in this
# file (it shadows the earlier one at import time); the only behavioral
# difference is the argument order passed to sync_replication. Consider
# removing one copy.
class SyncReplicationTest(unittest.TestCase):
    """Tests for memcached sync_replication against a freshly rebalanced
    cluster with a single 'default' bucket."""
    awareness = None

    def common_setup(self, replica):
        """Reset the cluster, create a 'default' bucket with ``replica``
        replicas, rebalance all nodes in, and cache a vbucket-aware
        memcached client on self.awareness."""
        self._input = TestInputSingleton.input
        self._servers = self._input.servers
        first = self._servers[0]
        self.log = logger.Logger().get_logger()
        self.log.info(self._input)
        rest = RestConnection(first)
        for server in self._servers:
            RestHelper(RestConnection(server)).is_ns_server_running()
        ClusterOperationHelper.cleanup_cluster(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.add_all_nodes_or_assert(
            self._servers[0], self._servers, self._input.membase_settings, self)
        otpNodeIds = [node.id for node in rest.node_statuses()]
        info = rest.get_nodes_self()
        bucket_ram = info.mcdMemoryReserved * 3 / 4
        rest.create_bucket(bucket="default",
                           ramQuotaMB=int(bucket_ram),
                           replicaNumber=replica,
                           proxyPort=rest.get_nodes_self().moxi)
        msg = "wait_for_memcached fails"
        # FIX: the original line ended with a stray comma, making `ready`
        # a one-element tuple that is always truthy, so the assertion
        # below could never fail.
        ready = BucketOperationHelper.wait_for_memcached(first, "default")
        self.assertTrue(ready, msg)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(
            rebalanceStarted,
            "unable to start rebalance on master node {0}".format(first.ip))
        self.log.info('started rebalance operation on master node {0}'.format(first.ip))
        # without a bucket this seems to fail
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(
            rebalanceSucceeded,
            "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
        self.awareness = VBucketAwareMemcached(rest, "default")

    def tearDown(self):
        # Only clean up when common_setup actually ran (self._servers is
        # set there too).
        if self.awareness:
            self.awareness.done()
            ClusterOperationHelper.cleanup_cluster(self._servers)
            BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)

    def test_one_replica(self):
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 1024)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            mc.sync_replication(1, [{"key": k, "vbucket": vBucket}])
        for k in keys:
            mc = self.awareness.memcached(k)
            mc.get(k)

    def test_one_replica_one_node(self):
        pass

    def test_one_replica_multiple_nodes(self):
        pass

    def test_one_replica_bucket_replica_one(self):
        pass

    def test_two_replica(self):
        self._unsupported_replicas(2)

    def test_three_replica(self):
        # FIX: was self._unsupported_replicas(1); a "three replica" test
        # should exercise replica count 3.
        self._unsupported_replicas(3)

    def _unsupported_replicas(self, replica):
        """sync_replication with more replicas than the bucket has must
        raise a MemcachedError, and data must remain readable."""
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 102400)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            mc.get(k)
            try:
                mc.sync_replication(replica, [{"key": k, "vbucket": vBucket}])
                msg = "server did not raise an error when running sync_replication with {0} replicas"
                self.fail(msg.format(replica))
            except MemcachedError as error:
                self.log.info("error {0} {1} as expected".format(error.status, error.msg))
        for k in keys:
            mc = self.awareness.memcached(k)
            mc.get(k)

    def test_invalid_key(self):
        pass

    def test_not_your_vbucket(self):
        """sync_replication sent to a node that does not own the vbucket
        should report an error event for every attempt."""
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 1024)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            not_your_vbucket_mc = self.awareness.not_my_vbucket_memcached(k)
            try:
                count = 0
                expected_error = 0
                while count < 100:
                    a, b, response = not_your_vbucket_mc.sync_replication(
                        1, [{"key": k, "vbucket": vBucket}])
                    count += 1
                    self.log.info("response : {0}".format(response))
                    if response and response[0]["event"] != "invalid key":
                        expected_error += 1
                # FIX: was `is not 100` — identity comparison between ints
                # is implementation-dependent; use != for value equality.
                if expected_error != 100:
                    self.fail(msg="server did not raise an error when running "
                                  "sync_replication with invalid vbucket")
            except MemcachedError as error:
                self.log.error(error)

    def test_some_invalid_keys(self):
        pass

    # NOTE(review): name looks like a typo for test_some_not_your_vbucket;
    # with the "stest_" prefix unittest never discovers it. Kept as-is to
    # avoid changing the class's external interface.
    def stest_ome_not_your_vbucket(self):
        pass

    def test_some_large_values(self):
        pass

    def test_too_many_keys(self):
        pass

    def test_singlenode(self):
        pass
rest = RestConnection(config.master) awareness = VBucketAwareMemcached(rest, config.bucket) undeleted = 0 missing = 0 badval = 0 for key, val_expected in kv.iteritems(): if val_expected[3]: try: val = get_aware(awareness, rest, key) if val[2] != val_expected[2]: badval += 1 except mc_bin_client.MemcachedError as e: if e.status == 1: missing += 1 else: raise e else: try: val = get_aware(awareness, rest, key) undeleted += 1 except mc_bin_client.MemcachedError as e: if e.status != 1: raise e awareness.done() print "undeleted:", undeleted print "missing:", missing print "badval:", badval