Exemplo n.º 1
0
    def test_continuous_unidirectional_deletes_2(self):
        """Replicate deletes from cluster A to cluster B for keys that never
        existed on B, then verify them through the changes feed."""
        cluster_ref_a = "cluster_ref_a"
        master_a = self._input.clusters.get(0)[0]
        rest_conn_a = RestConnection(master_a)

        cluster_ref_b = "cluster_ref_b"
        master_b = self._input.clusters.get(1)[0]
        rest_conn_b = RestConnection(master_b)

        # Load the same keys on cluster A three times so their sequence
        # numbers get bumped, then delete them all.
        kvstore = ClientKeyValueStore()
        self._params["ops"] = "set"
        loaders = [
            RebalanceDataGenerator.start_load(
                rest_conn_a,
                self._buckets[0],
                RebalanceDataGenerator.create_loading_tasks(self._params),
                kvstore)
            for _ in range(3)
        ]
        for loader in loaders:
            loader.start()
        for loader in loaders:
            loader.join()
        time.sleep(10)

        self._params["ops"] = "delete"
        delete_tasks = RebalanceDataGenerator.create_loading_tasks(self._params)
        deleter = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    delete_tasks, kvstore)
        deleter.start()
        deleter.join()

        # Start continuous replication so the deletes flow from cluster A
        # to cluster B, where those keys never existed.
        replication_type = "continuous"
        rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                       master_b.rest_username,
                                       master_b.rest_password, cluster_ref_b)
        rep_database, rep_id = rest_conn_a.start_replication(replication_type,
                                                             self._buckets[0],
                                                             cluster_ref_b)
        self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))

        time.sleep(15)

        # Confirm the deletions were applied on the destination cluster.
        self.assertTrue(XDCRBaseTest.verify_del_items(rest_conn_a,
                                                      rest_conn_b,
                                                      self._buckets[0],
                                                      kvstore.keys(),
                                                      self._poll_sleep,
                                                      self._poll_timeout),
                        "Changes feed verification failed")
 def __init__(self,
              rest,
              bucket,
              kv_store=None,
              info=None,
              store_enabled=True):
     """Wrap SDKSmartClient with optional client-side key-value bookkeeping.

     A falsy *kv_store* (including ``None``) is replaced with a fresh
     ``ClientKeyValueStore``, preserving the original ``or`` semantics.
     """
     SDKSmartClient.__init__(self, rest, bucket, info)
     if kv_store:
         self.kv_store = kv_store
     else:
         self.kv_store = ClientKeyValueStore()
     self.store_enabled = store_enabled
     # Serializes access to the bookkeeping store across worker threads.
     self._rlock = threading.Lock()
Exemplo n.º 3
0
    def load_all_buckets_task(rest, task_manager, bucket_data, ram_load_ratio,
                              distribution=None, keys_count= -1, seed=None,
                              monitor=True):
        """Schedule a load task against every bucket on *rest*.

        Each bucket is guaranteed a ``kv_store`` entry in *bucket_data*
        (created lazily when missing).  Returns the tasks produced for the
        last bucket iterated, matching the original behavior; ``None`` when
        there are no buckets.
        """
        tasks = None
        for bucket in rest.get_buckets():
            store = bucket_data[bucket.name].get('kv_store', None)
            if store is None:
                # No bookkeeping store yet for this bucket — create one.
                store = ClientKeyValueStore()
                bucket_data[bucket.name]['kv_store'] = store
            tasks = RebalanceBaseTest.load_bucket_task_helper(
                rest, task_manager, bucket.name, ram_load_ratio,
                kv_store=store,
                keys_count=keys_count,
                seed=seed,
                monitor=monitor)
        return tasks
Exemplo n.º 4
0
 def test_rebalance_in(self):
     """Repeatedly rebalance new nodes into the cluster while concurrent
     set/delete workloads run, verifying keys and replication after each
     rebalance step.
     """
     log = logger.Logger().get_logger()
     master = self._servers[0]
     num_of_docs = TestInputSingleton.input.param("num_of_docs", 100000)
     # NOTE(review): the 100000 default for "replica" looks copy-pasted
     # from num_of_docs, and common_setup below is called with a
     # hard-coded replica=1 while replication_verification uses this
     # value — confirm which is intended.
     replica = TestInputSingleton.input.param("replica", 100000)
     add_items_count = TestInputSingleton.input.param(
         "num_of_creates", 30000)
     rebalance_in = TestInputSingleton.input.param("rebalance_in", 1)
     size = TestInputSingleton.input.param("item_size", 256)
     params = {
         "sizes": [size],
         "count": num_of_docs,
         "seed": str(uuid.uuid4())[:7]
     }
     RebalanceBaseTest.common_setup(self._input, self, replica=1)
     rest = RestConnection(master)
     buckets = rest.get_buckets()
     bucket_data = {}
     generators = {}
     # One client-side key-value bookkeeping store per bucket.
     for bucket in buckets:
         bucket_data[bucket.name] = {"kv_store": ClientKeyValueStore()}
     # Keep rebalancing nodes in until every configured server has joined.
     while len(rest.node_statuses()) < len(self._servers):
         for bucket in buckets:
             kv_store = bucket_data[bucket.name]["kv_store"]
             add_items_seed = str(uuid.uuid4())[:7]
             self._add_items(add_items_seed, bucket, add_items_count,
                             kv_store)
             errors = RebalanceDataGenerator.do_verification(
                 kv_store, rest, bucket.name)
             if errors:
                 log.error("verification returned {0} errors".format(
                     len(errors)))
             load_set_ops = {"ops": "set", "bucket": bucket.name}
             load_set_ops.update(params)
             # Delete a fifth of the freshly added items; reusing
             # add_items_seed makes the generator target the same keys.
             load_delete_ops = {
                 "ops": "delete",
                 "bucket": bucket.name,
                 "sizes": [size],
                 # NOTE(review): under Python 3 this division yields a
                 # float — confirm the loader accepts a non-integer count.
                 "count": add_items_count / 5,
                 "seed": add_items_seed
             }
             thread = RebalanceDataGenerator.start_load(
                 rest, bucket.name,
                 RebalanceDataGenerator.create_loading_tasks(load_set_ops),
                 kv_store)
             generators["set"] = {"thread": thread}
             #restart three times
             generators["set"]["thread"].start()
             thread = RebalanceDataGenerator.start_load(
                 rest, bucket.name,
                 RebalanceDataGenerator.create_loading_tasks(
                     load_delete_ops), kv_store)
             generators["delete"] = {"thread": thread}
             # NOTE(review): generators["set"]/["delete"] are overwritten
             # on every bucket iteration, so with multiple buckets only
             # the last bucket's threads are joined below — confirm that
             # is intended.
             generators["delete"]["thread"].start()
         self.log.info("current nodes : {0}".format(
             [node.id for node in rest.node_statuses()]))
         # Add `rebalance_in` nodes and rebalance while loads still run.
         rebalanced_in, which_servers = RebalanceBaseTest.rebalance_in(
             self._servers, rebalance_in)
         self.assertTrue(rebalanced_in,
                         msg="unable to add and rebalance more nodes")
         # Verification pass; the load threads may still be mutating data.
         for bucket in buckets:
             kv_store = bucket_data[bucket.name]["kv_store"]
             errors = RebalanceDataGenerator.do_verification(
                 kv_store, rest, bucket.name)
             if errors:
                 log.error("verification returned {0} errors".format(
                     len(errors)))
         generators["set"]["thread"].join()
         generators["delete"]["thread"].join()
         # Record final item counts and verify replica consistency.
         for bucket in buckets:
             kv_store = bucket_data[bucket.name]["kv_store"]
             bucket_data[bucket.name]["items_inserted_count"] = len(
                 kv_store.valid_items())
             RebalanceBaseTest.replication_verification(
                 master, bucket_data, replica, self)