Example No. 1
 def test_eventing_processes_mutations_when_mutated_through_subdoc_api_and_set_expiry_through_sdk(self):
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
     url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip, name=self.src_bucket_name)
     bucket = Bucket(url, username="******", password="******")
     for docid in ['customer123', 'customer1234', 'customer12345']:
         bucket.insert(docid, {'some': 'value'})
     body = self.create_save_function_body(self.function_name, self.handler_code,
                                           dcp_stream_boundary="from_now")
     # deploy eventing function
     self.deploy_function(body)
     # upserting a new sub-document
     bucket.mutate_in('customer123', SD.upsert('fax', '775-867-5309'))
     # inserting a sub-document
     bucket.mutate_in('customer1234', SD.insert('purchases.complete', [42, True, None], create_parents=True))
     # Creating and populating an array document
     bucket.mutate_in('customer12345', SD.array_append('purchases.complete', ['Hello'], create_parents=True))
     self.verify_eventing_results(self.function_name, 3, skip_stats_validation=True)
     for docid in ['customer123', 'customer1234', 'customer12345']:
         # set expiry on all the docs created using sub doc API
         bucket.touch(docid, ttl=5)
     self.sleep(10, "wait for expiry of the documents")
     # Wait for eventing to catch up with all the expiry mutations and verify results
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
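The test above drives its mutations through the Python SDK 2.x Bucket API (couchbase.Bucket, couchbase.subdocument). As a point of reference, the sketch below shows the same sub-document operations plus the TTL update against the collection-based API, assuming Couchbase Python SDK 4.x; the connection string, credentials, and bucket name are placeholders, not values taken from the test harness.

    from datetime import timedelta

    from couchbase.auth import PasswordAuthenticator
    from couchbase.cluster import Cluster
    from couchbase.options import ClusterOptions
    import couchbase.subdocument as SD

    # Placeholder connection details; adjust for the cluster under test.
    cluster = Cluster('couchbase://127.0.0.1',
                      ClusterOptions(PasswordAuthenticator('Administrator', 'password')))
    collection = cluster.bucket('src_bucket').default_collection()

    collection.insert('customer123', {'some': 'value'})
    # upsert a single sub-document field
    collection.mutate_in('customer123', [SD.upsert('fax', '775-867-5309')])
    # insert a sub-document path, creating intermediate parents
    collection.mutate_in('customer123',
                         [SD.insert('purchases.complete', [42, True, None],
                                    create_parents=True)])
    # set an expiry on the whole document, as bucket.touch() does above
    collection.touch('customer123', timedelta(seconds=5))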
Example No. 2
 def test_eventing_processes_mutations_when_mutated_through_subdoc_api_and_set_expiry_through_sdk(self):
     # set expiry pager interval
     ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
     url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip, name=self.src_bucket_name)
     bucket = Bucket(url, username="******", password="******")
     for docid in ['customer123', 'customer1234', 'customer12345']:
         bucket.insert(docid, {'some': 'value'})
     body = self.create_save_function_body(self.function_name, self.handler_code,
                                           dcp_stream_boundary="from_now")
     # deploy eventing function
     self.deploy_function(body)
     # upserting a new sub-document
     bucket.mutate_in('customer123', SD.upsert('fax', '775-867-5309'))
     # inserting a sub-document
     bucket.mutate_in('customer1234', SD.insert('purchases.complete', [42, True, None], create_parents=True))
     # Creating and populating an array document
     bucket.mutate_in('customer12345', SD.array_append('purchases.complete', ['Hello'], create_parents=True))
     self.verify_eventing_results(self.function_name, 3, skip_stats_validation=True)
     for docid in ['customer123', 'customer1234', 'customer12345']:
         # set expiry on all the docs created using sub doc API
         bucket.touch(docid, ttl=5)
     self.sleep(10, "wait for expiry of the documents")
     # Wait for eventing to catch up with all the expiry mutations and verify results
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
Example No. 3
 def reboot_server(self, server):
     remote_client = RemoteMachineShellConnection(server)
     remote_client.reboot_node()
     remote_client.disconnect()
     # wait for restart and warmup on the node
     self.sleep(self.wait_timeout * 2)
     # disable the firewall on the node
     self.stop_firewall_on_node(server)
     # wait until the node is ready after warmup
     ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
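reboot_server leaves the actual readiness check to ClusterOperationHelper.wait_for_ns_servers_or_assert. As a rough, hypothetical illustration of that kind of check (not the helper's implementation), the sketch below polls the management REST API on port 8091 until the rebooted node reports itself healthy; the host, credentials, and timeout are assumptions.

    import time
    import requests

    def wait_until_node_healthy(host, user='Administrator', password='password',
                                timeout=600):
        """Poll ns_server until the node reports status 'healthy' (illustrative only)."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                resp = requests.get('http://{0}:8091/pools/default'.format(host),
                                    auth=(user, password), timeout=5)
                if resp.status_code == 200:
                    nodes = resp.json().get('nodes', [])
                    if any(host in node.get('hostname', '') and
                           node.get('status') == 'healthy' for node in nodes):
                        return True
            except requests.exceptions.RequestException:
                pass  # node is still rebooting or warming up
            time.sleep(5)
        raise AssertionError('node {0} did not become healthy in time'.format(host))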
Example No. 4
 def reboot_server(self, server):
     remote_client = RemoteMachineShellConnection(server)
     remote_client.reboot_node()
     remote_client.disconnect()
     # wait for restart and warmup on the node
     self.sleep(self.wait_timeout * 5)
     # disable the firewall on the node
     self.stop_firewall_on_node(server)
     # wait until the node is ready after warmup
     ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
Example No. 5
 def kill_erlang_service(self, server):
     remote_client = RemoteMachineShellConnection(server)
     os_info = remote_client.extract_remote_info()
     log.info("os_info : {0}".format(os_info))
     if os_info.type.lower() == "windows":
         remote_client.kill_erlang(os="windows")
     else:
         remote_client.kill_erlang()
     remote_client.start_couchbase()
     remote_client.disconnect()
     # wait for restart and warmup on the node
     self.sleep(self.wait_timeout * 2)
     # wait until the node is ready after warmup
     ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
Example No. 6
 def kill_erlang_service(self, server):
     remote_client = RemoteMachineShellConnection(server)
     os_info = remote_client.extract_remote_info()
     log.info("os_info : {0}", os_info)
     if os_info.type.lower() == "windows":
         remote_client.kill_erlang(os="windows")
     else:
         remote_client.kill_erlang()
     remote_client.start_couchbase()
     remote_client.disconnect()
     # wait for restart and warmup on the node
     self.sleep(self.wait_timeout * 2)
     # wait until the node is ready after warmup
     ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
Example No. 7
    def save_snapshots(self, file_base, bucket):
        """Save snapshots on all servers"""
        if not self.input.servers or not bucket:
            self.log.error("invalid server list or bucket name")
            return False

        ClusterOperationHelper.stop_cluster(self.input.servers)

        for server in self.input.servers:
            self._save_snapshot(server, bucket, file_base)

        ClusterOperationHelper.start_cluster(self.input.servers)

        return True
Example No. 8
    def save_snapshots(self, file_base, bucket):
        """Save snapshots on all servers"""
        if not self.input.servers or not bucket:
            self.log.error("invalid server list or bucket name")
            return False

        ClusterOperationHelper.stop_cluster(self.input.servers)

        for server in self.input.servers:
            self._save_snapshot(server, bucket, file_base)

        ClusterOperationHelper.start_cluster(self.input.servers)

        return True
Example No. 9
 def test_enforcing_tls_during_handler_lifecycle_operation(self):
     ntonencryptionBase().setup_nton_cluster(
         self.servers, clusterEncryptionLevel=self.ntonencrypt_level)
     body = self.create_save_function_body(
         self.function_name, "handler_code/ABO/insert_rebalance.js")
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size)
     self.deploy_function(body)
     self.verify_doc_count_collections("dst_bucket._default._default",
                                       self.docs_per_day * self.num_docs)
     if self.pause_resume:
         self.pause_function(body)
     else:
         self.undeploy_function(body)
     if self.pause_resume:
         self.resume_function(body, wait_for_resume=False)
     else:
         self.deploy_function(body, wait_for_bootstrap=False)
     ntonencryptionBase().setup_nton_cluster(
         [self.master], clusterEncryptionLevel="strict")
     self.wait_for_handler_state(body['appname'], "deployed")
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size,
               op_type='delete')
     self.verify_doc_count_collections("dst_bucket._default._default", 0)
     assert ClusterOperationHelper.check_if_services_obey_tls(
         servers=[self.master]), "Port binding after enforcing TLS incorrect"
     self.undeploy_and_delete_function(body)
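The assertion above relies on ClusterOperationHelper.check_if_services_obey_tls to confirm that, once the cluster encryption level is strict, services listen only on their TLS ports. A minimal sketch of one such probe is shown below; it assumes the standard secure management port 18091 and a self-signed certificate, and is an illustration rather than the helper's actual logic.

    import socket
    import ssl

    def tls_port_accepts_handshake(host, port=18091, timeout=10):
        """Return True if the port completes a TLS handshake (illustrative check only)."""
        context = ssl.create_default_context()
        # test clusters typically use self-signed certificates, so skip verification
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        try:
            with socket.create_connection((host, port), timeout=timeout) as sock:
                with context.wrap_socket(sock, server_hostname=host) as tls_sock:
                    return tls_sock.version() is not None
        except (OSError, ssl.SSLError):
            return False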
Example No. 10
    def set_up_dgm(self):
        """Download fragmented, DGM dataset onto each cluster node, if not
        already locally available.

        The number of vbuckets and database schema must match the
        target cluster.

        Shut down all cluster nodes.

        Do a cluster-restore.

        Restart all cluster nodes."""

        bucket = self.param("bucket", "default")
        ClusterOperationHelper.stop_cluster(self.input.servers)
        for server in self.input.servers:
            remote = RemoteMachineShellConnection(server)
            #TODO: Better way to pass num_nodes and db_size?
            self.get_data_files(remote, bucket, 1, 10)
            remote.disconnect()
        ClusterOperationHelper.start_cluster(self.input.servers)
Example No. 11
    def set_up_dgm(self):
        """Download fragmented, DGM dataset onto each cluster node, if not
        already locally available.

        The number of vbuckets and database schema must match the
        target cluster.

        Shut down all cluster nodes.

        Do a cluster-restore.

        Restart all cluster nodes."""

        bucket = self.param("bucket", "default")
        ClusterOperationHelper.stop_cluster(self.input.servers)
        for server in self.input.servers:
            remote = RemoteMachineShellConnection(server)
            #TODO: Better way to pass num_nodes and db_size?
            self.get_data_files(remote, bucket, 1, 10)
            remote.disconnect()
        ClusterOperationHelper.start_cluster(self.input.servers)
Example No. 12
    def load_snapshots(self, file_base, bucket):
        """Load snapshots on all servers"""
        if not self.input.servers or not bucket:
            print "[perf: load_snapshot] invalid server list or bucket name"
            return False

        ClusterOperationHelper.stop_cluster(self.input.servers)

        for server in self.input.servers:
            if not self._load_snapshot(server, bucket, file_base):
                ClusterOperationHelper.start_cluster(self.input.servers)
                return False

        ClusterOperationHelper.start_cluster(self.input.servers)

        return True
Example No. 13
    def load_snapshots(self, file_base, bucket):
        """Load snapshots on all servers"""
        if not self.input.servers or not bucket:
            print "[perf: load_snapshot] invalid server list or bucket name"
            return False

        ClusterOperationHelper.stop_cluster(self.input.servers)

        for server in self.input.servers:
            if not self._load_snapshot(server, bucket, file_base):
                ClusterOperationHelper.start_cluster(self.input.servers)
                return False

        ClusterOperationHelper.start_cluster(self.input.servers)

        return True
Example No. 14
 def tear_down_cluster(self):
     self.log.info("tearing down cluster")
     ClusterOperationHelper.cleanup_cluster(self.input.servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(
         self.input.servers, self)
     self.log.info("Cluster teared down")
Example No. 15
    def loop(self,
             num_ops=None,
             num_items=None,
             max_items=None,
             max_creates=None,
             min_value_size=None,
             exit_after_creates=0,
             kind='binary',
             protocol='binary',
             clients=1,
             ratio_misses=0.0,
             ratio_sets=0.0,
             ratio_creates=0.0,
             ratio_deletes=0.0,
             ratio_hot=0.2,
             ratio_hot_sets=0.95,
             ratio_hot_gets=0.95,
             ratio_expirations=0.0,
             expiration=None,
             test_name=None,
             prefix="",
             doc_cache=1,
             use_direct=True,
             collect_server_stats=True,
             start_at=-1,
             report=0,
             ctl=None,
             hot_shift=0,
             is_eperf=False,
             ratio_queries=0,
             queries=0,
             ddoc=None):
        num_items = num_items or self.num_items_loaded

        hot_stack_size = \
            self.parami('hot_stack_size', PerfDefaults.hot_stack_size) or \
            (num_items * ratio_hot)

        cfg = {'max-items': max_items or num_items,
               'max-creates': max_creates or 0,
               'max-ops-per-sec': self.parami("mcsoda_max_ops_sec",
                                              PerfDefaults.mcsoda_max_ops_sec),
               'min-value-size': min_value_size or self.parami("min_value_size",
                                                               1024),
               'exit-after-creates': exit_after_creates,
               'ratio-sets': ratio_sets,
               'ratio-misses': ratio_misses,
               'ratio-creates': ratio_creates,
               'ratio-deletes': ratio_deletes,
               'ratio-hot': ratio_hot,
               'ratio-hot-sets': ratio_hot_sets,
               'ratio-hot-gets': ratio_hot_gets,
               'ratio-expirations': ratio_expirations,
               'ratio-queries': ratio_queries,
               'expiration': expiration or 0,
               'threads': clients,
               'json': int(kind == 'json'),
               'batch': self.parami("batch", PerfDefaults.batch),
               'vbuckets': self.vbucket_count,
               'doc-cache': doc_cache,
               'prefix': prefix,
               'queries': queries,
               'report': report,
               'hot-shift': hot_shift,
               'hot-stack': self.parami("hot_stack", PerfDefaults.hot_stack),
               'hot-stack-size': hot_stack_size,
               'hot-stack-rotate': self.parami("hot_stack_rotate",
                                               PerfDefaults.hot_stack_rotate),
               'cluster_name': self.param("cluster_name", ""),
               'observe': self.param("observe", PerfDefaults.observe),
               'obs-backoff': self.paramf('obs_backoff',
                                          PerfDefaults.obs_backoff),
               'obs-max-backoff': self.paramf('obs_max_backoff',
                                              PerfDefaults.obs_max_backoff),
               'obs-persist-count': self.parami('obs_persist_count',
                                                PerfDefaults.obs_persist_count),
               'obs-repl-count': self.parami('obs_repl_count',
                                             PerfDefaults.obs_repl_count),
               'woq-pattern': self.parami('woq_pattern',
                                          PerfDefaults.woq_pattern),
               'woq-verbose': self.parami('woq_verbose',
                                          PerfDefaults.woq_verbose),
               'cor-pattern': self.parami('cor_pattern',
                                          PerfDefaults.cor_pattern),
               'cor-persist': self.parami('cor_persist',
                                          PerfDefaults.cor_persist),
               'time': self.parami('time', 0),
               'cbm': self.parami('cbm', PerfDefaults.cbm),
               'cbm-host': self.param('cbm_host', PerfDefaults.cbm_host),
               'cbm-port': self.parami('cbm_port', PerfDefaults.cbm_port)}

        cfg_params = cfg.copy()
        cfg_params['test_time'] = time.time()
        cfg_params['test_name'] = test_name
        client_id = ''
        stores = None

        if is_eperf:
            client_id = self.parami("prefix", 0)
        sc = None
        if self.parami("collect_stats", 1):
            sc = self.start_stats(self.spec_reference + ".loop",
                                  test_params=cfg_params,
                                  client_id=client_id,
                                  collect_server_stats=collect_server_stats,
                                  ddoc=ddoc)

        self.cur = {'cur-items': num_items}
        if start_at >= 0:
            self.cur['cur-gets'] = start_at
        if num_ops is None:
            num_ops = num_items
        if isinstance(num_ops, int):
            cfg['max-ops'] = num_ops
        else:
            # Here, num_ops is a "time to run" tuple of the form
            # ('seconds', integer_num_of_seconds_to_run)
            cfg['time'] = num_ops[1]

        # For black-box, multi-node tests,
        # always use membase-binary
        if self.is_multi_node:
            protocol = self.mk_protocol(host=self.input.servers[0].ip,
                                        port=self.input.servers[0].port)

        self.log.info("mcsoda - protocol %s" % protocol)
        protocol, host_port, user, pswd = \
            self.protocol_parse(protocol, use_direct=use_direct)

        if not user.strip():
            if "11211" in host_port:
                user = self.param("bucket", "default")
            else:
                user = self.input.servers[0].rest_username
        if not pswd.strip():
            if not "11211" in host_port:
                pswd = self.input.servers[0].rest_password

        self.log.info("mcsoda - %s %s %s %s" %
                      (protocol, host_port, user, pswd))
        self.log.info("mcsoda - cfg: " + str(cfg))
        self.log.info("mcsoda - cur: " + str(self.cur))

        # For query tests always use StoreCouchbase
        if protocol == "couchbase":
            stores = [StoreCouchbase()]

        self.cur, start_time, end_time = \
            self.mcsoda_run(cfg, self.cur, protocol, host_port, user, pswd,
                            stats_collector=sc, ctl=ctl, stores=stores,
                            heartbeat=self.parami("mcsoda_heartbeat", 0),
                            why="loop", bucket=self.param("bucket", "default"))

        ops = {
            'tot-sets': self.cur.get('cur-sets', 0),
            'tot-gets': self.cur.get('cur-gets', 0),
            'tot-items': self.cur.get('cur-items', 0),
            'tot-creates': self.cur.get('cur-creates', 0),
            'tot-misses': self.cur.get('cur-misses', 0),
            "start-time": start_time,
            "end-time": end_time
        }

        # Wait until there are no active indexing tasks
        if self.parami('wait_for_indexer', 0):
            ClusterOperationHelper.wait_for_completion(self.rest, 'indexer')

        # Wait until there are no active view compaction tasks
        if self.parami('wait_for_compaction', 0):
            ClusterOperationHelper.wait_for_completion(self.rest,
                                                       'view_compaction')

        if self.parami("loop_wait_until_drained",
                       PerfDefaults.loop_wait_until_drained):
            self.wait_until_drained()

        if self.parami("loop_wait_until_repl",
                       PerfDefaults.loop_wait_until_repl):
            self.wait_until_repl()

        if self.parami("collect_stats", 1) and \
                not self.parami("reb_no_fg", PerfDefaults.reb_no_fg):
            self.end_stats(sc, ops, self.spec_reference + ".loop")

        self.log.info(
            'Finished access phase for worker: {0}:{1}.'\
            .format(self.params("why", "main"), self.parami("prefix", 0))
        )

        return ops, start_time, end_time
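loop() accepts num_ops either as an integer, which becomes cfg['max-ops'], or as a ('seconds', N) tuple, which becomes cfg['time'] and bounds the run by duration instead of operation count. Hypothetical invocations from a test subclass might look like the sketch below; the parameter values are illustrative only.

    # count-bounded run: stop after 100,000 operations
    ops, start_time, end_time = self.loop(num_ops=100000, num_items=50000,
                                          kind='json')

    # time-bounded run: stop after 600 seconds, regardless of operation count
    ops, start_time, end_time = self.loop(num_ops=('seconds', 600),
                                          num_items=50000, kind='json')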
Example No. 16
 def tear_down_cluster(self):
     print "[perf.tearDown] Tearing down cluster"
     ClusterOperationHelper.cleanup_cluster(self.input.servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(self.input.servers,
                                                          self)
     print "[perf.tearDown] Cluster teared down"
Example No. 17
    def loop(self, num_ops=None,
             num_items=None,
             max_items=None,
             max_creates=None,
             min_value_size=None,
             exit_after_creates=0,
             kind='binary',
             protocol='binary',
             clients=1,
             ratio_misses=0.0,
             ratio_sets=0.0, ratio_creates=0.0, ratio_deletes=0.0,
             ratio_hot=0.2, ratio_hot_sets=0.95, ratio_hot_gets=0.95,
             ratio_expirations=0.0,
             expiration=None,
             test_name=None,
             prefix="",
             doc_cache=1,
             use_direct=True,
             collect_server_stats=True,
             start_at=-1,
             report=0,
             ctl=None,
             hot_shift=0,
             is_eperf=False,
             ratio_queries=0,
             queries=0,
             ddoc=None):
        num_items = num_items or self.num_items_loaded

        hot_stack_size = \
            self.parami('hot_stack_size', PerfDefaults.hot_stack_size) or \
            (num_items * ratio_hot)

        cfg = {'max-items': max_items or num_items,
               'max-creates': max_creates or 0,
               'max-ops-per-sec': self.parami("mcsoda_max_ops_sec",
                                              PerfDefaults.mcsoda_max_ops_sec),
               'min-value-size': min_value_size or self.parami("min_value_size",
                                                               1024),
               'exit-after-creates': exit_after_creates,
               'ratio-sets': ratio_sets,
               'ratio-misses': ratio_misses,
               'ratio-creates': ratio_creates,
               'ratio-deletes': ratio_deletes,
               'ratio-hot': ratio_hot,
               'ratio-hot-sets': ratio_hot_sets,
               'ratio-hot-gets': ratio_hot_gets,
               'ratio-expirations': ratio_expirations,
               'ratio-queries': ratio_queries,
               'expiration': expiration or 0,
               'threads': clients,
               'json': int(kind == 'json'),
               'batch': self.parami("batch", PerfDefaults.batch),
               'vbuckets': self.vbucket_count,
               'doc-cache': doc_cache,
               'prefix': prefix,
               'queries': queries,
               'report': report,
               'hot-shift': hot_shift,
               'hot-stack': self.parami("hot_stack", PerfDefaults.hot_stack),
               'hot-stack-size': hot_stack_size,
               'hot-stack-rotate': self.parami("hot_stack_rotate",
                                               PerfDefaults.hot_stack_rotate),
               'cluster_name': self.param("cluster_name", ""),
               'observe': self.param("observe", PerfDefaults.observe),
               'obs-backoff': self.paramf('obs_backoff',
                                          PerfDefaults.obs_backoff),
               'obs-max-backoff': self.paramf('obs_max_backoff',
                                              PerfDefaults.obs_max_backoff),
               'obs-persist-count': self.parami('obs_persist_count',
                                                PerfDefaults.obs_persist_count),
               'obs-repl-count': self.parami('obs_repl_count',
                                             PerfDefaults.obs_repl_count),
               'woq-pattern': self.parami('woq_pattern',
                                         PerfDefaults.woq_pattern),
               'woq-verbose': self.parami('woq_verbose',
                                         PerfDefaults.woq_verbose),
               'cor-pattern': self.parami('cor_pattern',
                                         PerfDefaults.cor_pattern),
               'cor-persist': self.parami('cor_persist',
                                         PerfDefaults.cor_persist),
               'time': self.parami('time', 0),
               'cbm': self.parami('cbm', PerfDefaults.cbm),
               'cbm-host': self.param('cbm_host', PerfDefaults.cbm_host),
               'cbm-port': self.parami('cbm_port', PerfDefaults.cbm_port)}

        cfg_params = cfg.copy()
        cfg_params['test_time'] = time.time()
        cfg_params['test_name'] = test_name
        client_id = ''
        stores = None

        if is_eperf:
            client_id = self.parami("prefix", 0)
        sc = None
        if self.parami("collect_stats", 1):
            sc = self.start_stats(self.spec_reference + ".loop",
                                  test_params=cfg_params, client_id=client_id,
                                  collect_server_stats=collect_server_stats,
                                  ddoc=ddoc)

        self.cur = {'cur-items': num_items}
        if start_at >= 0:
            self.cur['cur-gets'] = start_at
        if num_ops is None:
            num_ops = num_items
        if isinstance(num_ops, int):
            cfg['max-ops'] = num_ops
        else:
            # Here, num_ops is a "time to run" tuple of the form
            # ('seconds', integer_num_of_seconds_to_run)
            cfg['time'] = num_ops[1]

        # For black-box, multi-node tests,
        # always use membase-binary
        if self.is_multi_node:
            protocol = self.mk_protocol(host=self.input.servers[0].ip,
                                        port=self.input.servers[0].port)

        self.log.info("mcsoda - protocol %s" % protocol)
        protocol, host_port, user, pswd = \
            self.protocol_parse(protocol, use_direct=use_direct)

        if not user.strip():
            if "11211" in host_port:
                user = self.param("bucket", "default")
            else:
                user = self.input.servers[0].rest_username
        if not pswd.strip():
            if not "11211" in host_port:
                pswd = self.input.servers[0].rest_password

        self.log.info("mcsoda - %s %s %s %s" %
                      (protocol, host_port, user, pswd))
        self.log.info("mcsoda - cfg: " + str(cfg))
        self.log.info("mcsoda - cur: " + str(self.cur))

        # For query tests always use StoreCouchbase
        if protocol == "couchbase":
            stores = [StoreCouchbase()]

        self.cur, start_time, end_time = \
            self.mcsoda_run(cfg, self.cur, protocol, host_port, user, pswd,
                            stats_collector=sc, ctl=ctl, stores=stores,
                            heartbeat=self.parami("mcsoda_heartbeat", 0),
                            why="loop", bucket=self.param("bucket", "default"))

        ops = {'tot-sets': self.cur.get('cur-sets', 0),
               'tot-gets': self.cur.get('cur-gets', 0),
               'tot-items': self.cur.get('cur-items', 0),
               'tot-creates': self.cur.get('cur-creates', 0),
               'tot-misses': self.cur.get('cur-misses', 0),
               "start-time": start_time,
               "end-time": end_time}

        # Wait until there are no active indexing tasks
        if self.parami('wait_for_indexer', 0):
            ClusterOperationHelper.wait_for_completion(self.rest, 'indexer')

        # Wait until there are no active view compaction tasks
        if self.parami('wait_for_compaction', 0):
            ClusterOperationHelper.wait_for_completion(self.rest,
                                                       'view_compaction')

        if self.parami("loop_wait_until_drained",
                       PerfDefaults.loop_wait_until_drained):
            self.wait_until_drained()

        if self.parami("loop_wait_until_repl",
                       PerfDefaults.loop_wait_until_repl):
            self.wait_until_repl()

        if self.parami("collect_stats", 1) and \
                not self.parami("reb_no_fg", PerfDefaults.reb_no_fg):
            self.end_stats(sc, ops, self.spec_reference + ".loop")

        self.log.info(
            'Finished access phase for worker: {0}:{1}.'\
            .format(self.params("why", "main"), self.parami("prefix", 0))
        )

        return ops, start_time, end_time
Example No. 18
    def setUp(self):
        self.setUpBase0()

        mc_threads = self.parami("mc_threads", PerfDefaults.mc_threads)
        if mc_threads != PerfDefaults.mc_threads:
            for node in self.input.servers:
                self.set_mc_threads(node, mc_threads)

        erlang_schedulers = self.param("erlang_schedulers",
                                       PerfDefaults.erlang_schedulers)
        if erlang_schedulers:
            ClusterOperationHelper.set_erlang_schedulers(self.input.servers,
                                                         erlang_schedulers)
        master = self.input.servers[0]

        self.is_multi_node = False
        self.data_path = master.data_path

        # Number of items loaded by load() method.
        # Does not include or count any items that came from set_up_dgm().
        #
        self.num_items_loaded = 0

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.set_up_cluster(master)
        else:
            master = self.input.servers[0]
            self.set_up_cluster(master)

        # Rebalance
        num_nodes = self.parami("num_nodes", 10)
        self.rebalance_nodes(num_nodes)

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.set_up_buckets()
        else:
            self.set_up_buckets()

        self.set_up_proxy()

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.reconfigure()
        else:
            self.reconfigure()

        if self.parami("dgm", getattr(self, "dgm", 1)):
            self.set_up_dgm()

        time.sleep(10)
        self.setUpBase1()

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                self.wait_until_warmed_up(cluster[0])
        else:
            self.wait_until_warmed_up()
        ClusterOperationHelper.flush_os_caches(self.input.servers)
Example No. 19
    def setUp(self):
        self.setUpBase0()

        mc_threads = self.parami("mc_threads", PerfDefaults.mc_threads)
        if mc_threads != PerfDefaults.mc_threads:
            for node in self.input.servers:
                self.set_mc_threads(node, mc_threads)

        erlang_schedulers = self.param("erlang_schedulers",
                                       PerfDefaults.erlang_schedulers)
        if erlang_schedulers:
            ClusterOperationHelper.set_erlang_schedulers(
                self.input.servers, erlang_schedulers)
        master = self.input.servers[0]

        self.is_multi_node = False
        self.data_path = master.data_path

        # Number of items loaded by load() method.
        # Does not include or count any items that came from set_up_dgm().
        #
        self.num_items_loaded = 0

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.set_up_cluster(master)
        else:
            master = self.input.servers[0]
            self.set_up_cluster(master)

        # Rebalance
        if self.input.clusters:
            for cluster in self.input.clusters.values():
                num_nodes = self.parami("num_nodes_before", len(cluster))
                self.rebalance_nodes(num_nodes, cluster)
        else:
            num_nodes = self.parami("num_nodes", 10)
            self.rebalance_nodes(num_nodes)

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.set_up_buckets()
        else:
            self.set_up_buckets()

        self.set_up_proxy()

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.reconfigure()
        else:
            self.reconfigure()

        if self.parami("dgm", getattr(self, "dgm", 1)):
            self.set_up_dgm()

        time.sleep(10)
        self.setUpBase1()

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                self.wait_until_warmed_up(cluster[0])
        else:
            self.wait_until_warmed_up()
        ClusterOperationHelper.flush_os_caches(self.input.servers)
Example No. 20
 def test_eventing_with_enforce_tls_feature(self):
     ntonencryptionBase().disable_nton_cluster([self.master])
     body = self.create_save_function_body(self.function_name,
                                           self.handler_code)
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size)
     self.deploy_function(body)
     self.verify_doc_count_collections("dst_bucket._default._default",
                                       self.docs_per_day * self.num_docs)
     if self.pause_resume:
         self.pause_function(body)
     else:
         self.undeploy_function(body)
     ntonencryptionBase().setup_nton_cluster(
         self.servers, clusterEncryptionLevel=self.ntonencrypt_level)
     if self.pause_resume:
         self.resume_function(body)
     else:
         self.deploy_function(body)
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size,
               op_type='delete')
     self.verify_doc_count_collections("dst_bucket._default._default", 0)
     if self.pause_resume:
         self.pause_function(body)
     else:
         self.undeploy_function(body)
     ntonencryptionBase().setup_nton_cluster(
         [self.master], clusterEncryptionLevel="strict")
     if self.pause_resume:
         self.resume_function(body)
     else:
         self.deploy_function(body)
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size)
     self.verify_doc_count_collections("dst_bucket._default._default",
                                       self.docs_per_day * self.num_docs)
     assert ClusterOperationHelper.check_if_services_obey_tls(
         servers=[self.master]), "Port binding after enforcing TLS incorrect"
     if self.pause_resume:
         self.pause_function(body)
     else:
         self.undeploy_function(body)
     ntonencryptionBase().setup_nton_cluster(
         self.servers, clusterEncryptionLevel=self.ntonencrypt_level)
     if self.pause_resume:
         self.resume_function(body)
     else:
         self.deploy_function(body)
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size,
               op_type='delete')
     self.verify_doc_count_collections("dst_bucket._default._default", 0)
     if self.pause_resume:
         self.pause_function(body)
     else:
         self.undeploy_function(body)
     ntonencryptionBase().disable_nton_cluster([self.master])
     if self.pause_resume:
         self.resume_function(body)
     else:
         self.deploy_function(body)
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size)
     self.verify_doc_count_collections("dst_bucket._default._default",
                                       self.docs_per_day * self.num_docs)
     self.undeploy_and_delete_function(body)
Example No. 21
 def tear_down_cluster(self):
     self.log.info("tearing down cluster")
     ClusterOperationHelper.cleanup_cluster(self.input.servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(self.input.servers,
                                                          self)
     self.log.info("Cluster teared down")
Example No. 22
 def tear_down_cluster(self):
     print "[perf.tearDown] Tearing down cluster"
     ClusterOperationHelper.cleanup_cluster(self.input.servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(
         self.input.servers, self)
     print "[perf.tearDown] Cluster teared down"