Example #1
    def collect_kpi(self):
        logger.info('Collecting Sync Gateway KPI')

        criteria = OrderedDict((
            (95, self.test_config.gateload_settings.p95_avg_criteria),
            (99, self.test_config.gateload_settings.p99_avg_criteria),
        ))

        summary = defaultdict(dict)
        latencies = defaultdict(list)
        all_requests_per_sec = []
        self.errors = []
        for idx, gateload in enumerate(self.remote.gateloads, start=1):
            for p in criteria:
                kpi = self.KPI.format(p)
                latency = self.metric_helper.calc_push_latency(p=p, idx=idx)
                if latency == 0:
                    status = '{}: Failed to get latency data'.format(gateload)
                    self.errors.append(status)
                summary[gateload][kpi] = latency
                latencies[p].append(latency)
            requests_per_sec = self.metric_helper.calc_requests_per_sec(
                idx=idx)
            all_requests_per_sec.append(requests_per_sec)
            summary[gateload]['Average requests per sec'] = requests_per_sec
            doc_counters = self.metric_helper.calc_gateload_doc_counters(
                idx=idx)
            summary[gateload]['gateload doc counters'] = doc_counters
        logger.info('Per node summary: {}'.format(pretty_dict(summary)))

        self.reporter.post_to_sf(round(np.mean(latencies[99]), 1))

        self.pass_fail = []
        for p, criterion in criteria.items():
            kpi = self.KPI.format(p)
            average = np.mean(latencies[p])
            if average == 0 or average > criterion:
                status = '{}: {} - doesn\'t meet the criteria of {}'\
                    .format(kpi, average, criterion)
            else:
                status = '{}: {} - meets the criteria of {}'\
                    .format(kpi, average, criterion)
            self.pass_fail.append(status)
        logger.info('Aggregated latency: {}'.format(pretty_dict(
            self.pass_fail)))

        network_matrix = self.metric_db_servers_helper.calc_network_throughput
        network_matrix['Avg requests per sec'] = int(
            np.average(all_requests_per_sec))
        logger.info('Network throughput: {}'.format(
            json.dumps(network_matrix, indent=4)))

        logger.info('Checking pass or fail')
        if self.errors:
            logger.interrupt('Test failed because of errors')
        if 'doesn\'t meet' in ''.join(self.pass_fail):
            logger.interrupt(
                'Test failed because at least one of the latencies does not meet KPI'
            )
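
Every snippet on this page formats structures for logging through a `pretty_dict` helper that returns a string (several examples write its output straight to JSON files). As a point of reference, a minimal sketch of such a helper; the project's actual implementation may differ:

import json

def pretty_dict(d) -> str:
    # Serialize a JSON-compatible object with sorted keys and
    # indentation so that log output stays readable.
    return json.dumps(d, indent=4, sort_keys=True, default=str)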
Example #2
def post_benchmark(benchmark):
    if args.post_to_sf <= 0:
        logger.info("Dry run stats: {}\n".format(pretty_dict(benchmark)))
        return

    logger.info('Adding a benchmark: {}'.format(pretty_dict(benchmark)))
    requests.post('http://{}/api/v1/benchmarks'.format(StatsSettings.SHOWFAST),
                  json.dumps(benchmark))
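
A usage sketch for post_benchmark; the payload fields below are invented for illustration, while args.post_to_sf and StatsSettings.SHOWFAST come from the surrounding module:

benchmark = {
    'metric': 'kv_latency',  # hypothetical field names
    'value': 1.2,
}
post_benchmark(benchmark)  # only logs a dry run when args.post_to_sf <= 0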
Example #4
    def create_function(self, node: str, func: dict, name: str):
        logger.info('Creating function on node {}: {}'.format(
            node, pretty_dict(func)))

        api = 'http://{}:8091/_p/event/saveAppTempStore/?name={}'.format(
            node, name)
        self.post(url=api, data=json.dumps(func))
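
A call sketch for create_function; the node address and function payload are illustrative, since the snippet only requires that func be JSON-serializable:

rest.create_function(
    node='10.1.1.1',
    func={'appname': 'on_update', 'appcode': '...'},  # hypothetical payload
    name='on_update',
)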
Example #5
def main():
    args = get_args()

    infra_spec = ClusterSpec()
    infra_spec.parse(fname=args.cluster)

    if infra_spec.dynamic_infrastructure:
        infra_provider = infra_spec.infrastructure_settings['provider']

        if infra_provider == 'aws':
            deployer = AWSDeployer(infra_spec, args)
        elif infra_provider == 'azure':
            deployer = AzureDeployer(infra_spec, args)
        elif infra_provider == 'gcp':
            deployer = GCPDeployer(infra_spec, args)
        else:
            raise Exception("{} is not a valid infrastructure provider".format(
                infra_provider))

        try:
            deployer.deploy()
        except Exception as ex:
            with open(infra_spec.generated_cloud_config_path) as f:
                logger.info("infrastructure dump:\n{}".format(
                    pretty_dict(json.load(f))))
            raise ex
Example #6
def main():
    args = get_args()

    cluster_spec = ClusterSpec()
    cluster_spec.parse(args.cluster_spec_fname)

    remote = RemoteHelper(cluster_spec, verbose=False)

    remote.collect_info()

    for hostname in cluster_spec.servers:
        for fname in glob.glob('{}/*.zip'.format(hostname)):
            shutil.move(fname, '{}.zip'.format(hostname))

    if cluster_spec.backup is not None:
        logs = os.path.join(cluster_spec.backup, 'logs')
        if os.path.exists(logs):
            shutil.make_archive('tools', 'zip', logs)

    failures = defaultdict(dict)

    for file_name in glob.iglob('./*.zip'):
        panic_files, crash_files, storage_corrupted = validate_logs(file_name)
        if panic_files:
            failures['panics'][file_name] = panic_files
        if crash_files:
            failures['crashes'][file_name] = crash_files
        if storage_corrupted:
            failures['storage_corrupted'][file_name] = True
            remote.collect_index_datafiles()

    if failures:
        logger.interrupt(
            "Following failures found: {}".format(pretty_dict(failures)))
Example #7
 def create_index(self,
                  host: str,
                  bucket: str,
                  name: str,
                  field: str,
                  storage: str = 'memdb',
                  scope: str = '_default',
                  collection: str = '_default'):
     api = 'http://{}:9102/createIndex'.format(host)
     data = {
         'index': {
             'bucket': bucket,
             'scope': scope,
             'collection': collection,
             'using': storage,
             'name': name,
             'secExprs': ['`{}`'.format(field)],
             'exprType': 'N1QL',
             'isPrimary': False,
             'where': '',
             'deferred': False,
             'partitionKey': '',
             'partitionScheme': 'SINGLE',
         },
         'type': 'create',
         'version': 1,
     }
     logger.info('Creating index {}'.format(pretty_dict(data)))
     self.post(url=api, data=json.dumps(data))
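
A usage sketch, assuming rest is an instance of the class above; the host, bucket, and field values are illustrative:

# Creates a non-deferred secondary index on the `email` field via the
# indexer administration port (9102 in the snippet above).
rest.create_index(host='10.1.1.1', bucket='bucket-1',
                  name='idx_email', field='email')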
Example #8
    def validate_failures(self):
        for node in self.eventing_nodes:
            all_stats = self.rest.get_eventing_stats(node=node,
                                                     full_stats=True)

            req_stats = [{key: fun_stat[key]
                          for key in self.STAT_REQ_FIELDS}
                         for fun_stat in all_stats]

            logger.info("Required stats for {node} : {stats}".format(
                node=node, stats=pretty_dict(req_stats)))
            for function_stats in all_stats:
                execution_stats = function_stats["execution_stats"]
                failure_stats = function_stats["failure_stats"]

                # Validate Execution stats
                for stat, value in execution_stats.items():
                    if "failure" in stat and value != 0:
                        raise Exception(
                            '{function}: {node}: {stat} is not zero'.format(
                                function=function_stats["function_name"],
                                node=node,
                                stat=stat))

                # Validate Failure stats
                for stat, value in failure_stats.items():
                    if value != 0 and stat != "timestamp":
                        raise Exception(
                            '{function}: {node}: {stat} is not zero'.format(
                                function=function_stats["function_name"],
                                node=node,
                                stat=stat))
Example #9
 def store_plans(self):
     logger.info('Storing query plans')
     for i, query in enumerate(self.test_config.access_settings.n1ql_queries):
         if self.test_config.collection.collection_map:
             query_statement = query['statement']
             for bucket in self.test_config.buckets:
                 if bucket in query_statement:
                     bucket_replaced = False
                     bucket_scopes = self.test_config.collection.collection_map[bucket]
                     for scope in bucket_scopes.keys():
                         for collection in bucket_scopes[scope].keys():
                             if bucket_scopes[scope][collection]["access"] == 1:
                                 query_target = "default:`{}`.`{}`.`{}`"\
                                     .format(bucket, scope, collection)
                                 replace_target = "`{}`".format(bucket)
                                 query_statement = query_statement.\
                                     replace(replace_target, query_target)
                                 bucket_replaced = True
                                 break
                         if bucket_replaced:
                             break
                     if not bucket_replaced:
                         raise Exception('No access target for bucket: {}'
                                         .format(bucket))
             logger.info("Grabbing plan for query: {}".format(query_statement))
             plan = self.rest.explain_n1ql_statement(self.query_nodes[0], query_statement)
         else:
             plan = self.rest.explain_n1ql_statement(self.query_nodes[0], query['statement'])
         with open('query_plan_{}.json'.format(i), 'w') as fh:
             fh.write(pretty_dict(plan))
Example #10
    def measure_latency(self):
        logger.info('Measuring replication latency')
        timings = []
        found = lambda cb: [
            v for v in cb.observe(item).value if v.flags != OBS_NOTFOUND
        ]
        password = self.test_config.bucket.password
        for master in self.cluster_spec.yield_masters():
            for bucket in self.test_config.buckets:
                host, port = master.split(':')
                cb = Couchbase.connect(host=host, port=port,
                                       bucket=bucket, password=password)
                for _ in range(self.NUM_SAMPLES):
                    item = uhex()
                    cb.set(item, item)
                    t0 = time()
                    while len(found(cb)) != 2:
                        sleep(0.001)
                    latency = 1000 * (time() - t0)  # s -> ms
                    logger.info(latency)
                    timings.append(latency)

        summary = {
            'min': round(min(timings), 1),
            'max': round(max(timings), 1),
            'mean': round(np.mean(timings), 1),
            '80th': round(np.percentile(timings, 80), 1),
            '90th': round(np.percentile(timings, 90), 1),
            '95th': round(np.percentile(timings, 95), 1),
            '99th': round(np.percentile(timings, 99), 1),
        }
        logger.info(pretty_dict(summary))

        if hasattr(self, 'experiment'):
            self.experiment.post_results(summary['95th'])
Example #11
 def configure_auto_compaction(self):
     compaction_settings = self.test_config.compaction
     for master in self.masters():
         self.rest.configure_auto_compaction(master, compaction_settings)
         settings = self.rest.get_auto_compaction_settings(master)
         logger.info('Auto-compaction settings: {}'
                     .format(pretty_dict(settings)))
Example #12
    def _post_cluster(self):
        cluster = self.cluster_spec.parameters
        cluster['Name'] = self.cluster_spec.name

        logger.info('Adding a cluster: {}'.format(pretty_dict(cluster)))
        requests.post('http://{}/api/v1/clusters'.format(SHOWFAST_HOST),
                      json.dumps(cluster))
Example #13
 def check_core_dumps(self) -> str:
     dumps_per_host = self.remote.detect_core_dumps()
     core_dumps = {
         host: dumps for host, dumps in dumps_per_host.items() if dumps
     }
     if core_dumps:
         return pretty_dict(core_dumps)
Example #14
 def check_core_dumps(self):
     dumps_per_host = self.remote.detect_core_dumps()
     core_dumps = {
         host: dumps for host, dumps in dumps_per_host.items() if dumps
     }
     if core_dumps:
         logger.interrupt(pretty_dict(core_dumps))
Example #15
    def validate_failures(self):
        ignore_failures = [
            "uv_try_write_failure_counter",
        ]
        for node in self.eventing_nodes:
            all_stats = self.rest.get_eventing_stats(node=node)

            logger.info("Stats for {node} : {stats}".format(
                node=node, stats=pretty_dict(all_stats)))
            for function_stats in all_stats:
                execution_stats = function_stats["execution_stats"]
                failure_stats = function_stats["failure_stats"]

                # Validate Execution stats
                for stat, value in execution_stats.items():
                    if "failure" in stat and value != 0 and stat not in ignore_failures:
                        raise Exception(
                            '{function}: {node}: {stat} is not zero'.format(
                                function=function_stats["function_name"],
                                node=node,
                                stat=stat))

                # Validate Failure stats
                for stat, value in failure_stats.items():
                    if value != 0 and stat != "timestamp":
                        raise Exception(
                            '{function}: {node}: {stat} is not zero'.format(
                                function=function_stats["function_name"],
                                node=node,
                                stat=stat))
Example #16
    def create_bucket(self, host: str, name: str, password: str,
                      ram_quota: int, replica_number: int, replica_index: int,
                      eviction_policy: str, bucket_type: str,
                      conflict_resolution_type: str = None):
        logger.info('Adding new bucket: {}'.format(name))

        api = 'http://{}:8091/pools/default/buckets'.format(host)

        data = {
            'name': name,
            'bucketType': bucket_type,
            'ramQuotaMB': ram_quota,
            'evictionPolicy': eviction_policy,
            'flushEnabled': 1,
            'replicaNumber': replica_number,
            'authType': 'sasl',
            'saslPassword': password,
        }

        if bucket_type == BucketSettings.BUCKET_TYPE:
            data['replicaIndex'] = replica_index

        if conflict_resolution_type:
            data['conflictResolutionType'] = conflict_resolution_type

        logger.info('Bucket configuration: {}'.format(pretty_dict(data)))

        self.post(url=api, data=data)
Example #17
    def _post_cluster(self):
        cluster = self.test.cluster_spec.parameters
        cluster['Name'] = self.test.cluster_spec.name

        logger.info('Adding a cluster: {}'.format(pretty_dict(cluster)))
        requests.post('http://{}/api/v1/clusters'.format(StatsSettings.SHOWFAST),
                      json.dumps(cluster))
Example #19
 def configure_auto_compaction(self):
     compaction_settings = self.test_config.compaction
     for master in self.cluster_spec.masters:
         self.rest.configure_auto_compaction(master, compaction_settings)
         settings = self.rest.get_auto_compaction_settings(master)
         logger.info('Auto-compaction settings: {}'.format(
             pretty_dict(settings)))
Example #20
    def create_bucket(self, host_port, name, password, ram_quota,
                      replica_number, replica_index, eviction_policy,
                      conflict_resolution_type=None):
        logger.info('Adding new bucket: {}'.format(name))

        api = 'http://{}/pools/default/buckets'.format(host_port)

        data = {
            'name': name,
            'bucketType': 'membase',
            'ramQuotaMB': ram_quota,
            'evictionPolicy': eviction_policy,
            'flushEnabled': 1,
            'replicaNumber': replica_number,
            'replicaIndex': replica_index,
            'authType': 'sasl',
            'saslPassword': password,
        }

        if conflict_resolution_type:
            data['conflictResolutionType'] = conflict_resolution_type

        logger.info('Bucket configuration: {}'.format(misc.pretty_dict(data)))

        self.post(url=api, data=data)
Example #21
    def create_fts_index_n1ql(self):
        logger.info("Creating FTS index")
        definition = read_json(
            self.test_config.index_settings.couchbase_fts_index_configfile)
        bucket_name = self.test_config.buckets[0]
        definition.update(
            {'name': self.test_config.index_settings.couchbase_fts_index_name})
        if self.test_config.collection.collection_map:
            collection_map = self.test_config.collection.collection_map
            definition["params"]["doc_config"][
                "mode"] = "scope.collection.type_field"
            scope_name = list(collection_map[bucket_name].keys())[1:][0]
            collection_name = list(
                collection_map[bucket_name][scope_name].keys())[0]
            ind_type_mapping = \
                copy.deepcopy(definition["params"]["mapping"]["default_mapping"])
            definition["params"]["mapping"]["default_mapping"][
                "enabled"] = False
            new_type_mapping_name = "{}.{}".format(scope_name, collection_name)
            definition["params"]["mapping"]["types"] = {
                new_type_mapping_name: ind_type_mapping
            }

        logger.info('Index definition: {}'.format(pretty_dict(definition)))
        self.rest.create_fts_index(
            self.fts_nodes[0],
            self.test_config.index_settings.couchbase_fts_index_name,
            definition)
        self.monitor.monitor_fts_indexing_queue(
            self.fts_nodes[0],
            self.test_config.index_settings.couchbase_fts_index_name,
            int(self.test_config.access_settings.items))
Example #22
    def create_fts_indexes(self):
        less_words = True
        for bkt in self.test_config.buckets:
            definition = read_json(self.access.couchbase_index_configfile)
            if less_words:
                name = "fts_less_words"
                less_words = False
            else:
                name = "fts_more_words"
                less_words = True
            index_name = bkt + "-" + name
            definition.update({
                'name': index_name,
                'sourceName': bkt,
            })
            mapping = definition["params"]["mapping"]["default_mapping"]

            prop = definition["params"]["mapping"]["default_mapping"][
                "properties"]
            index = prop["fts_less_words"]
            new_prop = {name: index}
            mapping.update({'properties': new_prop})

            index = \
                definition["params"]["mapping"]["default_mapping"]["properties"][name]["fields"][0]
            index.update({'name': name})

            logger.info('Index definition: {}'.format(pretty_dict(definition)))
            self.rest.create_fts_index(self.fts_master_node, index_name,
                                       definition)
            self.monitor.monitor_fts_indexing_queue(
                self.fts_nodes[0], index_name,
                int(self.test_config.access_settings.items * 0.95))
            self.monitor.monitor_fts_index_persistence(self.fts_nodes,
                                                       index_name, bkt)
Example #23
    def configure_auto_compaction(self):
        compaction_settings = self.test_config.compaction
        if self.dynamic_infra:
            cluster = self.remote.get_cluster()
            db = int(compaction_settings.db_percentage)
            view = int(compaction_settings.view_percentage)
            para = str(compaction_settings.parallel).lower() == 'true'

            auto_compaction = cluster['spec']['cluster']\
                .get('autoCompaction',
                     {'databaseFragmentationThreshold': {'percent': 30},
                      'viewFragmentationThreshold': {'percent': 30},
                      'parallelCompaction': False})

            db_percent = auto_compaction.get('databaseFragmentationThreshold',
                                             {'percent': 30})
            db_percent['percent'] = db
            auto_compaction['databaseFragmentationThreshold'] = db_percent

            views_percent = auto_compaction.get('viewFragmentationThreshold',
                                                {'percent': 30})
            views_percent['percent'] = view
            auto_compaction['viewFragmentationThreshold'] = views_percent
            auto_compaction['parallelCompaction'] = para

            self.remote.update_cluster_config(cluster)
        else:
            for master in self.cluster_spec.masters:
                self.rest.configure_auto_compaction(master,
                                                    compaction_settings)
                settings = self.rest.get_auto_compaction_settings(master)
                logger.info('Auto-compaction settings: {}'.format(
                    pretty_dict(settings)))
Example #24
    def start_gateload(self, test_config):
        logger.info('Starting gateload')
        _if = self.detect_if()
        local_ip = self.detect_ip(_if)
        index = self.cluster_spec.gateloads.index(local_ip)

        with open('templates/gateload_config_template.json') as fh:
            template = json.load(fh)

        template['Hostname'] = self.cluster_spec.gateways[index]
        template['UserOffset'] = (
            test_config.gateload_settings.pushers +
            test_config.gateload_settings.pullers) * index
        template['NumPullers'] = test_config.gateload_settings.pullers
        template['NumPushers'] = test_config.gateload_settings.pushers

        config_fname = 'templates/gateload_config_{}.json'.format(index)
        with open(config_fname, 'w') as fh:
            fh.write(pretty_dict(template))
        put(config_fname, '/root/gateload_config.json')

        run(
            'ulimit -n 65536; nohup /opt/gocode/bin/gateload '
            '-workload /root/gateload_config.json &>/root/gateload.log&',
            pty=False)
Example #25
 def save_expvar(self):
     for idx, gateway_ip in enumerate(self.test.remote.gateways,
                                      start=1):
         expvar = self.test.request_helper.collect_expvar(gateway_ip)
         fname = 'gateway_expvar_{}.json'.format(idx)
         with open(fname, 'w') as fh:
             fh.write(pretty_dict(expvar))
Example #26
    def _print_amplifications(self, old_stats, now_stats, now_ops, doc_size,
                              stat_type):
        ampl_stats = dict()
        for server in self.rest.get_active_nodes_by_role(
                self.master_node, "kv"):
            if (server not in now_stats.keys()) or (server
                                                    not in old_stats.keys()):
                logger.info("{} stats for {} not found!".format(
                    stat_type, server))
                continue
            get_ops = now_ops[server]["get_ops"] - self.disk_ops[server][
                "get_ops"]
            set_ops = now_ops[server]["set_ops"] - self.disk_ops[server][
                "set_ops"]
            if set_ops:
                ampl_stats["write_amp"] = \
                    (now_stats[server]["nwb"] - old_stats[server]["nwb"]) / \
                    (set_ops * doc_size)
                ampl_stats["write_io_per_set"] = \
                    (now_stats[server]["nw"] - old_stats[server]["nw"]) / set_ops
                ampl_stats["read_bytes_per_set"] = \
                    (now_stats[server]["nrb"] - old_stats[server]["nrb"]) / set_ops
                ampl_stats["read_io_per_set"] = \
                    (now_stats[server]["nr"] - old_stats[server]["nr"]) / set_ops
            if get_ops:
                ampl_stats["read_amp"] = \
                    (now_stats[server]["nr"] - old_stats[server]["nr"]) / get_ops
                ampl_stats["read_bytes_per_get"] = \
                    (now_stats[server]["nrb"] - old_stats[server]["nrb"]) / get_ops

            logger.info("{} Amplification stats for {}: {}".format(
                stat_type, server, pretty_dict(ampl_stats)))
            logger.info("Note: read_bytes_per_set and read_io_per_set are "
                        "valid for set only workload.")
Example #27
def store_metrics(statement: str, metrics: dict):
    with open('tpcds.log', 'a') as fh:
        fh.write(pretty_dict({
            'statement': statement,
            'metrics': metrics,
        }))
        fh.write('\n')
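
A usage sketch; the statement and metrics below are invented for illustration:

store_metrics(
    statement='SELECT COUNT(*) FROM store_sales;',
    metrics={'elapsed_time_s': 1.2, 'result_count': 1},
)
# Appends a pretty-printed {'statement': ..., 'metrics': ...} record,
# followed by a newline, to tpcds.log.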
Example #28
    def set_services(self, host: str, services: str):
        logger.info('Configuring services on {}: {}'
                    .format(host, pretty_dict(services)))

        api = 'http://{}:8091/node/controller/setupServices'.format(host)
        data = {'services': services}
        self.post(url=api, data=data)
Example #30
    def set_services(self, host_port, services):
        logger.info('Configuring services on master node {}: {}'
                    .format(host_port, misc.pretty_dict(services)))

        api = 'http://{}/node/controller/setupServices'.format(host_port)
        data = {'services': services}
        self.post(url=api, data=data)
Example #31
    def validate_failures(self):
        ignore_failures = ["uv_try_write_failure_counter", ]
        for node in self.eventing_nodes:
            all_stats = self.rest.get_eventing_stats(node=node)

            logger.info("Stats for {node} : {stats}"
                        .format(node=node,
                                stats=pretty_dict(all_stats)))
            for function_stats in all_stats:
                execution_stats = function_stats["execution_stats"]
                failure_stats = function_stats["failure_stats"]

                # Validate Execution stats
                for stat, value in execution_stats.items():
                    if "failure" in stat and value != 0 and stat not in ignore_failures:
                        raise Exception(
                            '{function}: {node}: {stat} is not zero'.format(
                                function=function_stats["function_name"], node=node, stat=stat))

                # Validate Failure stats
                for stat, value in failure_stats.items():
                    if value != 0 and stat != "timestamp":
                        raise Exception(
                            '{function}: {node}: {stat} is not zero'.format(
                                function=function_stats["function_name"], node=node, stat=stat))
Example #32
 def set_index_settings(self):
     settings = self.test_config.gsi_settings.settings
     for server in self.cluster_spec.servers_by_role('index'):
         if settings:
             self.rest.set_index_settings(server, settings)
         curr_settings = self.rest.get_index_settings(server)
         curr_settings = pretty_dict(curr_settings)
         logger.info("Index settings: {}".format(curr_settings))
Example #33
    def store_plans(self):
        logger.info('Storing query plans')

        for i, query in enumerate(self.test_config.access_settings.n1ql_queries):
            plan = self.rest.explain_n1ql_statement(self.query_nodes[0],
                                                    query['statement'])
            with open('query_plan_{}.json'.format(i), 'w') as fh:
                fh.write(pretty_dict(plan))
Example #34
 def set_index_settings(self):
     settings = self.test_config.secondaryindex_settings.settings
     for _, servers in self.cluster_spec.yield_servers_by_role('index'):
         for server in servers:
             if settings:
                 self.rest.set_index_settings(server, settings)
             curr_settings = self.rest.get_index_settings(server)
             curr_settings = pretty_dict(curr_settings)
             logger.info("Index settings: {}".format(curr_settings))
Example #35
 def set_query_settings(self):
     query_nodes = self.cluster_spec.servers_by_role('n1ql')
     if query_nodes:
         settings = self.test_config.n1ql_settings.settings
         if settings:
             self.rest.set_query_settings(query_nodes[0], settings)
         settings = self.rest.get_query_settings(query_nodes[0])
         settings = pretty_dict(settings)
         logger.info('Query settings: {}'.format(settings))
Example #36
 def run(self):
     super(BandwidthTest, self).run()
     self.metric_db_servers_helper = MetricHelper(self)
     network_matrix = self.metric_db_servers_helper.calc_network_bandwidth
     logger.info(
         'Network bandwidth: {}'.format(pretty_dict(network_matrix))
     )
     if self.test_config.stats_settings.enabled:
         self.reporter.post_to_sf(network_matrix)
Example #37
 def get_on_update_success(self):
     on_update_success = 0
     for node in self.eventing_nodes:
         stats = self.rest.get_eventing_stats(node=node)
         for stat in stats:
             logger.info("Execution stats for {node}: {stats}"
                         .format(node=node,
                                 stats=pretty_dict(stat["execution_stats"])))
             on_update_success += stat["execution_stats"]["on_update_success"]
     return on_update_success
Example #38
 def run_phase(self,
               phase: str,
               task: Callable, settings: PhaseSettings,
               target_iterator: Iterable,
               timer: int = None,
               wait: bool = True):
     logger.info('Running {}: {}'.format(phase, pretty_dict(settings)))
     self.worker_manager.run_tasks(task, settings, target_iterator, timer)
     if wait:
         self.worker_manager.wait_for_workers()
Example #39
 def set_query_settings(self):
     logger.info('Setting query settings')
     query_nodes = self.cluster_spec.servers_by_role('n1ql')
     if query_nodes:
         settings = self.test_config.n1ql_settings.cbq_settings
         if settings:
             self.rest.set_query_settings(query_nodes[0], settings)
         settings = self.rest.get_query_settings(query_nodes[0])
         settings = pretty_dict(settings)
         logger.info('Query settings: {}'.format(settings))
Example #41
 def get_timer_msg_counter(self):
     timer_msg_counter = 0
     for node in self.eventing_nodes:
         stats = self.rest.get_eventing_stats(node=node)
         for stat in stats:
             logger.info("Execution stats for {node}: {stats}"
                         .format(node=node,
                                 stats=pretty_dict(stat["execution_stats"])))
             timer_msg_counter += stat["execution_stats"]["timer_msg_counter"]
     return timer_msg_counter
Example #42
 def get_on_update_success(self):
     on_update_success = 0
     for node in self.eventing_nodes:
         stats = self.rest.get_eventing_stats(node=node)
         for stat in stats:
             logger.info("Execution stats for {node}: {stats}".format(
                 node=node, stats=pretty_dict(stat["execution_stats"])))
             on_update_success += stat["execution_stats"][
                 "on_update_success"]
     return on_update_success
Example #43
 def get_timer_msg_counter(self):
     timer_msg_counter = 0
     for node in self.eventing_nodes:
         stats = self.rest.get_eventing_stats(node=node)
         for stat in stats:
             logger.info("Execution stats for {node}: {stats}".format(
                 node=node, stats=pretty_dict(stat["execution_stats"])))
             timer_msg_counter += stat["execution_stats"][
                 "timer_msg_counter"]
     return timer_msg_counter
Example #44
 def run_sg_phase(self,
                  phase: str,
                  task: Callable,
                  settings: PhaseSettings,
                  timer: int = None,
                  distribute: bool = False) -> None:
     logger.info('Running {}: {}'.format(phase, pretty_dict(settings)))
     self.worker_manager.run_sg_tasks(task, settings, timer, distribute,
                                      phase)
     self.worker_manager.wait_for_workers()
Example #45
    def set_index_settings(self):
        logger.info('Setting index settings')
        index_nodes = self.cluster_spec.servers_by_role('index')
        if index_nodes:
            settings = self.test_config.gsi_settings.settings
            if settings:
                self.rest.set_index_settings(index_nodes[0], settings)

            settings = self.rest.get_index_settings(index_nodes[0])
            settings = pretty_dict(settings)
            logger.info('Index settings: {}'.format(settings))
Example #46
 def get_timer_responses(self):
     doc_timer_responses = 0
     for node in self.eventing_nodes:
         stats = self.rest.get_eventing_stats(node=node)
         for stat in stats:
             logger.info("Event processing stats for {node}: {stats}"
                         .format(node=node,
                                 stats=pretty_dict(stat["event_processing_stats"])))
             doc_timer_responses += \
                 stat["event_processing_stats"]["timer_responses_received"]
     return doc_timer_responses
Example #47
 def create_index(self):
     definition = read_json(self.access.couchbase_index_configfile)
     definition.update({
         'name': self.access.couchbase_index_name,
         'sourceName': self.test_config.buckets[0],
     })
     if self.access.couchbase_index_type:
         definition["params"]["store"]["indexType"] = self.access.couchbase_index_type
     logger.info('Index definition: {}'.format(pretty_dict(definition)))
     self.rest.create_fts_index(self.fts_master_node,
                                self.access.couchbase_index_name, definition)
Example #48
    def post_results(self, value):
        self.update_defaults()

        key = uhex()
        self.experiment['value'] = value
        self.experiment['defaults'] = self.name

        logger.info('Adding new experiment {}: {}'.format(
            key, pretty_dict(self.experiment)
        ))
        cb = Couchbase.connect(bucket='experiments', **SF_STORAGE)
        cb.set(key, self.experiment)
Example #49
def main():
    failures = defaultdict(dict)

    for file_name in glob.iglob('./*.zip'):
        panic_files, crash_files = validate_logs(file_name)
        if panic_files:
            failures['panics'][file_name] = panic_files
        if crash_files:
            failures['crashes'][file_name] = crash_files

    if failures:
        logger.interrupt(
            "Following failures found: {}".format(pretty_dict(failures)))
Example #50
    def _post_metric(self, metric: JSON):
        if 'category' not in metric:
            metric['category'] = self.test_config.showfast.category

        metric.update({
            'cluster': self.cluster_spec.name,
            'component': self.test_config.showfast.component,
            'subCategory': self.test_config.showfast.sub_category,
        })

        logger.info('Adding a metric: {}'.format(pretty_dict(metric)))
        requests.post('http://{}/api/v1/metrics'.format(SHOWFAST_HOST),
                      json.dumps(metric))
Example #51
    def _post_metric(self, metric, metric_info):
        if metric_info is None:
            metric_info = {'title': self.test.test_config.test_case.title}
        metric_info['id'] = metric
        metric_info['cluster'] = self.test.cluster_spec.name
        metric_info['component'] = self.test.test_config.test_case.component
        metric_info['category'] = \
            metric_info.get('category', self.test.test_config.test_case.category)
        metric_info['subCategory'] = self.test.test_config.test_case.sub_category

        logger.info('Adding a metric: {}'.format(pretty_dict(metric_info)))
        requests.post('http://{}/api/v1/metrics'.format(StatsSettings.SHOWFAST),
                      json.dumps(metric_info))
Example #52
    def start_sync_gateways(self):
        with open('templates/gateway_config_template.json') as fh:
            template = json.load(fh)

        db_master = next(self.cluster_spec.yield_masters())
        template['databases']['db']['server'] = "http://*****:*****@{}/".format(db_master)
        template['maxIncomingConnections'] = self.test_config.gateway_settings.conn_in
        template['maxCouchbaseConnections'] = self.test_config.gateway_settings.conn_db
        template['CompressResponses'] = self.test_config.gateway_settings.compression

        with open('templates/gateway_config.json', 'w') as fh:
            fh.write(pretty_dict(template))
        self.remote_helper.start_sync_gateway()
Example #53
    def access(self):
        self.workload = self.test_config.access_settings

        self.remote.start_bandwidth_monitor(int(self.workload.time / 60) + 1)
        self.access_bg()
        time.sleep(self.workload.time + 60)
        self.metric_db_servers_helper = MetricHelper(self)
        self.network_matrix_subdoc = self.metric_db_servers_helper.calc_network_bandwidth
        logger.info(
            'Network bandwidth for subdoc: {}'.format(pretty_dict(self.network_matrix_subdoc))
        )

        self.remote.kill_process('iptraf')

        self.remote.start_bandwidth_monitor(int(self.workload.time / 60) + 1)
        self.workload.workers = self.workload.subdoc_workers
        self.workload.subdoc_workers = 0
        self.access_bg(self.workload)
        time.sleep(self.workload.time + 60)
        self.metric_db_servers_helper = MetricHelper(self)
        self.network_matrix_full = self.metric_db_servers_helper.calc_network_bandwidth
        logger.info(
            'Network bandwidth for full doc: {}'.format(pretty_dict(self.network_matrix_full))
        )
Example #54
    def report_stats(self, num_buckets):
        cpu = lambda data: round(np.mean(data), 1)
        rss = lambda data: int(np.mean(data) / 1024 ** 2)
        conn = lambda data: int(np.mean(data))

        summary = {}
        for hostname, s in self.rest.get_node_stats(self.master_node,
                                                    'bucket-1'):
            summary[hostname] = {
                'memcached, MBytes': rss(s['proc/memcached/mem_resident']),
                'beam.smp, MBytes': rss(s['proc/(main)beam.smp/mem_resident']),
                'Total CPU, %': cpu(s['cpu_utilization_rate']),
                'Curr. connections': conn(s['curr_connections']),
            }
        self.results[num_buckets] = summary[self.master_node]
        logger.info(pretty_dict(summary))
Example #55
 def create_index(self, host: str, bucket: str, name: str, field: str,
                  storage: str = 'memdb'):
     api = 'http://{}:9102/createIndex'.format(host)
     data = {
         'index': {
             'bucket': bucket,
             'using': storage,
             'name': name,
             'secExprs': ['`{}`'.format(field)],
             'exprType': 'N1QL',
             'isPrimary': False,
             'where': '',
             'deferred': False,
             'partitionKey': '',
             'partitionScheme': 'SINGLE',
         },
         'type': 'create',
         'version': 1,
     }
     logger.info('Creating index {}'.format(pretty_dict(data)))
     self.post(url=api, data=json.dumps(data))
Example #56
    def create_fts_indexes(self):
        less_words = True
        for bkt in self.test_config.buckets:
            definition = read_json(self.access.couchbase_index_configfile)
            if less_words:
                name = "fts_less_words"
                less_words = False
            else:
                name = "fts_more_words"
                less_words = True
            index_name = bkt + "-" + name
            definition.update({
                'name': index_name,
                'sourceName': bkt,
            })
            mapping = definition["params"]["mapping"]["default_mapping"]

            prop = definition["params"]["mapping"]["default_mapping"]["properties"]
            index = prop["fts_less_words"]
            new_prop = {name: index}
            mapping.update({
                'properties': new_prop
            })

            index = \
                definition["params"]["mapping"]["default_mapping"]["properties"][name]["fields"][0]
            index.update({
                'name': name
            })

            logger.info('Index definition: {}'.format(pretty_dict(definition)))
            self.rest.create_fts_index(self.fts_master_node,
                                       index_name, definition)
            self.monitor.monitor_fts_indexing_queue(self.fts_nodes[0],
                                                    index_name,
                                                    int(self.test_config.access_settings.items *
                                                        0.95))
            self.monitor.monitor_fts_index_persistence(self.fts_nodes,
                                                       index_name, bkt)
Example #57
    def create_bucket(self,
                      host: str,
                      name: str,
                      password: str,
                      ram_quota: int,
                      replica_number: int,
                      replica_index: int,
                      eviction_policy: str,
                      bucket_type: str,
                      conflict_resolution_type: str = None,
                      compression_mode: str = None):
        logger.info('Adding new bucket: {}'.format(name))

        api = 'http://{}:8091/pools/default/buckets'.format(host)

        data = {
            'name': name,
            'bucketType': bucket_type,
            'ramQuotaMB': ram_quota,
            'evictionPolicy': eviction_policy,
            'flushEnabled': 1,
            'replicaNumber': replica_number,
            'authType': 'sasl',
            'saslPassword': password,
        }

        if bucket_type == BucketSettings.BUCKET_TYPE:
            data['replicaIndex'] = replica_index

        if conflict_resolution_type:
            data['conflictResolutionType'] = conflict_resolution_type

        if compression_mode:
            data['compressionMode'] = compression_mode

        logger.info('Bucket configuration: {}'.format(pretty_dict(data)))

        self.post(url=api, data=data)
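
Finally, a usage sketch for the last snippet; all values are illustrative, and the 'membase' bucket type mirrors the value used in Example #20:

rest.create_bucket(host='10.1.1.1', name='bucket-1', password='password',
                   ram_quota=1024, replica_number=1, replica_index=0,
                   eviction_policy='valueOnly', bucket_type='membase',
                   compression_mode='passive')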