Example #1
    def __init__(self, test, verbose):
        self.clusters = OrderedDict()
        self.remote = RemoteHelper(test.cluster_spec, test.test_config,
                                   verbose=verbose)

        for cluster_name, servers in test.cluster_spec.yield_clusters():
            cluster = '{}_{}_{}'.format(cluster_name,
                                        test.build.replace('.', ''),
                                        uhex()[:3])
            master = servers[0].split(':')[0]
            self.clusters[cluster] = master
        if test.test_config.test_case.monitor_clients:
            for node in test.cluster_spec.workers:
                cluster = '{}{}'.format(list(self.clusters.items())[0][0][:-3], uhex()[:3])
                master = node.split(':')[0]
                self.clusters[cluster] = master

        self.index_node = ''
        for _, servers in test.cluster_spec.yield_servers_by_role('index'):
            if servers:
                self.index_node = servers[0].split(':')[0]

        if hasattr(test, 'ALL_BUCKETS'):
            buckets = None
        else:
            buckets = test.test_config.buckets[:1]
        if hasattr(test, 'ALL_HOSTNAMES'):
            hostnames = tuple(test.cluster_spec.yield_hostnames())
        else:
            hostnames = None

        self.settings = type('settings', (object,), {
            'seriesly_host': test.test_config.stats_settings.seriesly['host'],
            'cbmonitor_host_port': test.test_config.stats_settings.cbmonitor['host'],
            'interval': test.test_config.stats_settings.interval,
            'secondary_statsfile': test.test_config.stats_settings.secondary_statsfile,
            'buckets': buckets,
            'hostnames': hostnames,
            'sync_gateway_nodes':
                test.remote.gateways if test.remote else None,
            'monitor_clients':
                test.cluster_spec.workers if test.test_config.test_case.monitor_clients else None,
            'fts_server': test.test_config.test_case.fts_server
        })()
        self.lat_interval = test.test_config.stats_settings.lat_interval
        if test.cluster_spec.ssh_credentials:
            self.settings.ssh_username, self.settings.ssh_password = \
                test.cluster_spec.ssh_credentials
        self.settings.rest_username, self.settings.rest_password = \
            test.cluster_spec.rest_credentials
        self.settings.bucket_password = test.test_config.bucket.password

        self.settings.index_node = self.index_node

        self.collectors = []
        self.processes = []
        self.snapshots = []
        self.fts_stats = None
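
Note: every snippet on this page relies on a uhex() helper that is not shown. In this style of code it is typically just a hex-encoded random UUID; a minimal sketch under that assumption (the exact module and definition are not confirmed by these examples):

from uuid import uuid4


def uhex() -> str:
    # Random 32-character lowercase hex string (a hex-encoded UUID4).
    return uuid4().hex

Slices such as uhex()[:3], uhex()[:6] or uhex()[:12] then serve as short unique suffixes for cluster names, file names and temporary directories.
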
Example #2
    def __init__(self, test):
        self.clusters = OrderedDict()
        self.remote = RemoteHelper(test.cluster_spec, test.test_config, verbose=True)

        for cluster_name, servers in test.cluster_spec.yield_clusters():
            cluster = "{}_{}_{}".format(cluster_name, test.build.replace(".", ""), uhex()[:3])
            master = servers[0].split(":")[0]
            self.clusters[cluster] = master
        if test.test_config.test_case.monitor_clients:
            for node in test.cluster_spec.workers:
                cluster = "{}{}".format(self.clusters.items()[0][0][:-3], uhex()[:3])
                master = node.split(":")[0]
                self.clusters[cluster] = master

        self.index_node = ""
        for _, servers in test.cluster_spec.yield_servers_by_role("index"):
            if servers:
                self.index_node = servers[0].split(":")[0]

        if hasattr(test, "ALL_BUCKETS"):
            buckets = None
        else:
            buckets = test.test_config.buckets[:1]
        if hasattr(test, "ALL_HOSTNAMES"):
            hostnames = tuple(test.cluster_spec.yield_hostnames())
        else:
            hostnames = None

        self.settings = type(
            "settings",
            (object,),
            {
                "seriesly_host": test.test_config.stats_settings.seriesly["host"],
                "cbmonitor_host_port": test.test_config.stats_settings.cbmonitor["host"],
                "interval": test.test_config.stats_settings.interval,
                "secondary_statsfile": test.test_config.stats_settings.secondary_statsfile,
                "buckets": buckets,
                "hostnames": hostnames,
                "sync_gateway_nodes": test.remote.gateways if test.remote else None,
                "monitor_clients": test.cluster_spec.workers if test.test_config.test_case.monitor_clients else None,
            },
        )()
        self.lat_interval = test.test_config.stats_settings.lat_interval
        if test.cluster_spec.ssh_credentials:
            self.settings.ssh_username, self.settings.ssh_password = test.cluster_spec.ssh_credentials
        self.settings.rest_username, self.settings.rest_password = test.cluster_spec.rest_credentials
        self.settings.bucket_password = test.test_config.bucket.password

        self.settings.index_node = self.index_node

        self.collectors = []
        self.processes = []
        self.snapshots = []
        self.bandwidth = False
Example #3
    def __init__(self, test):
        self.clusters = OrderedDict()
        for cluster_name, servers in test.cluster_spec.yield_clusters():
            cluster = '{}_{}_{}'.format(cluster_name,
                                        test.build.replace('.', ''),
                                        uhex()[:3])
            master = servers[0].split(':')[0]
            self.clusters[cluster] = master

        if hasattr(test, 'ALL_BUCKETS'):
            buckets = None
        else:
            buckets = test.test_config.buckets[:1]
        if hasattr(test, 'ALL_HOSTNAMES'):
            hostnames = tuple(test.cluster_spec.yield_hostnames())
        else:
            hostnames = None

        self.settings = type(
            'settings', (object, ), {
                'seriesly_host': CBMONITOR_HOST,
                'cbmonitor_host_port': CBMONITOR_HOST,
                'interval': test.test_config.stats_settings.interval,
                'buckets': buckets,
                'hostnames': hostnames,
            })()
        self.lat_interval = test.test_config.stats_settings.lat_interval
        self.settings.ssh_username, self.settings.ssh_password = \
            test.cluster_spec.ssh_credentials
        self.settings.rest_username, self.settings.rest_password = \
            test.cluster_spec.rest_credentials

        self.collectors = []
        self.processes = []
        self.snapshots = []
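
The type('settings', (object,), {...})() expression used in these constructors builds a throwaway settings object: type called with three arguments creates a new class whose attributes come from the dictionary, and the trailing () instantiates it. A minimal sketch of the idiom with hypothetical values:

# type(name, bases, namespace) creates a class; the trailing () instantiates it.
settings = type('settings', (object,), {
    'interval': 5,            # hypothetical sampling interval, seconds
    'buckets': ['bucket-1'],  # hypothetical bucket list
})()

print(settings.interval)  # -> 5
print(settings.buckets)   # -> ['bucket-1']
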
Example #4
    def measure_latency(self):
        logger.info('Measuring replication latency')
        timings = []
        found = lambda cb: [
            v for v in cb.observe(item).value if v.flags != OBS_NOTFOUND
        ]
        password = self.test_config.bucket.password
        for master in self.cluster_spec.yield_masters():
            for bucket in self.test_config.buckets:
                host, port = master.split(':')
                cb = Couchbase.connect(host=host, port=port,
                                       bucket=bucket, password=password)
                for _ in range(self.NUM_SAMPLES):
                    item = uhex()
                    cb.set(item, item)
                    t0 = time()
                    while len(found(cb)) != 2:
                        sleep(0.001)
                    latency = 1000 * (time() - t0)  # s -> ms
                    logger.info(latency)
                    timings.append(latency)

        summary = {
            'min': round(min(timings), 1),
            'max': round(max(timings), 1),
            'mean': round(np.mean(timings), 1),
            '80th': round(np.percentile(timings, 80), 1),
            '90th': round(np.percentile(timings, 90), 1),
            '95th': round(np.percentile(timings, 95), 1),
            '99th': round(np.percentile(timings, 99), 1),
        }
        logger.info(pretty_dict(summary))

        if hasattr(self, 'experiment'):
            self.experiment.post_results(summary['95th'])
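
The summary step above leans on NumPy for the percentile math; a standalone sketch of that reporting step, assuming timings is a non-empty list of latencies in milliseconds:

import numpy as np

timings = [12.4, 15.1, 9.8, 22.7, 14.3]  # hypothetical latencies, ms

# np.percentile accepts a sequence of percentiles, so several can be
# computed in a single call.
p80, p90, p95, p99 = np.percentile(timings, [80, 90, 95, 99])

summary = {
    'min': round(min(timings), 1),
    'max': round(max(timings), 1),
    'mean': round(float(np.mean(timings)), 1),
    '95th': round(float(p95), 1),
}
print(summary)
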
Example #5
    def _generate_benchmark(self,
                            metric: str,
                            value: Union[float, int],
                            snapshots: List[str]) -> JSON:

        if self.test_config.sdktesting_settings.enable_sdktest:
            self.sdk_type = self.test_config.sdktesting_settings.sdk_type[-1]

            if self.sdk_type == 'java':
                self.sdk_version = self.test_config.ycsb_settings.sdk_version
            elif self.sdk_type == 'libc':
                self.sdk_version = self.test_config.client_settings.libcouchbase
            elif self.sdk_type == 'python':
                self.sdk_version = self.test_config.client_settings.python_client

            self.build = self.sdk_version + ' : ' + self.build

        if self.test_config.access_settings.show_tls_version:
            self.build = self.rest.get_minimum_tls_version(self.master_node) + ' : ' + self.build
            logger.info('build: {}'.format(self.build))

        return {
            'build': self.build,
            'buildURL': os.environ.get('BUILD_URL'),
            'dateTime': time.strftime('%Y-%m-%d %H:%M'),
            'id': uhex(),
            'metric': metric,
            'snapshots': snapshots,
            'value': value,
        }
Example #6
class WorkerSettings(PhaseSettings):

    REUSE_WORKSPACE = 'false'
    WORKSPACE_DIR = '/tmp/{}'.format(uhex()[:12])

    def __init__(self, options):
        self.reuse_worker = options.get('reuse_workspace', self.REUSE_WORKSPACE)
        self.worker_dir = options.get('workspace_location', self.WORKSPACE_DIR)
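
One detail worth noting in the class above: WORKSPACE_DIR is a class attribute, so the uhex()[:12] suffix is generated once when the class body is executed, not once per instance. A minimal sketch demonstrating that behaviour (the class name and helper below are stand-ins, not part of the original code):

from uuid import uuid4


def uhex() -> str:
    return uuid4().hex


class WorkerSettingsSketch:
    # Evaluated once at class-definition time: every instance that does not
    # override 'workspace_location' shares the same random default directory.
    WORKSPACE_DIR = '/tmp/{}'.format(uhex()[:12])

    def __init__(self, options: dict):
        self.worker_dir = options.get('workspace_location', self.WORKSPACE_DIR)


a = WorkerSettingsSketch({})
b = WorkerSettingsSketch({})
assert a.worker_dir == b.worker_dir  # both instances share the default suffix
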
Example #7
    def _generate_benchmark(self, metric, value):
        return {
            'build': self.test.build,
            'buildURL': os.environ.get('BUILD_URL'),
            'dateTime': time.strftime('%Y-%m-%d %H:%M'),
            'id': uhex(),
            'metric': metric,
            'snapshots': self.test.snapshots,
            'value': value,
        }
Example #8
    def __init__(self, cluster_spec, test_config):
        self.cluster_spec = cluster_spec
        self.buckets = test_config.buckets

        self.temp_dir = '/tmp/{}'.format(uhex()[:12])
        logger.info("Using prefix for temp_dir (worker_dir): {}".format(self.temp_dir))
        self.user, self.password = cluster_spec.client_credentials
        with settings(user=self.user, password=self.password):
            self.initialize_project()
            self.start()
Example #9
    def init_clusters(self, phase: str):
        self.cluster_map = OrderedDict()

        for cluster_name, servers in self.test.cluster_spec.clusters:
            cluster_id = '{}_{}_{}_{}'.format(cluster_name,
                                              self.test.build.replace('.', ''),
                                              phase,
                                              uhex()[:4])
            self.cluster_map[cluster_id] = servers[0]
        self.test.cbmonitor_clusters = list(self.cluster_map.keys())
Example #10
def post_initial(initial_time):
    metric_id = get_metric_id()
    data = {
        'build': args.version,
        'buildURL': os.environ.get('BUILD_URL'),
        'dateTime': time.strftime('%Y-%m-%d %H:%M'),
        'id': uhex(),
        'metric': 'secondary_fdb_standalone_{}_ini_nyx'.format(metric_id),
        'value': initial_time,
    }
    post_benchmark(data)
Example #11
    def post_results(self, value):
        self.update_defaults()

        key = uhex()
        self.experiment['value'] = value
        self.experiment['defaults'] = self.name

        logger.info('Adding new experiment {}: {}'.format(
            key, pretty_dict(self.experiment)))
        cb = Couchbase.connect(bucket='experiments', **SF_STORAGE)
        cb.set(key, self.experiment)
Example #12
class WorkerSettings(object):

    REUSE_WORKSPACE = 'false'
    WORKSPACE_DIR = '/tmp/{}'.format(uhex()[:12])

    def __init__(self, options):
        self.reuse_worker = options.get('reuse_workspace', self.REUSE_WORKSPACE)
        self.worker_dir = options.get('workspace_location', self.WORKSPACE_DIR)

    def __str__(self):
        return str(self.__dict__)
Example #13
    def _generate_benchmark(self, metric: str, value: Union[float, int],
                            snapshots: List[str]) -> JSON:
        return {
            'build': self.build,
            'buildURL': os.environ.get('BUILD_URL'),
            'dateTime': time.strftime('%Y-%m-%d %H:%M'),
            'id': uhex(),
            'metric': metric,
            'snapshots': snapshots,
            'value': value,
        }
Example #14
    def collect_info(self):
        logger.info('Running cbcollect_info')

        run('rm -f *.zip')

        fname = '{}.zip'.format(uhex())
        r = run('{}/bin/cbcollect_info.exe {}'.format(self.CB_DIR, fname),
                warn_only=True)
        if not r.return_code:
            get('{}'.format(fname))
            run('rm -f {}'.format(fname))
Example #15
    def _prepare_data(self, metric, value):
        key = uhex()
        data = {
            'build': self.test.build,
            'metric': metric,
            'value': value,
            'snapshots': self.test.snapshots
        }
        if self.test.master_events:
            data.update({'master_events': key})
        return key, data
Example #16
    def _prepare_data(self, metric, value):
        key = uhex()
        data = {
            'build': self.test.build,
            'metric': metric,
            'value': value,
            'snapshots': self.test.snapshots,
            'build_url': os.environ.get('BUILD_URL')
        }
        if self.test.master_events:
            data.update({'master_events': key})
        return key, data
Example #17
    def __init__(self, test):
        self.clusters = OrderedDict()
        for cluster_name, servers in test.cluster_spec.yield_clusters():
            cluster = '{}_{}_{}'.format(cluster_name,
                                        test.build.replace('.', ''),
                                        uhex()[:3])
            master = servers[0].split(':')[0]
            self.clusters[cluster] = master

        self.index_node = ''
        for _, servers in test.cluster_spec.yield_servers_by_role('index'):
            if servers:
                self.index_node = servers[0].split(':')[0]

        if hasattr(test, 'ALL_BUCKETS'):
            buckets = None
        else:
            buckets = test.test_config.buckets[:1]
        if hasattr(test, 'ALL_HOSTNAMES'):
            hostnames = tuple(test.cluster_spec.yield_hostnames())
        else:
            hostnames = None

        self.settings = type('settings', (object,), {
            'seriesly_host': test.test_config.stats_settings.seriesly['host'],
            'cbmonitor_host_port': test.test_config.stats_settings.cbmonitor['host'],
            'interval': test.test_config.stats_settings.interval,
            'secondary_statsfile': test.test_config.stats_settings.secondary_statsfile,
            'buckets': buckets,
            'hostnames': hostnames,
            'sync_gateway_nodes': test.remote.gateways if test.remote else None,
        })()
        self.lat_interval = test.test_config.stats_settings.lat_interval
        if test.cluster_spec.ssh_credentials:
            self.settings.ssh_username, self.settings.ssh_password = \
                test.cluster_spec.ssh_credentials
        self.settings.rest_username, self.settings.rest_password = \
            test.cluster_spec.rest_credentials
        self.settings.bucket_password = test.test_config.bucket.password

        self.settings.index_node = self.index_node

        self.collectors = []
        self.processes = []
        self.snapshots = []
Example #18
    def _prepare_data(self, metric, value):
        key = uhex()
        master = next(self.test.cluster_spec.yield_masters())
        build = self.test.rest.get_version(master)
        data = {
            'build': build,
            'metric': metric,
            'value': value,
            'snapshots': self.test.snapshots
        }
        if self.test.master_events:
            data.update({'master_events': key})
        return key, data
Example #19
    def __init__(self, cluster_spec, test_config):
        self.cluster_spec = cluster_spec
        self.test_config = test_config

        self.worker_hosts = cluster_spec.workers
        self.queues = []
        self.workers = []

        self.user, self.password = cluster_spec.client_credentials

        self.temp_dir = '/tmp/{}'.format(uhex()[:12])
        with settings(user=self.user, password=self.password):
            self._initialize_project()
            self._start()
Example #20
    def profile(self, host: str, service: str, profile: str):
        logger.info('Collecting {} profile on {}'.format(profile, host))

        if 'syncgateway' in self.test_config.profiling_settings.services:
            if profile == 'sg_cpu':
                url = 'http://{}:4985/_profile'.format(host)
                filename = '{}_{}_{}_{}.pprof'.format(host, service, profile,
                                                      uhex()[:6])
                requests.post(url=url, data=json.dumps({"file": filename}))
                time.sleep(self.test_config.profiling_settings.cpu_interval)
                requests.post(url=url, data=json.dumps({}))

            if profile == 'sg_heap':
                filename = '{}_{}_{}_{}.pprof'.format(host, service, profile,
                                                      uhex()[:6])
                url = 'http://{}:4985/_heap'.format(host)
                requests.post(url=url, data=json.dumps({"file": filename}))

            if profile == 'goroutine':
                url = 'http://{}:4985/_debug/pprof/goroutine'.format(host)
                response = requests.get(url=url)
                self.save(host, service, profile, response.content)

            self.copy_profiles(host=host)
Example #21
    def collect_info(self):
        logger.info('Running cbcollect_info with redaction')

        run('rm -f /tmp/*.zip')

        fname = '/tmp/{}.zip'.format(uhex())
        try:
            r = run('{}/bin/cbcollect_info {}'
                    .format(self.CB_DIR, fname), warn_only=True, timeout=1200)
        except CommandTimeout:
            logger.error('cbcollect_info timed out')
            return
        if not r.return_code:
            get('{}'.format(fname))
            run('rm -f {}'.format(fname))
Example #22
    def _generate_benchmark(self, metric: str, value: Union[float, int],
                            snapshots: List[str]) -> JSON:

        if self.test_config.sdktesting_settings.enable_sdktest:
            self.sdk_version = self.test_config.ycsb_settings.sdk_version
            self.build = self.sdk_version + ' : ' + self.build

        return {
            'build': self.build,
            'buildURL': os.environ.get('BUILD_URL'),
            'dateTime': time.strftime('%Y-%m-%d %H:%M'),
            'id': uhex(),
            'metric': metric,
            'snapshots': snapshots,
            'value': value,
        }
Example #23
    def measure(self, src_client):

        key = "sgimport_{}".format(uhex())

        doc = self.new_docs.next(key)

        last_sequence = self.get_lastsequence(host=self.sg_host)

        executor = ThreadPoolExecutor(max_workers=2)
        future1 = executor.submit(self.check_longpoll_changefeed,
                                  host=self.sg_host,
                                  key=key,
                                  last_sequence=last_sequence)
        future2 = executor.submit(self.insert_doc,
                                  src_client=src_client,
                                  key=key,
                                  doc=doc)
        t1, t0 = future1.result(), future2.result()
        print('import latency t1, t0', t1, t0, (t1 - t0) * 1000)

        return {'sgimport_latency': (t1 - t0) * 1000}  # s -> ms
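
The measurement above overlaps two operations with a two-worker ThreadPoolExecutor: one future blocks on the Sync Gateway changes feed while the other inserts the document, and the latency is the difference between the two returned timestamps. A stripped-down sketch of the same pattern with hypothetical stand-in functions:

from concurrent.futures import ThreadPoolExecutor
from time import sleep, time


def wait_for_change() -> float:
    sleep(0.2)     # stand-in for polling the changes feed
    return time()  # timestamp when the change was observed


def insert_doc() -> float:
    t0 = time()
    sleep(0.05)    # stand-in for writing the document
    return t0      # timestamp when the write started


with ThreadPoolExecutor(max_workers=2) as executor:
    f_wait = executor.submit(wait_for_change)
    f_insert = executor.submit(insert_doc)
    t1, t0 = f_wait.result(), f_insert.result()

print('latency: {:.1f} ms'.format((t1 - t0) * 1000))
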
Example #24
    def endure(self, pool, metric):
        client = pool.get_client()

        key = uhex()
        doc = self.new_docs.next(key)

        t0 = time()

        client.upsert(key, doc)
        if metric == "latency_persist_to":
            client.endure(key, persist_to=1, replicate_to=0, interval=0.010,
                          timeout=120)
        else:
            client.endure(key, persist_to=0, replicate_to=1, interval=0.001)

        latency = 1000 * (time() - t0)  # Latency in ms

        sleep_time = max(0, self.MAX_POLLING_INTERVAL - latency)

        client.delete(key)
        pool.release_client(client)
        return {metric: latency}, sleep_time
Example #25
    def measure(self, src_client, dst_client):
        key = "xdcr_{}".format(uhex())
        doc = self.new_docs.next(key)

        polling_interval = self.INITIAL_POLLING_INTERVAL

        src_client.upsert(key, doc)

        t0 = time()
        while time() - t0 < self.TIMEOUT:
            if dst_client.get(key, quiet=True).success:
                break
            sleep(polling_interval)
            polling_interval *= 1.05  # increase interval by 5%
        else:
            logger.warn('XDCR sampling timed out after {} seconds'.format(
                self.TIMEOUT))
        t1 = time()

        src_client.remove(key, quiet=True)
        dst_client.remove(key, quiet=True)

        return {'xdcr_lag': (t1 - t0) * 1000}  # s -> ms
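
The while ... else construct in this example is easy to misread: the else block runs only when the loop ends because its condition became false (the timeout), not when break fires after a successful read. A minimal sketch of the same poll-with-backoff shape, using a hypothetical is_replicated() check:

from time import sleep, time

TIMEOUT = 5.0            # seconds
REPLICATION_DELAY = 1.0  # pretend the document becomes visible after 1 s
start = time()


def is_replicated() -> bool:
    # Hypothetical stand-in for dst_client.get(key, quiet=True).success
    return time() - start > REPLICATION_DELAY


polling_interval = 0.1
t0 = time()
while time() - t0 < TIMEOUT:
    if is_replicated():
        break                 # success: the else block is skipped
    sleep(polling_interval)
    polling_interval *= 1.05  # back off by 5% per iteration
else:
    print('timed out after {} seconds'.format(TIMEOUT))

print('lag: {:.0f} ms'.format((time() - t0) * 1000))
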
Example #26
    def profile(self, host: str, service: str, profile: str):
        logger.info('Collecting {} profile on {}'.format(profile, host))

        endpoint = self.ENDPOINTS[profile]
        port = self.DEBUG_PORTS[service]

        if self.profiling_settings.linux_perf_profile_flag:
            logger.info('Collecting {} profile on {} using linux perf '
                        'record'.format(profile, host))

            fname = 'linux_{}_{}_{}_perf.data'.format(host, profile,
                                                      uhex()[:4])
            self.linux_perf_profile(host=host,
                                    fname=fname,
                                    path=self.linux_perf_path)

        else:
            logger.info('Collecting {} profile on {}'.format(profile, host))

            with self.new_tunnel(host, port) as tunnel:
                url = endpoint.format(tunnel.local_bind_port)
                response = requests.get(url=url, auth=self.rest.auth)
                self.save(host, service, profile, response.content)
Example #27
    def _create_doc(self, pool):
        client = pool.get_client()

        key = uhex()
        client.set(key, {"city": key})
        return client, key
Example #28
    def create_alt_mail_doc(pool):
        client = pool.get_client()

        key = uhex()
        client.set(key, {"alt_email": key})
        return client, key
Example #29
    def save(self, host: str, service: str, profile: str, content: bytes):
        fname = '{}_{}_{}_{}.pprof'.format(host, service, profile, uhex()[:6])
        with open(fname, 'wb') as fh:
            fh.write(content)
Example #30
    def save(self, host: str, service: str, profile: str, content: bytes):
        fname = '{}_{}_{}_{}.pprof'.format(host, service, profile, uhex()[:6])
        logger.info('Collected {} '.format(fname))
        with open(fname, 'wb') as fh:
            fh.write(content)