Example #1
    def __init__(self, settings, cluster_spec: ClusterSpec,
                 test_config: TestConfig):
        self.cluster_spec = cluster_spec
        self.test_config = test_config
        self.mc = MetadataClient(settings)
        self.store = PerfStore(settings.cbmonitor_host)
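        # Note: this binds the PhaseSettings class itself, not an instance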
        self.workload_setting = PhaseSettings

        self.interval = self.MAX_SAMPLING_INTERVAL

        self.cluster = settings.cluster

        self.clients = []

        self.cb_host = self.cluster_spec.servers[int(self.test_config.nodes)]

        self.sg_host = next(self.cluster_spec.masters)

        src_client = new_client(host=self.cb_host,
                                bucket='bucket-1',
                                password='******',
                                timeout=self.TIMEOUT)

        self.clients.append(('bucket-1', src_client))

        self.new_docs = Document(1024)
Example #2
class DurabilityLatency(ObserveIndexLatency, Latency):

    COLLECTOR = "durability"

    METRICS = "latency_replicate_to", "latency_persist_to"

    DURABILITY_TIMEOUT = 120

    def __init__(self, settings, workload):
        super().__init__(settings)

        self.new_docs = Document(workload.size)

        self.pools = self.init_pool(settings)

    @staticmethod
    def gen_key() -> Key:
        # numpy.random.random_integers is deprecated; randint is the replacement
        return Key(number=numpy.random.randint(0, 10 ** 9),
                   prefix='endure',
                   fmtr='hex')

    def endure(self, pool, metric):
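        # Upsert a document and time how long until it is persisted to disk
        # (latency_persist_to) or replicated (latency_replicate_to), in ms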
        client = pool.get_client()

        key = self.gen_key()
        doc = self.new_docs.next(key)

        t0 = time()

        client.upsert(key.string, doc)
        if metric == "latency_persist_to":
            client.endure(key.string, persist_to=1, replicate_to=0, interval=0.010,
                          timeout=self.DURABILITY_TIMEOUT)
        else:
            client.endure(key.string, persist_to=0, replicate_to=1, interval=0.001)

        latency = 1000 * (time() - t0)  # Latency in ms

        sleep_time = max(0, self.MAX_POLLING_INTERVAL - latency)

        client.delete(key.string)
        pool.release_client(client)
        return {metric: latency}, sleep_time

    def sample(self):
        while True:
            for bucket, pool in self.pools:
                for metric in self.METRICS:
                    try:
                        stats, sleep_time = self.endure(pool, metric)
                        self.store.append(stats,
                                          cluster=self.cluster,
                                          bucket=bucket,
                                          collector=self.COLLECTOR)
                        sleep(sleep_time)
                    except Exception as e:
                        logger.warning(e)

    def collect(self):
        ObserveIndexLatency.collect(self)
Example #3
 def init_docs(self):
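     # Pick the document generator implementation that matches the doc_gen
     # workload setting (self.ws) and test settings (self.ts)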
     if not hasattr(self.ws, 'doc_gen') or self.ws.doc_gen == 'basic':
         self.docs = Document(self.ws.size)
     elif self.ws.doc_gen == 'string':
         self.docs = String(self.ws.size)
     elif self.ws.doc_gen == 'nested':
         self.docs = NestedDocument(self.ws.size)
     elif self.ws.doc_gen == 'reverse_lookup':
         self.docs = ReverseLookupDocument(self.ws.size,
                                           self.ts.prefix)
     elif self.ws.doc_gen == 'reverse_range_lookup':
         self.docs = ReverseRangeLookupDocument(self.ws.size,
                                                prefix='n1ql',
                                                range_distance=self.ws.range_distance)
     elif self.ws.doc_gen == 'ext_reverse_lookup':
         self.docs = ExtReverseLookupDocument(self.ws.size,
                                              self.ts.prefix,
                                              self.ws.items)
     elif self.ws.doc_gen == 'join':
         self.docs = JoinedDocument(self.ws.size,
                                    self.ts.prefix,
                                    self.ws.items,
                                    self.ws.num_categories,
                                    self.ws.num_replies)
     elif self.ws.doc_gen == 'ref':
         self.docs = RefDocument(self.ws.size,
                                 self.ts.prefix)
     elif self.ws.doc_gen == 'array_indexing':
         self.docs = ArrayIndexingDocument(self.ws.size,
                                           self.ts.prefix,
                                           self.ws.array_size,
                                           self.ws.items)
     elif self.ws.doc_gen == 'profile':
         self.docs = ProfileDocument(self.ws.size,
                                     self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_simple':
         self.docs = ImportExportDocument(self.ws.size,
                                          self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_array':
         self.docs = ImportExportDocumentArray(self.ws.size,
                                               self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_nested':
         self.docs = ImportExportDocumentNested(self.ws.size,
                                                self.ts.prefix)
     elif self.ws.doc_gen == 'large_subdoc':
         self.docs = LargeDocument(self.ws.size)
     elif self.ws.doc_gen == 'gsi_multiindex':
         self.docs = GSIMultiIndexDocument(self.ws.size)
     elif self.ws.doc_gen == 'small_plasma':
         self.docs = SmallPlasmaDocument(self.ws.size)
     elif self.ws.doc_gen == 'sequential_plasma':
         self.docs = SequentialPlasmaDocument(self.ws.size)
     elif self.ws.doc_gen == 'large_item_plasma':
         self.docs = LargeItemPlasmaDocument(self.ws.size,
                                             self.ws.item_size)
     elif self.ws.doc_gen == 'varying_item_plasma':
         self.docs = VaryingItemSizePlasmaDocument(self.ws.size,
                                                   self.ws.size_variation_min,
                                                   self.ws.size_variation_max)
Example #4
    def __init__(self, settings, workload):
        super().__init__(settings)

        self.interval = self.MAX_SAMPLING_INTERVAL

        self.clients = []
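        # One source and one destination client per bucket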
        for bucket in self.get_buckets():
            src_client = new_client(host=settings.master_node,
                                    bucket=bucket,
                                    password=settings.bucket_password,
                                    timeout=self.TIMEOUT)
            dst_client = new_client(host=settings.dest_master_node,
                                    bucket=bucket,
                                    password=settings.bucket_password,
                                    timeout=self.TIMEOUT)
            self.clients.append((bucket, src_client, dst_client))

        self.new_docs = Document(workload.size)
Example #5
class DurabilityLatency(ObserveIndexLatency, Latency):

    COLLECTOR = "durability"

    METRICS = "latency_replicate_to", "latency_persist_to"

    DURABILITY_TIMEOUT = 120

    def __init__(self, settings, workload):
        super().__init__(settings)

        self.new_docs = Document(workload.size)

        self.pools = self.init_pool(settings)

    def endure(self, pool, metric):
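        # Upsert a document and time how long until it is persisted to disk
        # (latency_persist_to) or replicated (latency_replicate_to), in ms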
        client = pool.get_client()

        key = uhex()
        doc = self.new_docs.next(key)

        t0 = time()

        client.upsert(key, doc)
        if metric == "latency_persist_to":
            client.endure(key, persist_to=1, replicate_to=0, interval=0.010,
                          timeout=self.DURABILITY_TIMEOUT)
        else:
            client.endure(key, persist_to=0, replicate_to=1, interval=0.001)

        latency = 1000 * (time() - t0)  # Latency in ms

        sleep_time = max(0, self.MAX_POLLING_INTERVAL - latency)

        client.delete(key)
        pool.release_client(client)
        return {metric: latency}, sleep_time

    def sample(self):
        while True:
            for bucket, pool in self.pools:
                for metric in self.METRICS:
                    try:
                        stats, sleep_time = self.endure(pool, metric)
                        self.store.append(stats,
                                          cluster=self.cluster,
                                          bucket=bucket,
                                          collector=self.COLLECTOR)
                        sleep(sleep_time)
                    except Exception as e:
                        logger.warning(e)

    def collect(self):
        ObserveIndexLatency.collect(self)
Example #6
 def init_docs(self):
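     # Pick the document generator implementation that matches the doc_gen
     # workload setting (self.ws) and test settings (self.ts)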
     if not hasattr(self.ws, 'doc_gen') or self.ws.doc_gen == 'basic':
         self.docs = Document(self.ws.size)
     elif self.ws.doc_gen == 'string':
         self.docs = String(self.ws.size)
     elif self.ws.doc_gen == 'nested':
         self.docs = NestedDocument(self.ws.size)
     elif self.ws.doc_gen == 'reverse_lookup':
         self.docs = ReverseLookupDocument(self.ws.size, self.ts.prefix)
     elif self.ws.doc_gen == 'reverse_range_lookup':
         self.docs = ReverseRangeLookupDocument(self.ws.size,
                                                self.ts.prefix,
                                                self.ws.range_distance)
     elif self.ws.doc_gen == 'ext_reverse_lookup':
         self.docs = ExtReverseLookupDocument(self.ws.size, self.ts.prefix,
                                              self.ws.items)
     elif self.ws.doc_gen == 'hash_join':
         self.docs = HashJoinDocument(self.ws.size, self.ts.prefix,
                                      self.ws.range_distance)
     elif self.ws.doc_gen == 'join':
         self.docs = JoinedDocument(self.ws.size, self.ts.prefix,
                                    self.ws.items, self.ws.num_categories,
                                    self.ws.num_replies)
     elif self.ws.doc_gen == 'ref':
         self.docs = RefDocument(self.ws.size, self.ts.prefix)
     elif self.ws.doc_gen == 'array_indexing':
         self.docs = ArrayIndexingDocument(self.ws.size, self.ts.prefix,
                                           self.ws.array_size,
                                           self.ws.items)
     elif self.ws.doc_gen == 'array_indexing_unique':
         self.docs = ArrayIndexingUniqueDocument(self.ws.size,
                                                 self.ts.prefix,
                                                 self.ws.array_size,
                                                 self.ws.items)
     elif self.ws.doc_gen == 'array_indexing_range_scan':
         self.docs = ArrayIndexingRangeScanDocument(self.ws.size,
                                                    self.ts.prefix,
                                                    self.ws.array_size,
                                                    self.ws.items)
     elif self.ws.doc_gen == 'profile':
         self.docs = ProfileDocument(self.ws.size, self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_simple':
         self.docs = ImportExportDocument(self.ws.size, self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_array':
         self.docs = ImportExportDocumentArray(self.ws.size, self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_nested':
         self.docs = ImportExportDocumentNested(self.ws.size,
                                                self.ts.prefix)
     elif self.ws.doc_gen == 'large':
         self.docs = LargeDocument(self.ws.size)
     elif self.ws.doc_gen == 'gsi_multiindex':
         self.docs = GSIMultiIndexDocument(self.ws.size)
     elif self.ws.doc_gen == 'small_plasma':
         self.docs = SmallPlasmaDocument(self.ws.size)
     elif self.ws.doc_gen == 'sequential_plasma':
         self.docs = SequentialPlasmaDocument(self.ws.size)
     elif self.ws.doc_gen == 'large_item_plasma':
         self.docs = LargeItemPlasmaDocument(self.ws.size,
                                             self.ws.item_size)
     elif self.ws.doc_gen == 'varying_item_plasma':
         self.docs = VaryingItemSizePlasmaDocument(
             self.ws.size, self.ws.size_variation_min,
             self.ws.size_variation_max)
     elif self.ws.doc_gen == 'eventing_small':
         self.docs = EventingSmallDocument(self.ws.size)
     elif self.ws.doc_gen == 'tpc_ds':
         self.docs = TpcDsDocument()
     elif self.ws.doc_gen == 'package':
         self.docs = PackageDocument(self.ws.size)
     elif self.ws.doc_gen == 'incompressible':
         self.docs = IncompressibleString(self.ws.size)
     elif self.ws.doc_gen == 'big_fun':
         self.docs = BigFunDocument()
     elif self.ws.doc_gen == 'multibucket':
         self.docs = MultiBucketDocument(self.ws.size)
     elif self.ws.doc_gen == 'advancedfilter':
         self.docs = AdvFilterDocument(self.ws.size)
     elif self.ws.doc_gen == 'advancedfilterxattr':
         self.docs = AdvFilterXattrBody(self.ws.size)
Example #7
    def __init__(self, settings, workload):
        super().__init__(settings)

        self.new_docs = Document(workload.size)

        self.pools = self.init_pool(settings)
Example #8
class XdcrLag(Latency):

    COLLECTOR = "xdcr_lag"

    METRICS = "xdcr_lag",

    INITIAL_POLLING_INTERVAL = 0.001  # 1 ms

    TIMEOUT = 600  # 10 minutes

    MAX_SAMPLING_INTERVAL = 0.25  # 250 ms

    def __init__(self, settings, workload):
        super().__init__(settings)

        self.interval = self.MAX_SAMPLING_INTERVAL

        self.clients = []
        for bucket in self.get_buckets():
            src_client = new_client(host=settings.master_node,
                                    bucket=bucket,
                                    password=settings.bucket_password,
                                    timeout=self.TIMEOUT)
            dst_client = new_client(host=settings.dest_master_node,
                                    bucket=bucket,
                                    password=settings.bucket_password,
                                    timeout=self.TIMEOUT)
            self.clients.append((bucket, src_client, dst_client))

        self.new_docs = Document(workload.size)

    @staticmethod
    def gen_key() -> Key:
        # numpy.random.random_integers is deprecated; randint is the replacement
        return Key(number=numpy.random.randint(0, 10 ** 9),
                   prefix='xdcr',
                   fmtr='hex')

    def measure(self, src_client, dst_client):
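        # Upsert the document on the source cluster, then poll the destination
        # cluster with a gradually increasing interval until the document
        # appears; the elapsed time is the XDCR lag in milliseconds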
        key = self.gen_key()
        doc = self.new_docs.next(key)

        polling_interval = self.INITIAL_POLLING_INTERVAL

        src_client.upsert(key.string, doc)

        t0 = time()
        while time() - t0 < self.TIMEOUT:
            if dst_client.get(key.string, quiet=True).success:
                break
            sleep(polling_interval)
            polling_interval *= 1.05  # increase interval by 5%
        else:
            logger.warning('XDCR sampling timed out after {} seconds'
                           .format(self.TIMEOUT))
        t1 = time()

        src_client.remove(key.string, quiet=True)
        dst_client.remove(key.string, quiet=True)

        return {'xdcr_lag': (t1 - t0) * 1000}  # s -> ms

    def sample(self):
        for bucket, src_client, dst_client in self.clients:
            lags = self.measure(src_client, dst_client)
            self.store.append(lags,
                              cluster=self.cluster,
                              bucket=bucket,
                              collector=self.COLLECTOR)
Example #9
class SGImport_latency(Collector):
    COLLECTOR = "sgimport_latency"

    METRICS = "sgimport_latency"

    INITIAL_POLLING_INTERVAL = 0.001  # 1 ms

    TIMEOUT = 3600  # 1 hour

    MAX_SAMPLING_INTERVAL = 10  # 10 s

    def __init__(self, settings, cluster_spec: ClusterSpec,
                 test_config: TestConfig):
        self.cluster_spec = cluster_spec
        self.test_config = test_config
        self.mc = MetadataClient(settings)
        self.store = PerfStore(settings.cbmonitor_host)
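        # Note: this binds the PhaseSettings class itself, not an instance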
        self.workload_setting = PhaseSettings

        self.interval = self.MAX_SAMPLING_INTERVAL

        self.cluster = settings.cluster

        self.clients = []

        self.cb_host = self.cluster_spec.servers[int(self.test_config.nodes)]

        self.sg_host = next(self.cluster_spec.masters)

        src_client = new_client(host=self.cb_host,
                                bucket='bucket-1',
                                password='******',
                                timeout=self.TIMEOUT)

        self.clients.append(('bucket-1', src_client))

        self.new_docs = Document(1024)

    def check_longpoll_changefeed(self, host: str, key: str,
                                  last_sequence: str):
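        # Long-poll the Sync Gateway admin _changes feed (filtered by channel)
        # and return the arrival time of the response that contains the key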

        sg_db = 'db'
        api = 'http://{}:4985/{}/_changes'.format(host, sg_db)

        last_sequence_str = "{}".format(last_sequence)

        data = {
            'filter': 'sync_gateway/bychannel',
            'feed': 'longpoll',
            "channels": "123",
            "since": last_sequence_str,
            "heartbeat": 3600000
        }

        response = requests.post(url=api, data=json.dumps(data))
        t1 = time()

        record_found = 0
        if response.status_code == 200:
            for record in response.json()['results']:
                if record['id'] == key:
                    record_found = 1
                    break
            if record_found != 1:
                # Recurse until the key shows up, and propagate the arrival
                # time of the response that actually contained it
                return self.check_longpoll_changefeed(host=host,
                                                      key=key,
                                                      last_sequence=last_sequence)
        return t1

    def insert_doc(self, src_client, key: str, doc):

        src_client.upsert(key, doc)
        return time()

    def get_lastsequence(self, host: str):
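        # Read the current last_seq from the _changes feed so the longpoll
        # only sees changes made after this point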
        sg_db = 'db'
        api = 'http://{}:4985/{}/_changes'.format(host, sg_db)

        data = {
            'filter': 'sync_gateway/bychannel',
            'feed': 'normal',
            "channels": "123",
            "since": "0"
        }

        response = requests.post(url=api, data=json.dumps(data))

        last_sequence = response.json()['last_seq']

        return last_sequence

    def measure(self, src_client):
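        # Run the changefeed listener and the KV insert concurrently; the
        # import latency is the changefeed arrival time (t1) minus the
        # insert time (t0)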

        key = "sgimport_{}".format(uhex())

        doc = self.new_docs.next(key)

        last_sequence = self.get_lastsequence(host=self.sg_host)

        executor = ThreadPoolExecutor(max_workers=2)
        future1 = executor.submit(self.check_longpoll_changefeed,
                                  host=self.sg_host,
                                  key=key,
                                  last_sequence=last_sequence)
        future2 = executor.submit(self.insert_doc,
                                  src_client=src_client,
                                  key=key,
                                  doc=doc)
        t1, t0 = future1.result(), future2.result()
        print('import latency t1, t0', t1, t0, (t1 - t0) * 1000)

        return {'sgimport_latency': (t1 - t0) * 1000}  # s -> ms

    def sample(self):
        for bucket, src_client in self.clients:

            lags = self.measure(src_client)
            self.store.append(lags,
                              cluster=self.cluster,
                              collector=self.COLLECTOR)

    def update_metadata(self):
        self.mc.add_cluster()
        self.mc.add_metric(self.METRICS, collector=self.COLLECTOR)
Example #10
 def update(self, key, subdoc_fields, size):
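     # Regenerate each listed field with the Document generator's private
     # field builders, then write it back with a subdocument upsert
     # (SD is presumably couchbase.subdocument)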
     newdoc = Document(size)
     alphabet = newdoc._build_alphabet(key)
     for field in subdoc_fields.split(','):
         new_field_value = getattr(newdoc, '_build_' + field)(alphabet)
         self.client.mutate_in(key, SD.upsert(field, new_field_value))