class SpringQueryLatency(SpringLatency):

    """Collect latency of view queries generated by the spring workload."""

    COLLECTOR = "spring_query_latency"

    METRICS = ("latency_query", )

    def __init__(self, settings, workload, ddocs, params, index_type,
                 prefix=None):
        super(SpringQueryLatency, self).__init__(settings, workload, prefix)
        # Pick the query generator depending on whether an index type is given
        if index_type is None:
            self.new_queries = ViewQueryGen(ddocs, params)
        else:
            self.new_queries = ViewQueryGenByType(index_type, params)

    def measure(self, client, metric, bucket):
        # Generate a key/document pair, derive a view query from it, and time
        # a single query round trip
        key = self.existing_keys.next(curr_items=self.items, curr_deletes=0)
        doc = self.new_docs.next(key)
        ddoc_name, view_name, query = self.new_queries.next(doc)
        _, latency = client.query(ddoc_name, view_name, query=query)
        return 1000 * latency  # s -> ms
class ViewWorker(Worker):

    NAME = 'query-worker'

    def __init__(self, workload_settings, target_settings, shutdown_event):
        super().__init__(workload_settings, target_settings, shutdown_event)
        self.total_workers = self.ws.query_workers
        self.throughput = self.ws.query_throughput
        # Per-query latencies are sampled into a shared reservoir
        self.reservoir = Reservoir(num_workers=self.ws.query_workers)
        if workload_settings.index_type is None:
            self.new_queries = ViewQueryGen(workload_settings.ddocs,
                                            workload_settings.query_params)
        else:
            self.new_queries = ViewQueryGenByType(
                workload_settings.index_type, workload_settings.query_params)

    @with_sleep
    def do_batch(self):
        # Conservatively narrow the key range to account for in-flight
        # creates/deletes by other workers
        curr_items_spot = \
            self.curr_items.value - self.ws.creates * self.ws.workers
        deleted_spot = \
            self.deleted_items.value + self.ws.deletes * self.ws.workers
        for _ in range(self.BATCH_SIZE):
            key = self.existing_keys.next(curr_items_spot, deleted_spot)
            doc = self.docs.next(key)
            ddoc_name, view_name, query = self.new_queries.next(doc)
            latency = self.cb.view_query(ddoc_name, view_name, query=query)
            self.reservoir.update(operation='query', value=latency)

    def run(self, sid, lock, curr_ops, curr_items, deleted_items, *args):
        self.cb.start_updater()

        # Derive the per-batch time budget from the target aggregate throughput
        if self.throughput < float('inf'):
            self.target_time = float(self.BATCH_SIZE) * self.total_workers / \
                self.throughput
        else:
            self.target_time = None
        self.sid = sid
        self.curr_items = curr_items
        self.deleted_items = deleted_items

        try:
            logger.info('Started: {}-{}'.format(self.NAME, self.sid))
            while not self.time_to_stop():
                self.do_batch()
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}'.format(self.NAME, self.sid))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.dump_stats()
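# Illustration only (not part of the project code): a minimal sketch of the
# pacing that run() sets up above. The with_sleep decorator itself is defined
# elsewhere; the assumption here is that it sleeps off whatever remains of the
# per-batch time budget, so that all query workers together approach the
# configured query_throughput (ops/sec). The helper name paced_batch is
# hypothetical.
import time


def paced_batch(do_batch, batch_size, total_workers, throughput):
    """Run one batch, then sleep for the rest of the time budget (sketch)."""
    target_time = float(batch_size) * total_workers / throughput
    started = time.time()
    do_batch()
    elapsed = time.time() - started
    if elapsed < target_time:
        time.sleep(target_time - elapsed)


# Example budget: 100-op batches, 4 query workers and 1000 queries/sec overall
# give each worker 100 * 4 / 1000 = 0.4 s per batch.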
class ViewWorker(Worker):

    NAME = 'view-worker'

    def __init__(self, workload_settings, target_settings, shutdown_event):
        super(ViewWorker, self).__init__(workload_settings, target_settings,
                                         shutdown_event)
        self.total_workers = self.ws.query_workers
        self.throughput = self.ws.query_throughput
        if workload_settings.index_type is None:
            self.new_queries = ViewQueryGen(workload_settings.ddocs,
                                            workload_settings.qparams)
        else:
            self.new_queries = ViewQueryGenByType(workload_settings.index_type,
                                                  workload_settings.qparams)

    @with_sleep
    def do_batch(self):
        # Conservatively narrow the key range to account for in-flight
        # creates/deletes by other workers
        curr_items_spot = \
            self.curr_items.value - self.ws.creates * self.ws.workers
        deleted_spot = \
            self.deleted_items.value + self.ws.deletes * self.ws.workers
        for _ in range(self.BATCH_SIZE):
            key = self.existing_keys.next(curr_items_spot, deleted_spot)
            doc = self.docs.next(key)
            doc['key'] = key
            doc['bucket'] = self.ts.bucket
            ddoc_name, view_name, query = self.new_queries.next(doc)
            self.cb.query(ddoc_name, view_name, query=query)

    def run(self, sid, lock, curr_queries, curr_items, deleted_items):
        self.cb.start_updater()

        if self.throughput < float('inf'):
            self.target_time = float(self.BATCH_SIZE) * self.total_workers / \
                self.throughput
        else:
            self.target_time = None
        self.sid = sid
        self.curr_items = curr_items
        self.deleted_items = deleted_items
        self.curr_queries = curr_queries

        try:
            logger.info('Started: {}-{}'.format(self.NAME, self.sid))
            # Unlike the 'query-worker' variant above, this run loop is also
            # bounded by a shared query counter (ws.ops)
            while curr_queries.value < self.ws.ops and not self.time_to_stop():
                with lock:
                    curr_queries.value += self.BATCH_SIZE
                self.do_batch()
        except (KeyboardInterrupt, ValueFormatError, AttributeError) as e:
            logger.info('Interrupted: {}-{}, {}'.format(self.NAME,
                                                        self.sid, e))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))
# Variant of ViewWorker.__init__ that additionally records the worker name on
# the instance ('query-worker') rather than relying only on a class-level NAME
def __init__(self, workload_settings, target_settings, shutdown_event):
    super(ViewWorker, self).__init__(workload_settings, target_settings,
                                     shutdown_event)
    self.total_workers = self.ws.query_workers
    self.throughput = self.ws.query_throughput
    self.name = 'query-worker'
    if workload_settings.index_type is None:
        self.new_queries = ViewQueryGen(workload_settings.ddocs,
                                        workload_settings.qparams)
    else:
        self.new_queries = ViewQueryGenByType(workload_settings.index_type,
                                              workload_settings.qparams)
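# Worked example with made-up numbers for the key-range clamping that both
# do_batch() implementations above perform before sampling keys. The apparent
# intent is to stay clear of keys that concurrent CRUD workers may still be
# creating or may be about to delete; treat that rationale as an assumption,
# while the arithmetic itself mirrors the code.
creates, deletes, workers = 10, 5, 20       # hypothetical workload settings
curr_items_value = 1000000                  # shared "items created" counter
deleted_items_value = 50000                 # shared "items deleted" counter

curr_items_spot = curr_items_value - creates * workers      # 999800
deleted_spot = deleted_items_value + deletes * workers      # 50100

# existing_keys.next(curr_items_spot, deleted_spot) then draws keys only from
# this conservatively narrowed window.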