# Code example #1
class ViewWorker(Worker):
    """Worker that issues view queries against documents drawn from the
    existing key space and records per-query latencies in a reservoir.
    """

    NAME = 'query-worker'

    def __init__(self, workload_settings, target_settings, shutdown_event):
        super().__init__(workload_settings, target_settings, shutdown_event)

        self.total_workers = self.ws.query_workers
        self.throughput = self.ws.query_throughput
        self.reservoir = Reservoir(num_workers=self.ws.query_workers)

        # Typed query generation when an index type is configured,
        # otherwise plain ddoc/view based generation.
        if workload_settings.index_type is None:
            self.new_queries = ViewQueryGen(workload_settings.ddocs,
                                            workload_settings.query_params)
        else:
            self.new_queries = ViewQueryGenByType(
                workload_settings.index_type, workload_settings.query_params)

    @with_sleep
    def do_batch(self):
        # Keep the sampled key range clear of keys that sibling workers
        # may still be creating or deleting concurrently.
        spot_items = (self.curr_items.value -
                      self.ws.creates * self.ws.workers)
        spot_deletes = (self.deleted_items.value +
                        self.ws.deletes * self.ws.workers)

        for _ in range(self.BATCH_SIZE):
            key = self.existing_keys.next(spot_items, spot_deletes)
            doc = self.docs.next(key)
            ddoc_name, view_name, query = self.new_queries.next(doc)
            latency = self.cb.view_query(ddoc_name, view_name, query=query)
            self.reservoir.update(operation='query', value=latency)

    def run(self, sid, lock, curr_ops, curr_items, deleted_items, *args):
        """Main loop: pace batches to the aggregate throughput target and
        dump sampled latencies on exit.
        """
        self.cb.start_updater()

        # Per-batch time budget; None disables pacing.
        if self.throughput == float('inf'):
            self.target_time = None
        else:
            self.target_time = (self.BATCH_SIZE * self.total_workers /
                                float(self.throughput))

        self.sid = sid
        self.curr_items = curr_items
        self.deleted_items = deleted_items

        try:
            logger.info('Started: {}-{}'.format(self.NAME, self.sid))
            while not self.time_to_stop():
                self.do_batch()
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}'.format(self.NAME, self.sid))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.reservoir.dump(filename='{}-{}'.format(self.NAME, self.sid))
# Code example #2
class N1QLWorker(Worker):
    """Worker that issues N1QL queries (read, create or CAS update,
    selected by ws.n1ql_op) and records per-query latencies.
    """

    NAME = 'query-worker'

    def __init__(self,
                 workload_settings,
                 target_settings,
                 shutdown_event=None):
        # Generator that produces the next query for a given key/doc pair.
        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)
        self.total_workers = workload_settings.n1ql_workers
        self.throughput = workload_settings.n1ql_throughput

        self.reservoir = Reservoir(num_workers=workload_settings.n1ql_workers)

        # NOTE: the assignments above must precede super().__init__();
        # init_keys() below reads self.total_workers and is presumably
        # invoked by the base constructor -- confirm against Worker.
        super().__init__(workload_settings, target_settings, shutdown_event)

        self.init_creds()

    def init_keys(self):
        """Create key generators for inserts, reads and CAS updates."""
        self.new_keys = NewOrderedKey(prefix='n1ql', fmtr=self.ws.key_fmtr)

        self.existing_keys = UniformKey(prefix='n1ql', fmtr=self.ws.key_fmtr)

        self.keys_for_cas_update = KeyForCASUpdate(
            total_workers=self.total_workers,
            prefix='n1ql',
            fmtr=self.ws.key_fmtr)

    def init_docs(self):
        """Choose a document generator based on ws.doc_gen."""
        if self.ws.doc_gen == 'reverse_lookup':
            self.docs = ReverseLookupDocument(self.ws.size, prefix='n1ql')
        elif self.ws.doc_gen == 'reverse_range_lookup':
            self.docs = ReverseRangeLookupDocument(
                self.ws.size,
                prefix='n1ql',
                range_distance=self.ws.range_distance)
        elif self.ws.doc_gen == 'ext_reverse_lookup':
            self.docs = ExtReverseLookupDocument(self.ws.size,
                                                 prefix='n1ql',
                                                 num_docs=self.ws.items)
        elif self.ws.doc_gen == 'join':
            self.docs = JoinedDocument(self.ws.size,
                                       prefix='n1ql',
                                       num_docs=self.ws.items,
                                       num_categories=self.ws.num_categories,
                                       num_replies=self.ws.num_replies)
        elif self.ws.doc_gen == 'ref':
            self.docs = RefDocument(self.ws.size, prefix='n1ql')
        elif self.ws.doc_gen == 'profile':
            self.docs = ProfileDocument(self.ws.size, prefix='n1ql')
        elif self.ws.doc_gen == 'array_indexing':
            self.docs = ArrayIndexingDocument(self.ws.size,
                                              prefix='n1ql',
                                              array_size=self.ws.array_size,
                                              num_docs=self.ws.items)

    def init_creds(self):
        """Register per-bucket credentials with the underlying client.

        getattr() tolerates workload settings without a 'buckets' field.
        """
        for bucket in getattr(self.ws, 'buckets', []):
            self.cb.client.add_bucket_creds(bucket, self.ts.password)

    def read(self):
        """Run one batch of read queries against existing keys."""
        curr_items_tmp = self.curr_items.value
        if self.ws.doc_gen == 'ext_reverse_lookup':
            # Shrink the key range; presumably ext_reverse_lookup keeps 4
            # documents per logical item -- confirm against the generator.
            curr_items_tmp //= 4

        for _ in range(self.ws.n1ql_batch_size):
            key = self.existing_keys.next(curr_items=curr_items_tmp,
                                          curr_deletes=0)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    def create(self):
        """Run one batch of insert queries with freshly reserved keys."""
        # Reserve a contiguous key range under the lock; the documents
        # themselves are generated outside the critical section.
        with self.lock:
            self.curr_items.value += self.ws.n1ql_batch_size
            curr_items_tmp = self.curr_items.value - self.ws.n1ql_batch_size

        for _ in range(self.ws.n1ql_batch_size):
            curr_items_tmp += 1
            key = self.new_keys.next(curr_items=curr_items_tmp)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    def update(self):
        """Run one batch of CAS update queries."""
        # Snapshot the shared counter under the lock for a stable range.
        with self.lock:
            curr_items_tmp = self.curr_items.value

        for _ in range(self.ws.n1ql_batch_size):
            key = self.keys_for_cas_update.next(sid=self.sid,
                                                curr_items=curr_items_tmp)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    @with_sleep
    def do_batch(self):
        """Dispatch one batch according to ws.n1ql_op; with_sleep
        presumably paces batches against self.target_time -- confirm.
        """
        if self.ws.n1ql_op == 'read':
            self.read()
        elif self.ws.n1ql_op == 'create':
            self.create()
        elif self.ws.n1ql_op == 'update':
            self.update()

    def run(self, sid, lock, curr_ops, curr_items, *args):
        """Main loop: derive the per-batch time budget from the aggregate
        throughput target, run batches until time_to_stop(), then dump
        the sampled latencies.
        """
        # None disables pacing (unbounded throughput).
        if self.throughput < float('inf'):
            self.target_time = self.ws.n1ql_batch_size * self.total_workers / \
                float(self.throughput)
        else:
            self.target_time = None
        self.lock = lock
        self.sid = sid
        self.curr_items = curr_items

        try:
            logger.info('Started: {}-{}'.format(self.NAME, self.sid))
            while not self.time_to_stop():
                self.do_batch()
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}'.format(self.NAME, self.sid))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.reservoir.dump(filename='{}-{}'.format(self.NAME, self.sid))
# Code example #3
class KVWorker(Worker):
    """Worker that runs mixed key-value batches (create/read/update/delete,
    combined read+update, and FTS-oriented mutations) and samples
    per-operation latencies.
    """

    NAME = 'kv-worker'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Latency sampler, sized by the number of KV workers.
        self.reservoir = Reservoir(num_workers=self.ws.workers)

    def gen_cmd_sequence(self, cb=None, extras=None) -> Sequence:
        """Build one shuffled batch of (stat_name, callable, args) tuples.

        cb: client to issue operations through (defaults to self.cb).
        extras: 'subdoc' or 'xattr' selects the sub-document or
        extended-attribute read/update variants.
        The first tuple element is the label used when recording latency.
        """
        # One op code per individual operation; 'ru' expands to a
        # read+update pair, hence half the reads_and_updates count.
        ops = \
            ['c'] * self.ws.creates + \
            ['r'] * self.ws.reads + \
            ['u'] * self.ws.updates + \
            ['d'] * self.ws.deletes + \
            ['ru'] * (self.ws.reads_and_updates // 2) + \
            ['fus'] * self.ws.fts_updates_swap + \
            ['fur'] * self.ws.fts_updates_reverse
        random.shuffle(ops)

        # Snapshot the shared item counter; when creating, reserve this
        # batch's key range under the lock, and keep reads clear of keys
        # other workers may still be inserting.
        curr_items_tmp = curr_items_spot = self.curr_items.value
        if self.ws.creates:
            with self.lock:
                self.curr_items.value += self.ws.creates
                curr_items_tmp = self.curr_items.value - self.ws.creates
            curr_items_spot = (curr_items_tmp -
                               self.ws.creates * self.ws.workers)

        # Same idea for deletions: reserve the range to delete and skip
        # keys that sibling workers may be deleting concurrently.
        deleted_items_tmp = deleted_spot = 0
        if self.ws.deletes:
            with self.lock:
                self.deleted_items.value += self.ws.deletes
                deleted_items_tmp = self.deleted_items.value - self.ws.deletes
            deleted_spot = (deleted_items_tmp +
                            self.ws.deletes * self.ws.workers)

        if not cb:
            cb = self.cb

        # Translate op codes into concrete commands.
        cmds = []
        for op in ops:
            if op == 'c':
                key = self.new_keys.next(curr_items_tmp)
                doc = self.docs.next(key)
                curr_items_tmp += 1
                cmds.append((None, cb.create, (key.string, doc)))
            elif op == 'r':
                key = self.existing_keys.next(curr_items_spot, deleted_spot)

                if extras == 'subdoc':
                    cmds.append(
                        ('get', cb.read, (key.string, self.ws.subdoc_field)))
                elif extras == 'xattr':
                    cmds.append(('get', cb.read_xattr, (key.string,
                                                        self.ws.xattr_field)))
                else:
                    cmds.append(('get', cb.read, (key.string, )))
            elif op == 'u':
                key = self.existing_keys.next(curr_items_spot, deleted_spot,
                                              self.current_hot_load_start,
                                              self.timer_elapse)
                doc = self.docs.next(key)

                if extras == 'subdoc':
                    cmds.append(('set', cb.update,
                                 (key.string, self.ws.subdoc_field, doc)))
                elif extras == 'xattr':
                    cmds.append(('set', cb.update_xattr,
                                 (key.string, self.ws.xattr_field, doc)))
                else:
                    cmds.append(('set', cb.update, (key.string, doc)))
            elif op == 'd':
                key = self.keys_for_removal.next(deleted_items_tmp)
                deleted_items_tmp += 1
                cmds.append((None, cb.delete, (key.string, )))
            elif op == 'ru':
                # Combined read-then-update against the same key.
                key = self.existing_keys.next(curr_items_spot, deleted_spot)
                doc = self.docs.next(key)

                cmds.append(('get', cb.read, (key.string, )))
                cmds.append(('set', cb.update, (key.string, doc)))
            elif op == 'fus':
                key = self.fts_keys.next()
                cmds.append((None, self.do_fts_updates_swap, (key, )))
            elif op == 'fur':
                key = self.fts_keys.next()
                cmds.append((None, self.do_fts_updates_reverse, (key, )))
        return cmds

    def do_fts_updates_swap(self, key):
        """Mutate a document to churn an FTS index: swap the 'text' and
        'text2' fields, or bit-shift 'time'; no-op otherwise.
        """
        doc = self.cb.client.get(key).value
        if 'text' in doc and 'text2' in doc:
            tmp = doc["text2"]
            doc["text2"] = doc["text"]
            doc["text"] = tmp
        elif 'time' in doc:
            # Randomly halve or double the timestamp value.
            if randint(0, 1):
                doc["time"] = int(doc["time"]) >> 1
            else:
                doc["time"] = int(doc["time"]) << 1
        else:
            return
        self.cb.client.set(key, doc)

    def do_fts_updates_reverse(self, key):
        """Reverse the word order of the document's 'name' field."""
        doc = self.cb.client.get(key).value
        words = doc["name"].split(' ')
        if len(words):
            doc["name"] = ' '.join(words[::-1])
            self.cb.client.set(key, doc)

    @with_sleep
    def do_batch(self, *args, **kwargs):
        """Execute one generated command batch, recording latencies for
        operations that return one.
        """
        for cmd, func, args in self.gen_cmd_sequence():
            latency = func(*args)
            if latency is not None:
                self.reservoir.update(operation=cmd, value=latency)

    def run_condition(self, curr_ops):
        """Continue while the global op quota is unmet and time remains."""
        return curr_ops.value < self.ws.ops and not self.time_to_stop()

    def run(self,
            sid,
            lock,
            curr_ops,
            curr_items,
            deleted_items,
            current_hot_load_start=None,
            timer_elapse=None):
        """Main loop: claim BATCH_SIZE ops from the shared quota per
        iteration, run the batch, then dump sampled latencies on exit.
        """
        # Per-batch time budget; None disables pacing.
        if self.ws.throughput < float('inf'):
            self.target_time = float(self.BATCH_SIZE) * self.ws.workers / \
                self.ws.throughput
        else:
            self.target_time = None
        self.sid = sid
        self.lock = lock
        self.curr_items = curr_items
        self.deleted_items = deleted_items
        self.current_hot_load_start = current_hot_load_start
        self.timer_elapse = timer_elapse

        # Seed this worker's RNG (defined in the base class).
        self.seed()

        logger.info('Started: {}-{}'.format(self.NAME, self.sid))
        try:
            while self.run_condition(curr_ops):
                with lock:
                    curr_ops.value += self.BATCH_SIZE
                self.do_batch()
                self.report_progress(curr_ops.value)
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}'.format(self.NAME, self.sid))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.reservoir.dump(filename='{}-{}'.format(self.NAME, self.sid))
# Code example #4
# File: wgen.py — Project: mahesh152/perfrunner
class N1QLWorker(Worker):
    """Worker that drives N1QL queries (read, create, CAS update or range
    update, selected by ws.n1ql_op) and records per-query latencies.
    """

    NAME = 'n1ql-worker'

    def __init__(self, workload_settings, target_settings, shutdown_event):
        # These attributes must be assigned before the base constructor
        # runs: init_keys() below reads self.total_workers.
        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)
        self.total_workers = workload_settings.n1ql_workers
        self.throughput = workload_settings.n1ql_throughput

        self.reservoir = Reservoir(num_workers=workload_settings.n1ql_workers)

        super(N1QLWorker, self).__init__(workload_settings, target_settings,
                                         shutdown_event)

    def init_keys(self):
        """Create key generators for reads, inserts and CAS updates."""
        self.existing_keys = ExistingKey(self.ws.working_set,
                                         self.ws.working_set_access,
                                         prefix='n1ql')
        self.new_keys = NewKey(prefix='n1ql', expiration=self.ws.expiration)
        self.keys_for_casupdate = KeyForCASUpdate(self.total_workers,
                                                  self.ws.working_set,
                                                  self.ws.working_set_access,
                                                  prefix='n1ql')

    def init_docs(self):
        """Choose a document generator based on ws.doc_gen."""
        if self.ws.doc_gen == 'reverse_lookup':
            self.docs = ReverseLookupDocument(self.ws.size,
                                              prefix='n1ql')
        elif self.ws.doc_gen == 'reverse_range_lookup':
            self.docs = ReverseRangeLookupDocument(
                self.ws.size,
                prefix='n1ql',
                range_distance=self.ws.range_distance)
        elif self.ws.doc_gen == 'ext_reverse_lookup':
            self.docs = ExtReverseLookupDocument(self.ws.size,
                                                 prefix='n1ql',
                                                 num_docs=self.ws.items)
        elif self.ws.doc_gen == 'join':
            self.docs = JoinedDocument(self.ws.size,
                                       prefix='n1ql',
                                       num_docs=self.ws.items,
                                       num_categories=self.ws.num_categories,
                                       num_replies=self.ws.num_replies)
        elif self.ws.doc_gen == 'ref':
            self.docs = RefDocument(self.ws.size,
                                    prefix='n1ql')
        elif self.ws.doc_gen == 'array_indexing':
            self.docs = ArrayIndexingDocument(self.ws.size,
                                              prefix='n1ql',
                                              array_size=self.ws.array_size,
                                              num_docs=self.ws.items)

    def init_db(self):
        """Connect the N1QL query client to the target node and bucket."""
        host, port = self.ts.node.split(':')
        self.cb = N1QLGen(bucket=self.ts.bucket, password=self.ts.password,
                          host=host, port=port)

    def read(self):
        """Run one batch of read queries against existing keys."""
        curr_items_tmp = self.curr_items.value
        if self.ws.doc_gen == 'ext_reverse_lookup':
            # BUG FIX: use floor division. True division ("/=") yields a
            # float in Python 3 and breaks the integer key-range
            # arithmetic in the key generator. Presumably
            # ext_reverse_lookup keeps 4 documents per logical item --
            # confirm against ExtReverseLookupDocument.
            curr_items_tmp //= 4

        for _ in range(self.BATCH_SIZE):
            key = self.existing_keys.next(curr_items=curr_items_tmp,
                                          curr_deletes=0)
            doc = self.docs.next(key)
            doc['key'] = key
            doc['bucket'] = self.ts.bucket
            query = self.new_queries.next(doc)

            _, latency = self.cb.query(query)
            self.reservoir.update(latency)

    def create(self):
        """Run one batch of insert queries with freshly reserved keys."""
        # Reserve a contiguous key range under the lock; document
        # generation happens outside the critical section.
        with self.lock:
            self.curr_items.value += self.BATCH_SIZE
            curr_items_tmp = self.curr_items.value - self.BATCH_SIZE

        for _ in range(self.BATCH_SIZE):
            curr_items_tmp += 1
            key, ttl = self.new_keys.next(curr_items=curr_items_tmp)
            doc = self.docs.next(key)
            doc['key'] = key
            doc['bucket'] = self.ts.bucket
            query = self.new_queries.next(doc)

            _, latency = self.cb.query(query)
            self.reservoir.update(latency)

    def update(self):
        """Run one batch of CAS update queries."""
        # Account for the batch under the lock; keys are drawn from the
        # range below the already-created items.
        with self.lock:
            self.cas_updated_items.value += self.BATCH_SIZE
            curr_items_tmp = self.curr_items.value - self.BATCH_SIZE

        for _ in range(self.BATCH_SIZE):
            key = self.keys_for_casupdate.next(self.sid,
                                               curr_items=curr_items_tmp,
                                               curr_deletes=0)
            doc = self.docs.next(key)
            doc['key'] = key
            doc['bucket'] = self.ts.bucket
            query = self.new_queries.next(doc)

            _, latency = self.cb.query(query)
            self.reservoir.update(latency)

    def range_update(self):
        """Run one batch of range-update queries.

        The implementation is currently identical to update(); which one
        runs is selected by ws.n1ql_op in do_batch().
        """
        self.update()

    @with_sleep
    def do_batch(self):
        """Dispatch one batch according to ws.n1ql_op; with_sleep
        presumably paces batches against self.target_time -- confirm.
        """
        if self.ws.n1ql_op == 'read':
            self.read()
        elif self.ws.n1ql_op == 'create':
            self.create()
        elif self.ws.n1ql_op == 'update':
            self.update()
        elif self.ws.n1ql_op == 'rangeupdate':
            self.range_update()

    def run(self, sid, lock, curr_queries, curr_items, deleted_items,
            cas_updated_items):
        """Main loop: claim BATCH_SIZE queries from the shared quota per
        iteration, run the batch, then dump sampled latencies.

        deleted_items is accepted for signature compatibility with the
        spawning code but is not used by this worker.
        """
        # Per-batch time budget derived from the aggregate throughput
        # target; None disables pacing.
        if self.throughput < float('inf'):
            self.target_time = float(self.BATCH_SIZE) * self.total_workers / \
                self.throughput
        else:
            self.target_time = None
        self.lock = lock
        self.sid = sid
        self.curr_items = curr_items
        self.cas_updated_items = cas_updated_items
        self.curr_queries = curr_queries

        try:
            logger.info('Started: {}-{}'.format(self.NAME, self.sid))
            while curr_queries.value < self.ws.ops and not self.time_to_stop():
                with self.lock:
                    curr_queries.value += self.BATCH_SIZE
                self.do_batch()
        except (KeyboardInterrupt, ValueFormatError, AttributeError) as e:
            logger.info('Interrupted: {}-{}-{}'.format(self.NAME, self.sid, e))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.reservoir.dump(filename='{}-{}'.format(self.NAME, self.sid))