Code example #1
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.reservoir = Reservoir(num_workers=self.ws.workers)
        self.gen_duration = 0.0
        self.batch_duration = 0.0
        self.delta = 0.0
        self.op_delay = 0.0
Code example #2
    def __init__(self,
                 workload_settings,
                 target_settings,
                 shutdown_event=None):
        super().__init__(workload_settings, target_settings, shutdown_event)

        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)

        self.reservoir = Reservoir(num_workers=self.ws.n1ql_workers)
Code example #3
File: wgen.py Project: bkumaran/perfrunner
class ViewWorker(Worker):

    NAME = 'query-worker'

    def __init__(self, workload_settings, target_settings, shutdown_event):
        super().__init__(workload_settings, target_settings, shutdown_event)

        self.total_workers = self.ws.query_workers
        self.throughput = self.ws.query_throughput

        self.reservoir = Reservoir(num_workers=self.ws.query_workers)

        if workload_settings.index_type is None:
            self.new_queries = ViewQueryGen(workload_settings.ddocs,
                                            workload_settings.query_params)
        else:
            self.new_queries = ViewQueryGenByType(
                workload_settings.index_type, workload_settings.query_params)

    @with_sleep
    def do_batch(self):
        curr_items_spot = \
            self.curr_items.value - self.ws.creates * self.ws.workers
        deleted_spot = \
            self.deleted_items.value + self.ws.deletes * self.ws.workers

        for _ in range(self.BATCH_SIZE):
            key = self.existing_keys.next(curr_items_spot, deleted_spot)
            doc = self.docs.next(key)
            ddoc_name, view_name, query = self.new_queries.next(doc)

            latency = self.cb.view_query(ddoc_name, view_name, query=query)

            self.reservoir.update(operation='query', value=latency)

    def run(self, sid, lock, curr_ops, curr_items, deleted_items, *args):
        self.cb.start_updater()

        if self.throughput < float('inf'):
            self.target_time = float(self.BATCH_SIZE) * self.total_workers / \
                self.throughput
        else:
            self.target_time = None
        self.sid = sid
        self.curr_items = curr_items
        self.deleted_items = deleted_items

        try:
            logger.info('Started: {}-{}'.format(self.NAME, self.sid))
            while not self.time_to_stop():
                self.do_batch()
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}'.format(self.NAME, self.sid))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.dump_stats()
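
Every worker above funnels per-operation latencies into a Reservoir and dumps it when run() ends. The Reservoir class itself never appears in these search results, so the following is only a minimal sketch consistent with the calls seen here (Reservoir(num_workers=...), update(operation=..., value=...), dump(filename=...)), assuming classic reservoir sampling and an invented total capacity; the real perfrunner implementation may differ.

import json
import random


class Reservoir:
    """Uniform fixed-size sample of latencies per operation (sketch only)."""

    MAX_CAPACITY = 10000  # assumed total sample budget across all workers

    def __init__(self, num_workers):
        self.capacity = max(1, self.MAX_CAPACITY // num_workers)
        self.samples = {}  # operation -> sampled latencies
        self.seen = {}     # operation -> total observations

    def update(self, operation, value):
        self.seen[operation] = self.seen.get(operation, 0) + 1
        bucket = self.samples.setdefault(operation, [])
        if len(bucket) < self.capacity:
            bucket.append(value)
        else:
            # Algorithm R: replace a random slot with probability
            # capacity / seen, keeping the sample uniform over time.
            idx = random.randrange(self.seen[operation])
            if idx < self.capacity:
                bucket[idx] = value

    def dump(self, filename):
        with open('{}.json'.format(filename), 'w') as fh:
            json.dump(self.samples, fh)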
Code example #4
    def __init__(self, workload_settings, target_settings, shutdown_event):
        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)
        self.total_workers = workload_settings.n1ql_workers
        self.throughput = workload_settings.n1ql_throughput

        self.reservoir = Reservoir(num_workers=workload_settings.n1ql_workers)

        super().__init__(workload_settings, target_settings, shutdown_event)

        self.init_creds()
Code example #5
    def __init__(self, workload_settings, target_settings, shutdown_event):
        super().__init__(workload_settings, target_settings, shutdown_event)

        self.reservoir = Reservoir(num_workers=self.ws.query_workers)

        if workload_settings.index_type is None:
            self.new_queries = ViewQueryGen(workload_settings.ddocs,
                                            workload_settings.query_params)
        else:
            self.new_queries = ViewQueryGenByType(
                workload_settings.index_type, workload_settings.query_params)
Code example #6
    def __init__(self,
                 workload_settings,
                 target_settings,
                 shutdown_event=None):
        super().__init__(workload_settings, target_settings, shutdown_event)
        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)
        self.reservoir = Reservoir(num_workers=self.ws.n1ql_workers)
        self.gen_duration = 0.0
        self.batch_duration = 0.0
        self.delta = 0.0
        self.op_delay = 0.0
        self.first = True
Code example #7
File: wgen.py Project: mahesh152/perfrunner
    def __init__(self, workload_settings, target_settings, shutdown_event):
        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)
        self.total_workers = workload_settings.n1ql_workers
        self.throughput = workload_settings.n1ql_throughput

        self.reservoir = Reservoir(num_workers=workload_settings.n1ql_workers)

        super(N1QLWorker, self).__init__(workload_settings, target_settings,
                                         shutdown_event)
Code example #8
class N1QLWorker(Worker):

    NAME = 'query-worker'

    def __init__(self,
                 workload_settings,
                 target_settings,
                 shutdown_event=None):
        super().__init__(workload_settings, target_settings, shutdown_event)

        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)

        self.reservoir = Reservoir(num_workers=self.ws.n1ql_workers)

    def read(self):
        curr_items = self.curr_items.value
        if self.ws.doc_gen == 'ext_reverse_lookup':
            curr_items //= 4

        for _ in range(self.ws.n1ql_batch_size):
            key = self.existing_keys.next(curr_items=curr_items,
                                          curr_deletes=0)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    def create(self):
        with self.lock:
            curr_items = self.curr_items.value
            self.curr_items.value += self.ws.n1ql_batch_size

        for _ in range(self.ws.n1ql_batch_size):
            curr_items += 1
            key = self.new_keys.next(curr_items=curr_items)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    def update(self):
        with self.lock:
            curr_items = self.curr_items.value

        for _ in range(self.ws.n1ql_batch_size):
            key = self.keys_for_cas_update.next(sid=self.sid,
                                                curr_items=curr_items)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    @with_sleep
    def do_batch(self):
        if self.ws.n1ql_op == 'read':
            self.read()
        elif self.ws.n1ql_op == 'create':
            self.create()
        elif self.ws.n1ql_op == 'update':
            self.update()

    def run(self, sid, lock, curr_ops, curr_items, *args):
        if self.ws.n1ql_throughput < float('inf'):
            self.target_time = self.ws.n1ql_batch_size * self.ws.n1ql_workers / \
                float(self.ws.n1ql_throughput)
        else:
            self.target_time = None
        self.lock = lock
        self.sid = sid
        self.curr_items = curr_items

        try:
            logger.info('Started: {}-{}'.format(self.NAME, self.sid))
            while not self.time_to_stop():
                self.do_batch()
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}'.format(self.NAME, self.sid))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.dump_stats()
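
The N1QL workers all delegate query construction to N1QLQueryGen, which is also missing from these results. A plausible minimal sketch, assuming it merely cycles through the configured query templates and binds parameters from the current key and document; the template format and return value here are guesses, not the actual perfrunner API.

from itertools import cycle


class N1QLQueryGen:
    """Round-robin generator of parameterized N1QL queries (sketch only)."""

    def __init__(self, queries):
        # `queries` is assumed to be a list of dicts with a 'statement'
        # and the names of the document fields used as query arguments.
        self.queries = cycle(queries)

    def next(self, key, doc):
        query = next(self.queries)
        args = [key if field == 'key' else doc[field]
                for field in query.get('args', [])]
        return query['statement'], args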
Code example #9
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.reservoir = Reservoir(num_workers=self.ws.workers)
Code example #10
class KVWorker(Worker):

    NAME = 'kv-worker'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.reservoir = Reservoir(num_workers=self.ws.workers)

    @property
    def random_ops(self) -> List[str]:
        ops = \
            ['c'] * self.ws.creates + \
            ['r'] * self.ws.reads + \
            ['u'] * self.ws.updates + \
            ['d'] * self.ws.deletes + \
            ['m'] * (self.ws.reads_and_updates // 2)
        random.shuffle(ops)
        return ops

    def create_args(self, cb: Client, curr_items: int) -> Sequence:
        key = self.new_keys.next(curr_items)
        doc = self.docs.next(key)
        args = key.string, doc, self.ws.persist_to, self.ws.replicate_to

        return [('set', cb.create, args)]

    def read_args(self, cb: Client, curr_items: int,
                  deleted_items: int) -> Sequence:
        key = self.existing_keys.next(curr_items, deleted_items)
        args = key.string,

        return [('get', cb.read, args)]

    def update_args(self, cb: Client, curr_items: int,
                    deleted_items: int) -> Sequence:
        key = self.existing_keys.next(curr_items, deleted_items,
                                      self.current_hot_load_start,
                                      self.timer_elapse)
        doc = self.docs.next(key)
        args = key.string, doc, self.ws.persist_to, self.ws.replicate_to

        return [('set', cb.update, args)]

    def delete_args(self, cb: Client, deleted_items: int) -> Sequence:
        key = self.keys_for_removal.next(deleted_items)
        args = key.string,

        return [('delete', cb.delete, args)]

    def modify_args(self, cb: Client, curr_items: int,
                    deleted_items: int) -> Sequence:
        key = self.existing_keys.next(curr_items, deleted_items)
        doc = self.docs.next(key)
        read_args = key.string,
        update_args = key.string, doc, self.ws.persist_to, self.ws.replicate_to

        return [('get', cb.read, read_args), ('set', cb.update, update_args)]

    def gen_cmd_sequence(self, cb: Client = None) -> Sequence:
        if not cb:
            cb = self.cb

        curr_items = self.ws.items
        if self.ws.creates:
            with self.lock:
                curr_items = self.curr_items.value
                self.curr_items.value += self.ws.creates

        deleted_items = 0
        if self.ws.deletes:
            with self.lock:
                deleted_items = self.deleted_items.value + \
                    self.ws.deletes * self.ws.workers
                self.deleted_items.value += self.ws.deletes

        cmds = []
        for op in self.random_ops:
            if op == 'c':
                cmds += self.create_args(cb, curr_items)
                curr_items += 1
            elif op == 'r':
                cmds += self.read_args(cb, curr_items, deleted_items)
            elif op == 'u':
                cmds += self.update_args(cb, curr_items, deleted_items)
            elif op == 'd':
                cmds += self.delete_args(cb, deleted_items)
                deleted_items += 1
            elif op == 'm':
                cmds += self.modify_args(cb, curr_items, deleted_items)
        return cmds

    @with_sleep
    def do_batch(self, *args, **kwargs):
        for cmd, func, args in self.gen_cmd_sequence():
            latency = func(*args)
            if latency is not None:
                self.reservoir.update(operation=cmd, value=latency)

    def run_condition(self, curr_ops):
        return curr_ops.value < self.ws.ops and not self.time_to_stop()

    def run(self,
            sid,
            lock,
            curr_ops,
            curr_items,
            deleted_items,
            current_hot_load_start=None,
            timer_elapse=None):

        if self.ws.throughput < float('inf'):
            self.target_time = float(self.BATCH_SIZE) * self.ws.workers / \
                self.ws.throughput
        else:
            self.target_time = None
        self.sid = sid
        self.lock = lock
        self.curr_items = curr_items
        self.deleted_items = deleted_items
        self.current_hot_load_start = current_hot_load_start
        self.timer_elapse = timer_elapse

        self.seed()

        logger.info('Started: {}-{}'.format(self.NAME, self.sid))
        try:
            while self.run_condition(curr_ops):
                with lock:
                    curr_ops.value += self.BATCH_SIZE
                self.do_batch()
                self.report_progress(curr_ops.value)
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}'.format(self.NAME, self.sid))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.dump_stats()
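
do_batch() is rate-limited by the with_sleep decorator, whose definition is not part of these results. Judging from the inline pacing in examples #13 and #14 below, it most plausibly times the batch and sleeps off whatever remains of self.target_time; this is a sketch under that assumption, and the CORRECTION_FACTOR value is invented.

import time
from functools import wraps

CORRECTION_FACTOR = 0.975  # assumed; compensates for scheduling overhead


def with_sleep(method):
    """Pad a batch method so it takes at least self.target_time (sketch)."""
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        if self.target_time is None:  # unthrottled workload
            return method(self, *args, **kwargs)
        t0 = time.time()
        result = method(self, *args, **kwargs)
        delta = self.target_time - (time.time() - t0)
        if delta > 0:
            time.sleep(CORRECTION_FACTOR * delta)
        return result
    return wrapper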
Code example #11
File: wgen.py Project: bkumaran/perfrunner
class N1QLWorker(Worker):

    NAME = 'query-worker'

    def __init__(self,
                 workload_settings,
                 target_settings,
                 shutdown_event=None):
        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)
        self.total_workers = workload_settings.n1ql_workers
        self.throughput = workload_settings.n1ql_throughput

        super().__init__(workload_settings, target_settings, shutdown_event)

        self.init_creds()

        self.reservoir = Reservoir(num_workers=self.ws.n1ql_workers)

    def init_keys(self):
        self.new_keys = NewOrderedKey(prefix='n1ql', fmtr=self.ws.key_fmtr)

        self.existing_keys = UniformKey(prefix='n1ql', fmtr=self.ws.key_fmtr)

        self.keys_for_cas_update = KeyForCASUpdate(
            total_workers=self.total_workers,
            prefix='n1ql',
            fmtr=self.ws.key_fmtr)

    def init_docs(self):
        if self.ws.doc_gen == 'reverse_lookup':
            self.docs = ReverseLookupDocument(self.ws.size, prefix='n1ql')
        elif self.ws.doc_gen == 'reverse_range_lookup':
            self.docs = ReverseRangeLookupDocument(
                self.ws.size,
                prefix='n1ql',
                range_distance=self.ws.range_distance)
        elif self.ws.doc_gen == 'ext_reverse_lookup':
            self.docs = ExtReverseLookupDocument(self.ws.size,
                                                 prefix='n1ql',
                                                 num_docs=self.ws.items)
        elif self.ws.doc_gen == 'join':
            self.docs = JoinedDocument(self.ws.size,
                                       prefix='n1ql',
                                       num_docs=self.ws.items,
                                       num_categories=self.ws.num_categories,
                                       num_replies=self.ws.num_replies)
        elif self.ws.doc_gen == 'ref':
            self.docs = RefDocument(self.ws.size, prefix='n1ql')
        elif self.ws.doc_gen == 'profile':
            self.docs = ProfileDocument(self.ws.size, prefix='n1ql')
        elif self.ws.doc_gen == 'array_indexing':
            self.docs = ArrayIndexingDocument(self.ws.size,
                                              prefix='n1ql',
                                              array_size=self.ws.array_size,
                                              num_docs=self.ws.items)

    def init_creds(self):
        for bucket in getattr(self.ws, 'buckets', []):
            self.cb.client.add_bucket_creds(bucket, self.ts.password)

    def read(self):
        curr_items_tmp = self.curr_items.value
        if self.ws.doc_gen == 'ext_reverse_lookup':
            curr_items_tmp //= 4

        for _ in range(self.ws.n1ql_batch_size):
            key = self.existing_keys.next(curr_items=curr_items_tmp,
                                          curr_deletes=0)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    def create(self):
        with self.lock:
            self.curr_items.value += self.ws.n1ql_batch_size
            curr_items_tmp = self.curr_items.value - self.ws.n1ql_batch_size

        for _ in range(self.ws.n1ql_batch_size):
            curr_items_tmp += 1
            key = self.new_keys.next(curr_items=curr_items_tmp)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    def update(self):
        with self.lock:
            curr_items_tmp = self.curr_items.value

        for _ in range(self.ws.n1ql_batch_size):
            key = self.keys_for_cas_update.next(sid=self.sid,
                                                curr_items=curr_items_tmp)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    @with_sleep
    def do_batch(self):
        if self.ws.n1ql_op == 'read':
            self.read()
        elif self.ws.n1ql_op == 'create':
            self.create()
        elif self.ws.n1ql_op == 'update':
            self.update()

    def run(self, sid, lock, curr_ops, curr_items, *args):
        if self.throughput < float('inf'):
            self.target_time = self.ws.n1ql_batch_size * self.total_workers / \
                float(self.throughput)
        else:
            self.target_time = None
        self.lock = lock
        self.sid = sid
        self.curr_items = curr_items

        try:
            logger.info('Started: {}-{}'.format(self.NAME, self.sid))
            while not self.time_to_stop():
                self.do_batch()
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}'.format(self.NAME, self.sid))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.dump_stats()
Code example #12
File: wgen.py Project: bkumaran/perfrunner
class KVWorker(Worker):

    NAME = 'kv-worker'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.reservoir = Reservoir(num_workers=self.ws.workers)

    def gen_cmd_sequence(self, cb=None, extras=None) -> Sequence:
        ops = \
            ['c'] * self.ws.creates + \
            ['r'] * self.ws.reads + \
            ['u'] * self.ws.updates + \
            ['d'] * self.ws.deletes + \
            ['ru'] * (self.ws.reads_and_updates // 2) + \
            ['fus'] * self.ws.fts_updates_swap + \
            ['fur'] * self.ws.fts_updates_reverse
        random.shuffle(ops)

        curr_items_tmp = curr_items_spot = self.curr_items.value
        if self.ws.creates:
            with self.lock:
                self.curr_items.value += self.ws.creates
                curr_items_tmp = self.curr_items.value - self.ws.creates
            curr_items_spot = (curr_items_tmp -
                               self.ws.creates * self.ws.workers)

        deleted_items_tmp = deleted_spot = 0
        if self.ws.deletes:
            with self.lock:
                self.deleted_items.value += self.ws.deletes
                deleted_items_tmp = self.deleted_items.value - self.ws.deletes
            deleted_spot = (deleted_items_tmp +
                            self.ws.deletes * self.ws.workers)

        if not cb:
            cb = self.cb

        cmds = []
        for op in ops:
            if op == 'c':
                key = self.new_keys.next(curr_items_tmp)
                doc = self.docs.next(key)
                curr_items_tmp += 1
                cmds.append((None, cb.create, (key.string, doc)))
            elif op == 'r':
                key = self.existing_keys.next(curr_items_spot, deleted_spot)

                if extras == 'subdoc':
                    cmds.append(
                        ('get', cb.read, (key.string, self.ws.subdoc_field)))
                elif extras == 'xattr':
                    cmds.append(('get', cb.read_xattr, (key.string,
                                                        self.ws.xattr_field)))
                else:
                    cmds.append(('get', cb.read, (key.string, )))
            elif op == 'u':
                key = self.existing_keys.next(curr_items_spot, deleted_spot,
                                              self.current_hot_load_start,
                                              self.timer_elapse)
                doc = self.docs.next(key)

                if extras == 'subdoc':
                    cmds.append(('set', cb.update,
                                 (key.string, self.ws.subdoc_field, doc)))
                elif extras == 'xattr':
                    cmds.append(('set', cb.update_xattr,
                                 (key.string, self.ws.xattr_field, doc)))
                else:
                    cmds.append(('set', cb.update, (key.string, doc)))
            elif op == 'd':
                key = self.keys_for_removal.next(deleted_items_tmp)
                deleted_items_tmp += 1
                cmds.append((None, cb.delete, (key.string, )))
            elif op == 'ru':
                key = self.existing_keys.next(curr_items_spot, deleted_spot)
                doc = self.docs.next(key)

                cmds.append(('get', cb.read, (key.string, )))
                cmds.append(('set', cb.update, (key.string, doc)))
            elif op == 'fus':
                key = self.fts_keys.next()
                cmds.append((None, self.do_fts_updates_swap, (key, )))
            elif op == 'fur':
                key = self.fts_keys.next()
                cmds.append((None, self.do_fts_updates_reverse, (key, )))
        return cmds

    def do_fts_updates_swap(self, key):
        doc = self.cb.client.get(key).value
        if 'text' in doc and 'text2' in doc:
            tmp = doc["text2"]
            doc["text2"] = doc["text"]
            doc["text"] = tmp
        elif 'time' in doc:
            if randint(0, 1):
                doc["time"] = int(doc["time"]) >> 1
            else:
                doc["time"] = int(doc["time"]) << 1
        else:
            return
        self.cb.client.set(key, doc)

    def do_fts_updates_reverse(self, key):
        doc = self.cb.client.get(key).value
        words = doc["name"].split(' ')
        if len(words):
            doc["name"] = ' '.join(words[::-1])
            self.cb.client.set(key, doc)

    @with_sleep
    def do_batch(self, *args, **kwargs):
        for cmd, func, args in self.gen_cmd_sequence():
            latency = func(*args)
            if latency is not None:
                self.reservoir.update(operation=cmd, value=latency)

    def run_condition(self, curr_ops):
        return curr_ops.value < self.ws.ops and not self.time_to_stop()

    def run(self,
            sid,
            lock,
            curr_ops,
            curr_items,
            deleted_items,
            current_hot_load_start=None,
            timer_elapse=None):

        if self.ws.throughput < float('inf'):
            self.target_time = float(self.BATCH_SIZE) * self.ws.workers / \
                self.ws.throughput
        else:
            self.target_time = None
        self.sid = sid
        self.lock = lock
        self.curr_items = curr_items
        self.deleted_items = deleted_items
        self.current_hot_load_start = current_hot_load_start
        self.timer_elapse = timer_elapse

        self.seed()

        logger.info('Started: {}-{}'.format(self.NAME, self.sid))
        try:
            while self.run_condition(curr_ops):
                with lock:
                    curr_ops.value += self.BATCH_SIZE
                self.do_batch()
                self.report_progress(curr_ops.value)
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}'.format(self.NAME, self.sid))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.dump_stats()
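
The target_time computed at the top of run() is the per-batch time budget that with_sleep enforces: every worker issues BATCH_SIZE operations per batch, and all ws.workers together must not exceed ws.throughput operations per second. With made-up numbers:

# Illustrative values only.
BATCH_SIZE = 100      # operations per batch, per worker
workers = 8           # concurrent kv-workers
throughput = 16000    # requested cluster-wide ops/s

# The workers collectively emit (workers * BATCH_SIZE) ops per batch
# round, so each batch must take at least:
target_time = float(BATCH_SIZE) * workers / throughput  # 0.05 s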
Code example #13
class N1QLWorker(Worker):

    NAME = 'query-worker'

    def __init__(self,
                 workload_settings,
                 target_settings,
                 shutdown_event=None):
        super().__init__(workload_settings, target_settings, shutdown_event)
        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)
        self.reservoir = Reservoir(num_workers=self.ws.n1ql_workers)
        self.gen_duration = 0.0
        self.batch_duration = 0.0
        self.delta = 0.0
        self.op_delay = 0.0
        self.first = True

    def read(self):
        if self.target_time:
            t0 = time.time()
            self.op_delay = self.op_delay + (self.delta /
                                             self.ws.n1ql_batch_size)

        curr_items = self.curr_items.value
        if self.ws.doc_gen == 'ext_reverse_lookup':
            curr_items //= 4

        for i in range(self.ws.n1ql_batch_size):
            key = self.existing_keys.next(curr_items=curr_items,
                                          curr_deletes=0)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            if not self.first:
                self.reservoir.update(operation='query', value=latency)
            else:
                self.first = False

            if self.op_delay > 0 and self.target_time:
                time.sleep(self.op_delay * self.CORRECTION_FACTOR)

            if not i % 5:
                if self.time_to_stop():
                    return

        if self.target_time:
            self.batch_duration = time.time() - t0
            self.delta = self.target_time - self.batch_duration
            if self.delta > 0:
                time.sleep(self.CORRECTION_FACTOR * self.delta)

    @with_sleep
    def create(self):
        with self.gen_lock:
            curr_items = self.curr_items.value
            self.curr_items.value += self.ws.n1ql_batch_size

        for i in range(self.ws.n1ql_batch_size):
            curr_items += 1
            key = self.new_keys.next(curr_items=curr_items)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

            if not i % 5:
                if self.time_to_stop():
                    return

    @with_sleep
    def update(self):
        with self.gen_lock:
            curr_items = self.curr_items.value

        for i in range(self.ws.n1ql_batch_size):
            key = self.keys_for_cas_update.next(sid=self.sid,
                                                curr_items=curr_items)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

            if not i % 5:
                if self.time_to_stop():
                    return

    def do_batch(self):
        if self.ws.n1ql_op == 'read':
            self.read()
        elif self.ws.n1ql_op == 'create':
            self.create()
        elif self.ws.n1ql_op == 'update':
            self.update()

    def run(self, sid, locks, curr_ops, curr_items, *args):
        if self.ws.n1ql_throughput < float('inf'):
            self.target_time = self.ws.n1ql_batch_size * self.ws.n1ql_workers / \
                float(self.ws.n1ql_throughput)
        else:
            self.target_time = None
        self.locks = locks
        self.gen_lock = locks[0]
        self.batch_lock = locks[1]
        self.sid = sid
        self.curr_items = curr_items

        try:
            while not self.time_to_stop():
                self.do_batch()
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}'.format(self.NAME, self.sid))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.dump_stats()
Code example #14
class KVWorker(Worker):

    NAME = 'kv-worker'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.reservoir = Reservoir(num_workers=self.ws.workers *
                                   len(self.ws.bucket_list))
        self.gen_duration = 0.0
        self.batch_duration = 0.0
        self.delta = 0.0
        self.op_delay = 0.0

    @property
    def random_ops(self) -> List[str]:
        ops = \
            ['c'] * self.ws.creates + \
            ['r'] * self.ws.reads + \
            ['u'] * self.ws.updates + \
            ['d'] * self.ws.deletes + \
            ['m'] * (self.ws.reads_and_updates // 2)
        random.shuffle(ops)
        return ops

    def create_args(self, cb: Client, curr_items: int) -> Sequence:
        key = self.new_keys.next(curr_items)
        doc = self.docs.next(key)
        if self.ws.durability:
            args = key.string, doc, self.ws.durability, self.ws.ttl
            return [('set', cb.update_durable, args)]
        else:
            args = key.string, doc, self.ws.persist_to, self.ws.replicate_to, self.ws.ttl
            return [('set', cb.update, args)]

    def read_args(self, cb: Client, curr_items: int,
                  deleted_items: int) -> Sequence:
        key = self.existing_keys.next(curr_items, deleted_items)
        args = key.string,

        return [('get', cb.read, args)]

    def update_args(self, cb: Client, curr_items: int,
                    deleted_items: int) -> Sequence:
        key = self.existing_keys.next(curr_items, deleted_items,
                                      self.current_hot_load_start,
                                      self.timer_elapse)
        doc = self.docs.next(key)
        if self.ws.durability:
            args = key.string, doc, self.ws.durability, self.ws.ttl
            return [('set', cb.update_durable, args)]
        else:
            args = key.string, doc, self.ws.persist_to, self.ws.replicate_to, self.ws.ttl
            return [('set', cb.update, args)]

    def delete_args(self, cb: Client, deleted_items: int) -> Sequence:
        key = self.keys_for_removal.next(deleted_items)
        args = key.string,

        return [('delete', cb.delete, args)]

    def modify_args(self, cb: Client, curr_items: int,
                    deleted_items: int) -> Sequence:
        key = self.existing_keys.next(curr_items, deleted_items)
        doc = self.docs.next(key)
        read_args = key.string,
        update_args = key.string, doc, self.ws.persist_to, self.ws.replicate_to, self.ws.ttl

        return [('get', cb.read, read_args), ('set', cb.update, update_args)]

    def gen_cmd_sequence(self, cb: Client = None) -> Sequence:
        if not cb:
            cb = self.cb

        curr_items = self.ws.items
        deleted_items = 0
        if self.ws.creates or self.ws.deletes:
            with self.gen_lock:
                curr_items = self.curr_items.value
                self.curr_items.value += self.ws.creates
                deleted_items = \
                    self.deleted_items.value + self.ws.deletes * self.ws.workers
                self.deleted_items.value += self.ws.deletes

        cmds = []
        for op in self.random_ops:
            if op == 'c':
                cmds += self.create_args(cb, curr_items)
                curr_items += 1
            elif op == 'r':
                cmds += self.read_args(cb, curr_items, deleted_items)
            elif op == 'u':
                cmds += self.update_args(cb, curr_items, deleted_items)
            elif op == 'd':
                cmds += self.delete_args(cb, deleted_items)
                deleted_items += 1
            elif op == 'm':
                cmds += self.modify_args(cb, curr_items, deleted_items)
        return cmds

    def do_batch(self, *args, **kwargs):
        op_count = 0
        if self.target_time is None:
            cmd_seq = self.gen_cmd_sequence()
            for cmd, func, args in cmd_seq:
                latency = func(*args)
                if latency is not None:
                    self.reservoir.update(operation=cmd, value=latency)
                if not op_count % 5:
                    if self.time_to_stop():
                        return
                op_count += 1
        else:
            t0 = time.time()
            self.op_delay = self.op_delay + (self.delta /
                                             self.ws.spring_batch_size)
            cmd_seq = self.gen_cmd_sequence()
            self.gen_duration = time.time() - t0
            for cmd, func, args in cmd_seq:
                latency = func(*args)
                if latency is not None:
                    self.reservoir.update(operation=cmd, value=latency)
                if self.op_delay > 0:
                    time.sleep(self.op_delay * self.CORRECTION_FACTOR)
                if not op_count % 5:
                    if self.time_to_stop():
                        return
                op_count += 1
            self.batch_duration = time.time() - t0
            self.delta = self.target_time - self.batch_duration
            if self.delta > 0:
                time.sleep(self.CORRECTION_FACTOR * self.delta)

    def run_condition(self, curr_ops):
        return curr_ops.value < self.ws.ops and not self.time_to_stop()

    def run(self,
            sid,
            locks,
            curr_ops,
            curr_items,
            deleted_items,
            current_hot_load_start=None,
            timer_elapse=None):
        if self.ws.throughput < float('inf'):
            self.target_time = float(self.ws.spring_batch_size) * self.ws.workers / \
                self.ws.throughput
        else:
            self.target_time = None
        self.sid = sid
        self.locks = locks
        self.gen_lock = locks[0]
        self.batch_lock = locks[1]
        self.curr_items = curr_items
        self.deleted_items = deleted_items
        self.current_hot_load_start = current_hot_load_start
        self.timer_elapse = timer_elapse
        self.seed()

        try:
            while self.run_condition(curr_ops):
                with self.batch_lock:
                    curr_ops.value += self.ws.spring_batch_size
                self.do_batch()
                self.report_progress(curr_ops.value)
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}-{}'.format(self.NAME, self.sid,
                                                       self.ts.bucket))
        else:
            logger.info('Finished: {}-{}-{}'.format(self.NAME, self.sid,
                                                    self.ts.bucket))

        self.dump_stats()
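
Example #14 drops the with_sleep decorator and paces itself inside do_batch(): the leftover time from the previous batch (delta) is amortized over the individual operations of the next one via op_delay, which yields a smoother request stream than a single sleep at each batch boundary. The carry-forward logic, stripped to its core (illustrative only; state handling simplified):

import time

CORRECTION_FACTOR = 0.975  # assumed constant, as above


def paced_batch(ops, pacing, target_time, batch_size):
    """Run one batch of zero-argument callables with per-op pacing."""
    t0 = time.time()
    pacing['op_delay'] += pacing['delta'] / batch_size
    for op in ops:
        op()
        if pacing['op_delay'] > 0:
            time.sleep(pacing['op_delay'] * CORRECTION_FACTOR)
    # Carry the remaining budget (positive or negative) into the next batch.
    pacing['delta'] = target_time - (time.time() - t0)
    if pacing['delta'] > 0:
        time.sleep(CORRECTION_FACTOR * pacing['delta'])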
Code example #15
File: wgen.py Project: mahesh152/perfrunner
class N1QLWorker(Worker):

    NAME = 'n1ql-worker'

    def __init__(self, workload_settings, target_settings, shutdown_event):
        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)
        self.total_workers = workload_settings.n1ql_workers
        self.throughput = workload_settings.n1ql_throughput

        self.reservoir = Reservoir(num_workers=workload_settings.n1ql_workers)

        super(N1QLWorker, self).__init__(workload_settings, target_settings,
                                         shutdown_event)

    def init_keys(self):
        self.existing_keys = ExistingKey(self.ws.working_set,
                                         self.ws.working_set_access,
                                         prefix='n1ql')
        self.new_keys = NewKey(prefix='n1ql', expiration=self.ws.expiration)
        self.keys_for_casupdate = KeyForCASUpdate(self.total_workers,
                                                  self.ws.working_set,
                                                  self.ws.working_set_access,
                                                  prefix='n1ql')

    def init_docs(self):
        if self.ws.doc_gen == 'reverse_lookup':
            self.docs = ReverseLookupDocument(self.ws.size,
                                              prefix='n1ql')
        elif self.ws.doc_gen == 'reverse_range_lookup':
            self.docs = ReverseRangeLookupDocument(self.ws.size,
                                                   prefix='n1ql',
                                                   range_distance=self.ws.range_distance)
        elif self.ws.doc_gen == 'ext_reverse_lookup':
            self.docs = ExtReverseLookupDocument(self.ws.size,
                                                 prefix='n1ql',
                                                 num_docs=self.ws.items)
        elif self.ws.doc_gen == 'join':
            self.docs = JoinedDocument(self.ws.size,
                                       prefix='n1ql',
                                       num_docs=self.ws.items,
                                       num_categories=self.ws.num_categories,
                                       num_replies=self.ws.num_replies)
        elif self.ws.doc_gen == 'ref':
            self.docs = RefDocument(self.ws.size,
                                    prefix='n1ql')
        elif self.ws.doc_gen == 'array_indexing':
            self.docs = ArrayIndexingDocument(self.ws.size,
                                              prefix='n1ql',
                                              array_size=self.ws.array_size,
                                              num_docs=self.ws.items)

    def init_db(self):
        host, port = self.ts.node.split(':')
        self.cb = N1QLGen(bucket=self.ts.bucket, password=self.ts.password,
                          host=host, port=port)

    def read(self):
        curr_items_tmp = self.curr_items.value
        if self.ws.doc_gen == 'ext_reverse_lookup':
            curr_items_tmp //= 4

        for _ in range(self.BATCH_SIZE):
            key = self.existing_keys.next(curr_items=curr_items_tmp,
                                          curr_deletes=0)
            doc = self.docs.next(key)
            doc['key'] = key
            doc['bucket'] = self.ts.bucket
            query = self.new_queries.next(doc)

            _, latency = self.cb.query(query)
            self.reservoir.update(latency)

    def create(self):
        with self.lock:
            self.curr_items.value += self.BATCH_SIZE
            curr_items_tmp = self.curr_items.value - self.BATCH_SIZE

        for _ in range(self.BATCH_SIZE):
            curr_items_tmp += 1
            key, ttl = self.new_keys.next(curr_items=curr_items_tmp)
            doc = self.docs.next(key)
            doc['key'] = key
            doc['bucket'] = self.ts.bucket
            query = self.new_queries.next(doc)

            _, latency = self.cb.query(query)
            self.reservoir.update(latency)

    def update(self):
        with self.lock:
            self.cas_updated_items.value += self.BATCH_SIZE
            curr_items_tmp = self.curr_items.value - self.BATCH_SIZE

        for _ in range(self.BATCH_SIZE):
            key = self.keys_for_casupdate.next(self.sid,
                                               curr_items=curr_items_tmp,
                                               curr_deletes=0)
            doc = self.docs.next(key)
            doc['key'] = key
            doc['bucket'] = self.ts.bucket
            query = self.new_queries.next(doc)

            _, latency = self.cb.query(query)
            self.reservoir.update(latency)

    def range_update(self):
        with self.lock:
            self.cas_updated_items.value += self.BATCH_SIZE
            curr_items_tmp = self.curr_items.value - self.BATCH_SIZE

        for _ in range(self.BATCH_SIZE):
            key = self.keys_for_casupdate.next(self.sid,
                                               curr_items=curr_items_tmp,
                                               curr_deletes=0)
            doc = self.docs.next(key)
            doc['key'] = key
            doc['bucket'] = self.ts.bucket
            query = self.new_queries.next(doc)

            _, latency = self.cb.query(query)
            self.reservoir.update(latency)

    @with_sleep
    def do_batch(self):
        if self.ws.n1ql_op == 'read':
            self.read()
        elif self.ws.n1ql_op == 'create':
            self.create()
        elif self.ws.n1ql_op == 'update':
            self.update()
        elif self.ws.n1ql_op == 'rangeupdate':
            self.range_update()

    def run(self, sid, lock, curr_queries, curr_items, deleted_items,
            cas_updated_items):

        if self.throughput < float('inf'):
            self.target_time = float(self.BATCH_SIZE) * self.total_workers / \
                self.throughput
        else:
            self.target_time = None
        self.lock = lock
        self.sid = sid
        self.curr_items = curr_items
        self.cas_updated_items = cas_updated_items
        self.curr_queries = curr_queries

        try:
            logger.info('Started: {}-{}'.format(self.NAME, self.sid))
            while curr_queries.value < self.ws.ops and not self.time_to_stop():
                with self.lock:
                    curr_queries.value += self.BATCH_SIZE
                self.do_batch()
        except (KeyboardInterrupt, ValueFormatError, AttributeError) as e:
            logger.info('Interrupted: {}-{}-{}'.format(self.NAME, self.sid, e))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        self.reservoir.dump(filename='{}-{}'.format(self.NAME, self.sid))
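
Finally, run() in all of these classes is written to execute in a child process with shared counters, but no driver code made it into the results. A hedged sketch of how a KVWorker from example #10 might be launched; everything beyond the KVWorker interface (the spawn helper, settings objects) is illustrative.

from multiprocessing import Lock, Process, Value


def spawn_kv_workers(workload_settings, target_settings):
    lock = Lock()
    curr_ops = Value('L', 0)
    curr_items = Value('L', workload_settings.items)
    deleted_items = Value('L', 0)

    processes = []
    for sid in range(workload_settings.workers):
        worker = KVWorker(workload_settings, target_settings)
        p = Process(target=worker.run,
                    args=(sid, lock, curr_ops, curr_items, deleted_items))
        p.start()
        processes.append(p)

    for p in processes:
        p.join()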