Example 1
0
    def init_keys(self):
        """Set up the key generators for this worker.

        All generators share the fixed 'n1ql' key prefix; CAS-update keys
        are additionally partitioned across the worker pool.
        """
        self.new_keys = NewOrderedKey(prefix='n1ql')
        self.existing_keys = UniformKey(prefix='n1ql')
        self.keys_for_cas_update = KeyForCASUpdate(self.total_workers,
                                                   prefix='n1ql')
Example 2
0
    def init_keys(self):
        """Choose key generators from the workload's access-pattern settings.

        The existing-key distribution is selected by the first matching
        option: moving working set, bounded working set, power-law, Zipf,
        otherwise uniform.
        """
        prefix = self.ts.prefix
        fmtr = self.ws.key_fmtr

        self.new_keys = NewOrderedKey(prefix=prefix, fmtr=fmtr)

        if self.ws.working_set_move_time:
            existing = MovingWorkingSetKey(self.ws, prefix)
        elif self.ws.working_set < 100:
            existing = WorkingSetKey(self.ws, prefix)
        elif self.ws.power_alpha:
            existing = PowerKey(prefix, fmtr, self.ws.power_alpha)
        elif self.ws.zipf_alpha:
            existing = ZipfKey(prefix, fmtr, self.ws.zipf_alpha)
        else:
            existing = UniformKey(prefix, fmtr)
        self.existing_keys = existing

        self.keys_for_removal = KeyForRemoval(prefix, fmtr)
        self.keys_for_cas_update = KeyForCASUpdate(self.ws.n1ql_workers,
                                                   prefix, fmtr)

        # Lazy %-style args: the message rendered is identical to the
        # previous str.format() version.
        logger.info(
            'existing_keys {}, keys_for_removal {}, keys_for_cas_update {}'
            .format(self.existing_keys, self.keys_for_removal,
                    self.keys_for_cas_update))
Example 3
0
    def init_keys(self):
        """Build key generators for a workload that also exercises FTS.

        Existing-key selection: moving working set if configured, bounded
        working set if under 100%, otherwise uniform over all items.
        """
        prefix = self.ts.prefix
        fmtr = self.ws.key_fmtr

        self.new_keys = NewOrderedKey(prefix=prefix, fmtr=fmtr)

        if self.ws.working_set_move_time:
            chosen = MovingWorkingSetKey(self.ws, prefix)
        elif self.ws.working_set < 100:
            chosen = WorkingSetKey(self.ws, prefix)
        else:
            chosen = UniformKey(prefix, fmtr)
        self.existing_keys = chosen

        self.keys_for_removal = KeyForRemoval(prefix, fmtr)
        self.fts_keys = FTSKey(self.ws)
Example 4
0
class N1QLWorker(Worker):
    """Worker process that drives a N1QL query workload.

    Issues batches of read/create/update queries against the cluster,
    records per-query latency in a reservoir sampler, and dumps the
    collected stats when it finishes.
    """

    # Tag used in start/stop/interrupt log messages.
    NAME = 'query-worker'

    def __init__(self,
                 workload_settings,
                 target_settings,
                 shutdown_event=None):
        # NOTE(review): these attributes are assigned before the base
        # Worker constructor runs — presumably Worker.__init__ does not
        # read them; confirm against the Worker base class.
        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)
        self.total_workers = workload_settings.n1ql_workers
        self.throughput = workload_settings.n1ql_throughput

        super().__init__(workload_settings, target_settings, shutdown_event)

        self.init_creds()

        # Latency samples are kept per worker pool size.
        self.reservoir = Reservoir(num_workers=self.ws.n1ql_workers)

    def init_keys(self):
        """Create the key generators; all use the hard-coded 'n1ql' prefix."""
        self.new_keys = NewOrderedKey(prefix='n1ql', fmtr=self.ws.key_fmtr)

        self.existing_keys = UniformKey(prefix='n1ql', fmtr=self.ws.key_fmtr)

        # CAS-update keys are partitioned by worker count so concurrent
        # workers target disjoint documents.
        self.keys_for_cas_update = KeyForCASUpdate(
            total_workers=self.total_workers,
            prefix='n1ql',
            fmtr=self.ws.key_fmtr)

    def init_docs(self):
        """Select the document generator matching ws.doc_gen.

        NOTE(review): an unrecognized doc_gen value leaves self.docs
        unset here — presumably validated upstream; confirm.
        """
        if self.ws.doc_gen == 'reverse_lookup':
            self.docs = ReverseLookupDocument(self.ws.size, prefix='n1ql')
        elif self.ws.doc_gen == 'reverse_range_lookup':
            self.docs = ReverseRangeLookupDocument(
                self.ws.size,
                prefix='n1ql',
                range_distance=self.ws.range_distance)
        elif self.ws.doc_gen == 'ext_reverse_lookup':
            self.docs = ExtReverseLookupDocument(self.ws.size,
                                                 prefix='n1ql',
                                                 num_docs=self.ws.items)
        elif self.ws.doc_gen == 'join':
            self.docs = JoinedDocument(self.ws.size,
                                       prefix='n1ql',
                                       num_docs=self.ws.items,
                                       num_categories=self.ws.num_categories,
                                       num_replies=self.ws.num_replies)
        elif self.ws.doc_gen == 'ref':
            self.docs = RefDocument(self.ws.size, prefix='n1ql')
        elif self.ws.doc_gen == 'profile':
            self.docs = ProfileDocument(self.ws.size, prefix='n1ql')
        elif self.ws.doc_gen == 'array_indexing':
            self.docs = ArrayIndexingDocument(self.ws.size,
                                              prefix='n1ql',
                                              array_size=self.ws.array_size,
                                              num_docs=self.ws.items)

    def init_creds(self):
        """Register per-bucket credentials with the client (best-effort:
        no-op when the workload settings define no buckets)."""
        for bucket in getattr(self.ws, 'buckets', []):
            self.cb.client.add_bucket_creds(bucket, self.ts.password)

    def read(self):
        """Run one batch of read queries against existing keys."""
        curr_items_tmp = self.curr_items.value
        if self.ws.doc_gen == 'ext_reverse_lookup':
            # NOTE(review): key space is restricted to a quarter of the
            # current items for this generator — presumably it emits
            # several documents per key; confirm against the generator.
            curr_items_tmp //= 4

        for _ in range(self.ws.n1ql_batch_size):
            key = self.existing_keys.next(curr_items=curr_items_tmp,
                                          curr_deletes=0)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    def create(self):
        """Run one batch of INSERT-style queries on newly reserved keys."""
        # Atomically reserve a whole batch of item ids on the shared
        # counter, then generate the individual keys outside the lock.
        with self.lock:
            self.curr_items.value += self.ws.n1ql_batch_size
            curr_items_tmp = self.curr_items.value - self.ws.n1ql_batch_size

        for _ in range(self.ws.n1ql_batch_size):
            curr_items_tmp += 1
            key = self.new_keys.next(curr_items=curr_items_tmp)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    def update(self):
        """Run one batch of CAS-update queries on this worker's key slice."""
        # Snapshot the shared counter under the lock; the batch then works
        # against a consistent item count.
        with self.lock:
            curr_items_tmp = self.curr_items.value

        for _ in range(self.ws.n1ql_batch_size):
            key = self.keys_for_cas_update.next(sid=self.sid,
                                                curr_items=curr_items_tmp)
            doc = self.docs.next(key)
            query = self.new_queries.next(key.string, doc)

            latency = self.cb.n1ql_query(query)
            self.reservoir.update(operation='query', value=latency)

    @with_sleep
    def do_batch(self):
        """Dispatch one batch according to the configured n1ql_op.

        Unknown ops fall through as a no-op batch. The with_sleep
        decorator presumably throttles using self.target_time — confirm.
        """
        if self.ws.n1ql_op == 'read':
            self.read()
        elif self.ws.n1ql_op == 'create':
            self.create()
        elif self.ws.n1ql_op == 'update':
            self.update()

    def run(self, sid, lock, curr_ops, curr_items, *args):
        """Main loop: run batches until time_to_stop(), then dump stats.

        curr_ops is accepted for signature compatibility but unused here.
        """
        # Per-batch time budget for rate limiting; None disables throttling.
        if self.throughput < float('inf'):
            self.target_time = self.ws.n1ql_batch_size * self.total_workers / \
                float(self.throughput)
        else:
            self.target_time = None
        self.lock = lock
        self.sid = sid
        self.curr_items = curr_items

        try:
            logger.info('Started: {}-{}'.format(self.NAME, self.sid))
            while not self.time_to_stop():
                self.do_batch()
        except KeyboardInterrupt:
            logger.info('Interrupted: {}-{}'.format(self.NAME, self.sid))
        else:
            logger.info('Finished: {}-{}'.format(self.NAME, self.sid))

        # Stats are dumped on both clean finish and interrupt.
        self.dump_stats()