def __init__(self, settings, workload, prefix=None):
    super(Latency, self).__init__(settings)

    # One CBGen client per bucket; with SASL buckets the bucket name
    # doubles as the username.
    self.clients = []
    for bucket in self.get_buckets():
        client = CBGen(bucket=bucket, host=settings.master_node,
                       username=bucket, password=settings.bucket_password)
        self.clients.append((bucket, client))

    self.existing_keys = ExistingKey(workload.working_set,
                                     workload.working_set_access,
                                     prefix=prefix)
    self.new_keys = NewKey(prefix=prefix, expiration=workload.expiration)
    self.keys_for_removal = KeyForRemoval(prefix=prefix)

    # Pick a document generator; 'old' is the default when doc_gen is unset
    if not hasattr(workload, 'doc_gen') or workload.doc_gen == 'old':
        self.new_docs = NewDocument(workload.size)
    elif workload.doc_gen == 'new':
        self.new_docs = NewNestedDocument(workload.size)
    elif workload.doc_gen == 'reverse_lookup':
        self.new_docs = ReverseLookupDocument(workload.size,
                                              workload.doc_partitions)
    elif workload.doc_gen == 'reverse_lookup_array_indexing':
        self.new_docs = ReverseLookupDocumentArrayIndexing(
            workload.size, workload.doc_partitions, workload.items)

    self.items = workload.items
    self.n1ql_op = workload.n1ql_op
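
# --- Illustrative sketch (not part of the original code) ---
# The doc_gen branching above can be read as a lookup table; this sketch
# restates it that way. The generator classes are assumed to come from this
# module's existing imports; `w` stands for the workload settings object
# used above, and the 'old' entry mirrors the default fallback.
DOC_GEN_SKETCH = {
    'old': lambda w: NewDocument(w.size),
    'new': lambda w: NewNestedDocument(w.size),
    'reverse_lookup': lambda w: ReverseLookupDocument(w.size,
                                                      w.doc_partitions),
    'reverse_lookup_array_indexing':
        lambda w: ReverseLookupDocumentArrayIndexing(w.size,
                                                     w.doc_partitions,
                                                     w.items),
}
# e.g.: new_docs = DOC_GEN_SKETCH[getattr(w, 'doc_gen', 'old')](w)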
def __init__(self, workload_settings, target_settings, shutdown_event):
    super(N1QLWorker, self).__init__(workload_settings, target_settings,
                                     shutdown_event)
    self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)
    self.total_workers = self.ws.n1ql_workers
    self.throughput = self.ws.n1ql_throughput
    self.name = 'n1ql-worker'

    host, port = self.ts.node.split(':')
    bucket = self.ts.bucket
    if workload_settings.n1ql_op == 'ryow':
        # Read-your-own-writes needs mutation tokens from the client
        bucket += '?fetch_mutation_tokens=true'

    params = {'bucket': bucket, 'host': host, 'port': port,
              'username': self.ts.bucket, 'password': self.ts.password}

    self.existing_keys = ExistingKey(self.ws.working_set,
                                     self.ws.working_set_access,
                                     'n1ql')
    self.new_keys = NewKey('n1ql', self.ws.expiration)
    self.keys_for_removal = KeyForRemoval('n1ql')
    self.keys_for_casupdate = KeyForCASUpdate(self.total_workers,
                                              self.ws.working_set,
                                              self.ws.working_set_access,
                                              'n1ql')

    # N1QL workers always use deterministic (non-random) document bodies
    if self.ws.doc_gen == 'merge':
        self.docs = MergeDocument(self.ws.size,
                                  self.ws.doc_partitions,
                                  False)
    elif self.ws.doc_gen == 'reverse_lookup':
        self.docs = ReverseLookupDocument(self.ws.size,
                                          self.ws.doc_partitions,
                                          False)
    elif self.ws.doc_gen == 'reverse_lookup_array_indexing':
        if self.ws.updates:
            # Updates add a random delta (0-10) to the array values
            self.docs = ReverseLookupDocumentArrayIndexing(
                self.ws.size, self.ws.doc_partitions, self.ws.items,
                delta=random.randint(0, 10))
        else:
            self.docs = ReverseLookupDocumentArrayIndexing(
                self.ws.size, self.ws.doc_partitions, self.ws.items)

    self.cb = N1QLGen(**params)
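
# --- Illustrative sketch (not part of the original code) ---
# How the worker's connection params look for an RYOW workload: the bucket
# string gains a query flag so the client returns mutation tokens for
# read-your-own-writes scans. The host, bucket, and password values below
# are made up for illustration.
def _ryow_params_sketch():
    host, port = '127.0.0.1:8091'.split(':')
    bucket = 'bucket-1' + '?fetch_mutation_tokens=true'
    return {'bucket': bucket, 'host': host, 'port': port,
            'username': 'bucket-1', 'password': 'password'}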
def generate_keys():
    new_keys = NewKey(prefix=None, expiration=0)
    return tuple(new_keys.next(i)[0] for i in xrange(50000))
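
# --- Illustrative usage (not part of the original code) ---
# generate_keys() pre-builds the key set once, so hot-path code can index
# into a tuple instead of invoking the generator per operation. Assuming
# NewKey derives each key purely from its index, repeated calls return the
# same 50,000 keys.
keys = generate_keys()
print(len(keys))   # 50000
print(keys[:3])    # first few generated keys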
def __init__(self, workload_settings, target_settings, shutdown_event=None):
    self.ws = workload_settings
    self.ts = target_settings
    self.shutdown_event = shutdown_event
    logger.setLevel(logging.INFO)

    self.existing_keys = ExistingKey(self.ws.working_set,
                                     self.ws.working_set_access,
                                     self.ts.prefix)
    self.new_keys = NewKey(self.ts.prefix, self.ws.expiration)
    self.keys_for_removal = KeyForRemoval(self.ts.prefix)

    # The 'n1ql' prefix marks deterministic (non-random) document bodies
    isRandom = self.ts.prefix != 'n1ql'

    if not hasattr(self.ws, 'doc_gen') or self.ws.doc_gen == 'old':
        extra_fields = getattr(self.ws, 'extra_doc_fields', None) == 'yes'
        self.docs = NewDocument(self.ws.size, extra_fields)
    elif self.ws.doc_gen == 'new':
        self.docs = NewNestedDocument(self.ws.size)
    elif self.ws.doc_gen == 'merge':
        self.docs = MergeDocument(self.ws.size,
                                  self.ws.doc_partitions,
                                  isRandom)
    elif self.ws.doc_gen == 'reverse_lookup':
        self.docs = ReverseLookupDocument(self.ws.size,
                                          self.ws.doc_partitions,
                                          isRandom)
    elif self.ws.doc_gen == 'reverse_lookup_array_indexing':
        if self.ws.updates:
            # Updates add a random delta (0-10) to every value in the array
            self.docs = ReverseLookupDocumentArrayIndexing(
                self.ws.size, self.ws.doc_partitions, self.ws.items,
                delta=random.randint(0, 10))
        else:
            self.docs = ReverseLookupDocumentArrayIndexing(
                self.ws.size, self.ws.doc_partitions, self.ws.items)
    elif self.ws.doc_gen == 'spatial':
        self.docs = NewDocumentFromSpatialFile(
            self.ws.spatial.data, self.ws.spatial.dimensionality)
    elif self.ws.doc_gen == 'large_subdoc':
        self.docs = NewLargeDocument(self.ws.size)

    self.next_report = 0.05  # report after every 5% of completion

    host, port = self.ts.node.split(':')

    # Only FTS uses proxyPort and an auth-less bucket right now. Instead of
    # jumping through hoops to pass proxyPort via the target iterator and
    # settings (which only forward very specific attributes), just detect
    # FTS. The connection below does not work with an auth-less bucket; the
    # FTS worker does its own Couchbase.connect.
    if not (hasattr(self.ws, 'fts') and
            hasattr(self.ws.fts, 'doc_database_url')):
        # Default SASL bucket
        self.init_db({'bucket': self.ts.bucket, 'host': host, 'port': port,
                      'username': self.ts.bucket,
                      'password': self.ts.password})

    self.fallingBehindCount = 0
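
# --- Illustrative sketch (not part of the original code) ---
# The FTS detection above, isolated as a hypothetical helper for
# readability: the worker skips its default SASL bucket connection whenever
# the workload settings expose fts.doc_database_url, because the FTS worker
# opens its own connection instead.
def _uses_fts_doc_database(ws):
    return hasattr(ws, 'fts') and hasattr(ws.fts, 'doc_database_url')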