Example #1
 def init_docs(self):
     if self.ws.doc_gen == 'reverse_lookup':
         self.docs = ReverseLookupDocument(self.ws.size, prefix='n1ql')
     elif self.ws.doc_gen == 'reverse_range_lookup':
         self.docs = ReverseRangeLookupDocument(
             self.ws.size,
             prefix='n1ql',
             range_distance=self.ws.range_distance)
     elif self.ws.doc_gen == 'ext_reverse_lookup':
         self.docs = ExtReverseLookupDocument(self.ws.size,
                                              prefix='n1ql',
                                              num_docs=self.ws.items)
     elif self.ws.doc_gen == 'join':
         self.docs = JoinedDocument(self.ws.size,
                                    prefix='n1ql',
                                    num_docs=self.ws.items,
                                    num_categories=self.ws.num_categories,
                                    num_replies=self.ws.num_replies)
     elif self.ws.doc_gen == 'ref':
         self.docs = RefDocument(self.ws.size, prefix='n1ql')
     elif self.ws.doc_gen == 'profile':
         self.docs = ProfileDocument(self.ws.size, prefix='n1ql')
     elif self.ws.doc_gen == 'array_indexing':
         self.docs = ArrayIndexingDocument(self.ws.size,
                                           prefix='n1ql',
                                           array_size=self.ws.array_size,
                                           num_docs=self.ws.items)
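
Every branch above follows the same shape: pick a generator class based on self.ws.doc_gen and pass it the relevant workload settings. The snippet below is a minimal, self-contained sketch of the same dispatch written as a lookup table of factories; FakeDoc, make_docs and the sample values are hypothetical stand-ins for the real generator classes (ReverseLookupDocument, JoinedDocument, and so on), so the sketch runs on its own.

from types import SimpleNamespace


class FakeDoc:
    # Hypothetical stand-in for the real document generator classes.
    def __init__(self, size, **fields):
        self.size = size
        self.fields = fields


def make_docs(ws, prefix='n1ql'):
    factories = {
        'reverse_lookup': lambda: FakeDoc(ws.size, prefix=prefix),
        'join': lambda: FakeDoc(ws.size, prefix=prefix,
                                num_docs=ws.items,
                                num_categories=ws.num_categories,
                                num_replies=ws.num_replies),
    }
    try:
        return factories[ws.doc_gen]()
    except KeyError:
        raise ValueError('unknown doc_gen: {!r}'.format(ws.doc_gen))


ws = SimpleNamespace(doc_gen='join', size=1024, items=10 ** 6,
                     num_categories=10, num_replies=100)
docs = make_docs(ws)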
Example #2
    def __init__(self, settings, workload, prefix=None):
        super(Latency, self).__init__(settings)
        self.clients = []
        for bucket in self.get_buckets():
            client = CBGen(bucket=bucket,
                           host=settings.master_node,
                           username=bucket,
                           password=settings.bucket_password)
            self.clients.append((bucket, client))

        self.existing_keys = ExistingKey(workload.working_set,
                                         workload.working_set_access,
                                         prefix=prefix)
        self.new_keys = NewKey(prefix=prefix, expiration=workload.expiration)
        self.keys_for_removal = KeyForRemoval(prefix=prefix)

        if not hasattr(workload, 'doc_gen') or workload.doc_gen == 'old':
            self.new_docs = NewDocument(workload.size)
        elif workload.doc_gen == 'new':
            self.new_docs = NewNestedDocument(workload.size)
        elif workload.doc_gen == 'reverse_lookup':
            self.new_docs = ReverseLookupDocument(workload.size,
                                                  workload.doc_partitions)
        elif workload.doc_gen == 'reverse_lookup_array_indexing':
            self.new_docs = ReverseLookupDocumentArrayIndexing(
                workload.size, workload.doc_partitions, workload.items)
        self.items = workload.items
        self.n1ql_op = workload.n1ql_op
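
The constructor treats a missing doc_gen attribute as the legacy 'old' generator. The same default can be expressed with getattr; a tiny illustration, using a throwaway SimpleNamespace in place of the real workload settings object:

from types import SimpleNamespace

workload = SimpleNamespace(size=2048)          # no doc_gen attribute set
doc_gen = getattr(workload, 'doc_gen', 'old')  # falls back to 'old'
assert doc_gen == 'old'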
Example #3
 def init_docs(self):
     if not hasattr(self.ws, 'doc_gen') or self.ws.doc_gen == 'basic':
         self.docs = Document(self.ws.size)
     elif self.ws.doc_gen == 'string':
         self.docs = String(self.ws.size)
     elif self.ws.doc_gen == 'nested':
         self.docs = NestedDocument(self.ws.size)
     elif self.ws.doc_gen == 'reverse_lookup':
         self.docs = ReverseLookupDocument(self.ws.size,
                                           self.ts.prefix)
     elif self.ws.doc_gen == 'reverse_range_lookup':
         self.docs = ReverseRangeLookupDocument(self.ws.size,
                                                prefix='n1ql',
                                                range_distance=self.ws.range_distance)
     elif self.ws.doc_gen == 'ext_reverse_lookup':
         self.docs = ExtReverseLookupDocument(self.ws.size,
                                              self.ts.prefix,
                                              self.ws.items)
     elif self.ws.doc_gen == 'join':
         self.docs = JoinedDocument(self.ws.size,
                                    self.ts.prefix,
                                    self.ws.items,
                                    self.ws.num_categories,
                                    self.ws.num_replies)
     elif self.ws.doc_gen == 'ref':
         self.docs = RefDocument(self.ws.size,
                                 self.ts.prefix)
     elif self.ws.doc_gen == 'array_indexing':
         self.docs = ArrayIndexingDocument(self.ws.size,
                                           self.ts.prefix,
                                           self.ws.array_size,
                                           self.ws.items)
     elif self.ws.doc_gen == 'profile':
         self.docs = ProfileDocument(self.ws.size,
                                     self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_simple':
         self.docs = ImportExportDocument(self.ws.size,
                                          self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_array':
         self.docs = ImportExportDocumentArray(self.ws.size,
                                               self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_nested':
         self.docs = ImportExportDocumentNested(self.ws.size,
                                                self.ts.prefix)
     elif self.ws.doc_gen == 'large_subdoc':
         self.docs = LargeDocument(self.ws.size)
     elif self.ws.doc_gen == 'gsi_multiindex':
         self.docs = GSIMultiIndexDocument(self.ws.size)
     elif self.ws.doc_gen == 'small_plasma':
         self.docs = SmallPlasmaDocument(self.ws.size)
     elif self.ws.doc_gen == 'sequential_plasma':
         self.docs = SequentialPlasmaDocument(self.ws.size)
     elif self.ws.doc_gen == 'large_item_plasma':
         self.docs = LargeItemPlasmaDocument(self.ws.size,
                                             self.ws.item_size)
     elif self.ws.doc_gen == 'varying_item_plasma':
         self.docs = VaryingItemSizePlasmaDocument(self.ws.size,
                                                   self.ws.size_variation_min,
                                                   self.ws.size_variation_max)
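
The method only reads plain attributes from the workload settings (self.ws) and target settings (self.ts). Below is a sketch of the settings shape implied by the 'join' branch above; the attribute names come from that branch, the values are illustrative, and the worker class itself is not shown here.

from types import SimpleNamespace

# Attribute names taken from the 'join' branch above; values are illustrative.
ws = SimpleNamespace(doc_gen='join', size=1024, items=20 * 10 ** 6,
                     num_categories=10 ** 4, num_replies=100)
ts = SimpleNamespace(prefix='n1ql')

# A worker built with these settings would take the 'join' branch and call
# JoinedDocument(ws.size, ts.prefix, ws.items, ws.num_categories,
#                ws.num_replies).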
Example #4
    def __init__(self, workload_settings, target_settings, shutdown_event):
        super(N1QLWorker, self).__init__(workload_settings, target_settings,
                                          shutdown_event)
        self.new_queries = N1QLQueryGen(workload_settings.n1ql_queries)
        self.total_workers = self.ws.n1ql_workers
        self.throughput = self.ws.n1ql_throughput
        self.name = 'n1ql-worker'

        host, port = self.ts.node.split(':')
        bucket = self.ts.bucket
        if workload_settings.n1ql_op == 'ryow':
            bucket += '?fetch_mutation_tokens=true'

        params = {'bucket': bucket, 'host': host, 'port': port,
                  'username': self.ts.bucket, 'password': self.ts.password}

        self.existing_keys = ExistingKey(self.ws.working_set,
                                         self.ws.working_set_access,
                                         'n1ql')
        self.new_keys = NewKey('n1ql', self.ws.expiration)
        self.keys_for_removal = KeyForRemoval('n1ql')
        self.keys_for_casupdate = KeyForCASUpdate(self.total_workers,
                                                  self.ws.working_set,
                                                  self.ws.working_set_access,
                                                  'n1ql')

        if self.ws.doc_gen == 'merge':
            self.docs = MergeDocument(self.ws.size,
                                      self.ws.doc_partitions,
                                      False)
        elif self.ws.doc_gen == 'reverse_lookup':
            self.docs = ReverseLookupDocument(self.ws.size,
                                              self.ws.doc_partitions,
                                              False)
        elif self.ws.doc_gen == 'reverse_lookup_array_indexing':
            if self.ws.updates:
                self.docs = ReverseLookupDocumentArrayIndexing(
                    self.ws.size, self.ws.doc_partitions, self.ws.items,
                    delta=random.randint(0, 10))
            else:
                self.docs = ReverseLookupDocumentArrayIndexing(
                    self.ws.size, self.ws.doc_partitions, self.ws.items)
        self.cb = N1QLGen(**params)
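
The connection parameters are assembled from the target settings, and for the 'ryow' operation the bucket connection string is amended so the client returns mutation tokens. The following is a self-contained sketch of that assembly; the helper name and the SimpleNamespace stand-in are illustrative, not part of the original class.

from types import SimpleNamespace


def connection_params(ts, n1ql_op):
    # Rebuild the parameter dict the way the constructor above does.
    host, port = ts.node.split(':')
    bucket = ts.bucket
    if n1ql_op == 'ryow':  # 'ryow' queries ask the client for mutation tokens
        bucket += '?fetch_mutation_tokens=true'
    return {'bucket': bucket, 'host': host, 'port': port,
            'username': ts.bucket, 'password': ts.password}


ts = SimpleNamespace(node='10.0.0.1:8091', bucket='bucket-1', password='pass')
print(connection_params(ts, 'ryow')['bucket'])
# bucket-1?fetch_mutation_tokens=true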
Example #5
 def init_docs(self):
     if not hasattr(self.ws, 'doc_gen') or self.ws.doc_gen == 'basic':
         self.docs = Document(self.ws.size)
     elif self.ws.doc_gen == 'string':
         self.docs = String(self.ws.size)
     elif self.ws.doc_gen == 'nested':
         self.docs = NestedDocument(self.ws.size)
     elif self.ws.doc_gen == 'reverse_lookup':
         self.docs = ReverseLookupDocument(self.ws.size, self.ts.prefix)
     elif self.ws.doc_gen == 'reverse_range_lookup':
         self.docs = ReverseRangeLookupDocument(self.ws.size,
                                                self.ts.prefix,
                                                self.ws.range_distance)
     elif self.ws.doc_gen == 'ext_reverse_lookup':
         self.docs = ExtReverseLookupDocument(self.ws.size, self.ts.prefix,
                                              self.ws.items)
     elif self.ws.doc_gen == 'hash_join':
         self.docs = HashJoinDocument(self.ws.size, self.ts.prefix,
                                      self.ws.range_distance)
     elif self.ws.doc_gen == 'join':
         self.docs = JoinedDocument(self.ws.size, self.ts.prefix,
                                    self.ws.items, self.ws.num_categories,
                                    self.ws.num_replies)
     elif self.ws.doc_gen == 'ref':
         self.docs = RefDocument(self.ws.size, self.ts.prefix)
     elif self.ws.doc_gen == 'array_indexing':
         self.docs = ArrayIndexingDocument(self.ws.size, self.ts.prefix,
                                           self.ws.array_size,
                                           self.ws.items)
     elif self.ws.doc_gen == 'array_indexing_unique':
         self.docs = ArrayIndexingUniqueDocument(self.ws.size,
                                                 self.ts.prefix,
                                                 self.ws.array_size,
                                                 self.ws.items)
     elif self.ws.doc_gen == 'array_indexing_range_scan':
         self.docs = ArrayIndexingRangeScanDocument(self.ws.size,
                                                    self.ts.prefix,
                                                    self.ws.array_size,
                                                    self.ws.items)
     elif self.ws.doc_gen == 'profile':
         self.docs = ProfileDocument(self.ws.size, self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_simple':
         self.docs = ImportExportDocument(self.ws.size, self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_array':
         self.docs = ImportExportDocumentArray(self.ws.size, self.ts.prefix)
     elif self.ws.doc_gen == 'import_export_nested':
         self.docs = ImportExportDocumentNested(self.ws.size,
                                                self.ts.prefix)
     elif self.ws.doc_gen == 'large':
         self.docs = LargeDocument(self.ws.size)
     elif self.ws.doc_gen == 'gsi_multiindex':
         self.docs = GSIMultiIndexDocument(self.ws.size)
     elif self.ws.doc_gen == 'small_plasma':
         self.docs = SmallPlasmaDocument(self.ws.size)
     elif self.ws.doc_gen == 'sequential_plasma':
         self.docs = SequentialPlasmaDocument(self.ws.size)
     elif self.ws.doc_gen == 'large_item_plasma':
         self.docs = LargeItemPlasmaDocument(self.ws.size,
                                             self.ws.item_size)
     elif self.ws.doc_gen == 'varying_item_plasma':
         self.docs = VaryingItemSizePlasmaDocument(
             self.ws.size, self.ws.size_variation_min,
             self.ws.size_variation_max)
     elif self.ws.doc_gen == 'eventing_small':
         self.docs = EventingSmallDocument(self.ws.size)
     elif self.ws.doc_gen == 'tpc_ds':
         self.docs = TpcDsDocument()
     elif self.ws.doc_gen == 'package':
         self.docs = PackageDocument(self.ws.size)
     elif self.ws.doc_gen == 'incompressible':
         self.docs = IncompressibleString(self.ws.size)
     elif self.ws.doc_gen == 'big_fun':
         self.docs = BigFunDocument()
     elif self.ws.doc_gen == 'multibucket':
         self.docs = MultiBucketDocument(self.ws.size)
     elif self.ws.doc_gen == 'advancedfilter':
         self.docs = AdvFilterDocument(self.ws.size)
     elif self.ws.doc_gen == 'advancedfilterxattr':
         self.docs = AdvFilterXattrBody(self.ws.size)
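
If doc_gen is set to a name that none of the branches recognize, self.docs is never assigned and the failure only surfaces later as an AttributeError. A hypothetical up-front check is sketched below; the helper and the name set (a subset of the generators listed above) are not part of the original code.

KNOWN_DOC_GENS = {'basic', 'string', 'nested', 'reverse_lookup', 'join',
                  'ref', 'profile', 'tpc_ds', 'package', 'incompressible'}


def check_doc_gen(ws):
    # Hypothetical helper: fail fast on an unsupported generator name
    # instead of hitting AttributeError on self.docs later on.
    name = getattr(ws, 'doc_gen', 'basic')
    if name not in KNOWN_DOC_GENS:
        raise ValueError('unsupported doc_gen: {!r}'.format(name))
    return name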
Example #6
    def __init__(self, workload_settings, target_settings,
                 shutdown_event=None):
        self.ws = workload_settings
        self.ts = target_settings
        self.shutdown_event = shutdown_event
        logger.setLevel(logging.INFO)

        self.existing_keys = ExistingKey(self.ws.working_set,
                                         self.ws.working_set_access,
                                         self.ts.prefix)
        self.new_keys = NewKey(self.ts.prefix, self.ws.expiration)
        self.keys_for_removal = KeyForRemoval(self.ts.prefix)

        if not hasattr(self.ws, 'doc_gen') or self.ws.doc_gen == 'old':
            extra_fields = False
            if (hasattr(self.ws, 'extra_doc_fields') and
                    self.ws.extra_doc_fields == 'yes'):
                extra_fields = True
            self.docs = NewDocument(self.ws.size, extra_fields)
        elif self.ws.doc_gen == 'new':
            self.docs = NewNestedDocument(self.ws.size)
        elif self.ws.doc_gen == 'merge':
            isRandom = True
            if self.ts.prefix == 'n1ql':
                isRandom = False
            self.docs = MergeDocument(self.ws.size,
                                      self.ws.doc_partitions,
                                      isRandom)
        elif self.ws.doc_gen == 'reverse_lookup':
            isRandom = True
            if self.ts.prefix == 'n1ql':
                isRandom = False
            self.docs = ReverseLookupDocument(self.ws.size,
                                              self.ws.doc_partitions,
                                              isRandom)
        elif self.ws.doc_gen == 'reverse_lookup_array_indexing':
            isRandom = True
            if self.ts.prefix == 'n1ql':
                isRandom = False
            if self.ws.updates:
                # add a random delta (0-10) to array values when updating docs
                self.docs = ReverseLookupDocumentArrayIndexing(
                    self.ws.size, self.ws.doc_partitions, self.ws.items,
                    delta=random.randint(0, 10))
            else:
                self.docs = ReverseLookupDocumentArrayIndexing(
                    self.ws.size, self.ws.doc_partitions, self.ws.items)
        elif self.ws.doc_gen == 'spatial':
            self.docs = NewDocumentFromSpatialFile(
                self.ws.spatial.data,
                self.ws.spatial.dimensionality)
        elif self.ws.doc_gen == 'large_subdoc':
            self.docs = NewLargeDocument(self.ws.size)

        self.next_report = 0.05  # report after every 5% of completion

        host, port = self.ts.node.split(':')
        # Only FTS uses proxyPort and authless bucket right now.
        # Instead of jumping through hoops to specify proxyPort in the target
        # iterator/settings, which only passes down very specific attributes,
        # just detect FTS instead. The code below does not work with an
        # authless bucket; FTS's worker does its own Couchbase.connect.
        if not (hasattr(self.ws, "fts") and
                hasattr(self.ws.fts, "doc_database_url")):
            # default sasl bucket
            self.init_db({'bucket': self.ts.bucket, 'host': host, 'port': port,
                          'username': self.ts.bucket,
                          'password': self.ts.password})

        self.fallingBehindCount = 0
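
The nested hasattr condition above decides whether the default SASL-bucket connection is created: init_db is skipped only when the workload defines both ws.fts and ws.fts.doc_database_url. A small self-contained illustration of that rule; the helper name, the stand-in settings objects and the URL are hypothetical.

from types import SimpleNamespace


def uses_default_bucket(ws):
    # Mirrors the condition above: only a workload that defines both ws.fts
    # and ws.fts.doc_database_url skips the default SASL-bucket connection.
    return not (hasattr(ws, 'fts') and hasattr(ws.fts, 'doc_database_url'))


ws_plain = SimpleNamespace()
ws_fts = SimpleNamespace(
    fts=SimpleNamespace(doc_database_url='http://example.com/docs'))
assert uses_default_bucket(ws_plain) is True
assert uses_default_bucket(ws_fts) is False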