Example #1
 def test_achievements_length(self):
     docgen = NewNestedDocument(avg_size=0)
     for k in range(100000):
         key = '%012d' % k
         doc = docgen.next(key)
         self.assertLessEqual(len(doc['achievements']), 10)
         self.assertGreater(len(doc['achievements']), 0)
Example #2
 def test_query_formatting(self):
     docgen = NewNestedDocument(avg_size=self.SIZE)
     doc = docgen.next('test-key')
     queries = ['SELECT * from `bucket-1`;', 'SELECT count(*) from `bucket-1`;']
     qgen = N1QLQueryGen(queries=queries)
     _, _, query = qgen.next(doc)
     query.format(bucket='bucket-1')
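N1QLQueryGen evidently pairs a generated document with one of the configured query templates and yields a three-element tuple, of which only the query is used here. A minimal stand-in that satisfies the same unpacking, assuming simple round-robin selection (the class name and tuple layout are assumptions, not the library's contract):

from itertools import cycle

class RoundRobinQueryGen:
    """Hypothetical stand-in for N1QLQueryGen: cycles through templates."""

    def __init__(self, queries):
        self.queries = cycle(queries)

    def next(self, doc):
        # Matches the `_, _, query = qgen.next(doc)` unpacking above.
        return None, doc, next(self.queries)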
Example #3
 def test_gmtime_variation(self):
     docgen = NewNestedDocument(avg_size=0)
     timestamps = set()
     for k in range(1000):
         key = '%012d' % k
         doc = docgen.next(key)
         timestamps.add(doc['gmtime'])
     self.assertEqual(len(timestamps), 12)
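The assertion implies that 'gmtime' takes exactly 12 distinct values no matter how many keys are generated, i.e. the timestamp is bucketed, for example by month. A sketch of a generator that would pass this test (the bucketing rule is an assumption):

import time

def bucketed_gmtime(key):
    # Hypothetical: map the key into one of 12 month-sized buckets and
    # return the corresponding struct_time as a plain 9-tuple.
    bucket = int(key) % 12
    return tuple(time.gmtime(bucket * 30 * 24 * 3600))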
Example #4
 def test_doc_content(self):
     docgen = NewNestedDocument(avg_size=0)
     actual = docgen.next(key='000000000020')
     expected = {
         'name': {'f': {'f': {'f': 'ecdb3e e921c9'}}},
         'email': {'f': {'f': '*****@*****.**'}},
         'street': {'f': {'f': '400f1d0a'}},
         'city': {'f': {'f': '90ac48'}},
         'county': {'f': {'f': '40efd6'}},
         'state': {'f': 'WY'},
         'full_state': {'f': 'Montana'},
         'country': {'f': '1811db'},
         'realm': {'f': '15e3f5'},
         'coins': {'f': 213.54},
         'category': 1,
         'achievements': [0, 135, 92],
         'gmtime': (1972, 3, 3, 0, 0, 0, 4, 63, 0),
         'year': 1989,
         'body': '',
         'capped_small': '100_0',
         'capped_large': '3000_0',
     }
     self.assertEqual(actual, expected)
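Most scalar fields in the expected document are wrapped in one to three levels of {'f': ...} dictionaries. A small helper showing how such nesting can be produced (illustrative only, not the generator's actual code):

def nest(value, levels):
    # Wrap a value in `levels` layers of {'f': ...}, as in the dict above.
    for _ in range(levels):
        value = {'f': value}
    return value

assert nest('ecdb3e e921c9', 3) == {'f': {'f': {'f': 'ecdb3e e921c9'}}}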
Example #5
 def test_doc_size(self):
     docgen = NewNestedDocument(avg_size=self.SIZE)
     sizes = tuple(
         len(json.dumps(docgen.next(key='%012d' % i)))
         for i in range(10000))
     mean = np.mean(sizes)
     self.assertAlmostEqual(mean, 1152, delta=128)
     p97 = np.percentile(sizes, 97)
     self.assertLess(p97, 2048)
     p98 = np.percentile(sizes, 98)
     self.assertGreater(p98, 2048)
     self.assertLess(max(sizes), 2 * 1024**2)
     self.assertGreater(min(sizes), 0)
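The percentile assertions (97th below 2048 bytes, 98th above it) describe a heavy-tailed size distribution, which the variable-length 'body' field has to supply. One distribution with that shape is the exponential; a sketch under the assumption that 'body' is drawn this way:

import random

def build_body(alphabet, avg_size):
    # Hypothetical: draw the body length from an exponential distribution
    # centred on avg_size, which yields a long right tail of large docs.
    if not avg_size:
        return ''
    length = int(random.expovariate(1.0 / avg_size))
    return (alphabet * (length // len(alphabet) + 1))[:length]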
Example #6
 def test_doc_size(self):
     docgen = NewNestedDocument(avg_size=self.SIZE)
     sizes = tuple(
         len(json.dumps(docgen.next(key='%012d' % i)))
         for i in range(10000)
     )
     mean = np.mean(sizes)
     self.assertAlmostEqual(mean, 1024, delta=128)
     p97 = np.percentile(sizes, 97)
     self.assertLess(p97, 2048)
     p98 = np.percentile(sizes, 98)
     self.assertGreater(p98, 2048)
     self.assertLess(max(sizes), 2 * 1024 ** 2)
     self.assertGreater(min(sizes), 0)
Example #7
    def __init__(self, settings, workload, prefix=None):
        super(Latency, self).__init__(settings)
        self.clients = []
        for bucket in self.get_buckets():
            client = CBGen(bucket=bucket, host=settings.master_node,
                           username=bucket, password=settings.bucket_password)
            self.clients.append((bucket, client))

        self.existing_keys = ExistingKey(workload.working_set,
                                         workload.working_set_access,
                                         prefix=prefix)
        if not hasattr(workload, 'doc_gen') or workload.doc_gen == 'old':
            self.new_docs = NewDocument(workload.size)
        else:
            self.new_docs = NewNestedDocument(workload.size)
        self.items = workload.items
Example #8
    def __init__(self, settings, workload, prefix=None):
        super(Latency, self).__init__(settings)
        self.clients = []
        for bucket in self.get_buckets():
            client = CBGen(bucket=bucket,
                           host=settings.master_node,
                           username=bucket,
                           password=settings.bucket_password)
            self.clients.append((bucket, client))

        self.existing_keys = ExistingKey(workload.working_set,
                                         workload.working_set_access,
                                         prefix=prefix)
        self.new_keys = NewKey(prefix=prefix, expiration=workload.expiration)
        self.keys_for_removal = KeyForRemoval(prefix=prefix)

        if not hasattr(workload, 'doc_gen') or workload.doc_gen == 'old':
            self.new_docs = NewDocument(workload.size)
        elif workload.doc_gen == 'new':
            self.new_docs = NewNestedDocument(workload.size)
        elif workload.doc_gen == 'reverse_lookup':
            self.new_docs = ReverseLookupDocument(workload.size,
                                                  workload.doc_partitions)
        elif workload.doc_gen == 'reverse_lookup_array_indexing':
            self.new_docs = ReverseLookupDocumentArrayIndexing(
                workload.size, workload.doc_partitions, workload.items)
        self.items = workload.items
        self.n1ql_op = workload.n1ql_op
Example #9
 def test_doc_content(self):
     docgen = NewNestedDocument(avg_size=0)
     actual = docgen.next(key='000000000020')
     expected = {
         'name': {'f': {'f': {'f': 'ecdb3e e921c9'}}},
         'email': {'f': {'f': '*****@*****.**'}},
         'street': {'f': {'f': '400f1d0a'}},
         'city': {'f': {'f': '90ac48'}},
         'county': {'f': {'f': '40efd6'}},
         'state': {'f': 'WY'},
         'full_state': {'f': 'Montana'},
         'country': {'f': '1811db'},
         'realm': {'f': '15e3f5'},
         'coins': {'f': 213.54},
         'category': 1,
         'achievements': [0, 135, 92],
         'gmtime': (1972, 3, 3, 0, 0, 0, 4, 63, 0),
         'year': 1989,
         'body': '',
     }
     self.assertEqual(actual, expected)
Example #10
class SpringLatency(Latency):

    COLLECTOR = "spring_latency"

    METRICS = ("latency_set", "latency_get")

    def __init__(self, settings, workload, prefix=None):
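        # Note: super(Latency, ...) bypasses Latency.__init__ and invokes
        # the initializer of Latency's base class directly.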
        super(Latency, self).__init__(settings)
        self.clients = []
        for bucket in self.get_buckets():
            client = CBGen(bucket=bucket, host=settings.master_node,
                           username=bucket, password=settings.bucket_password)
            self.clients.append((bucket, client))

        self.existing_keys = ExistingKey(workload.working_set,
                                         workload.working_set_access,
                                         prefix=prefix)
        if not hasattr(workload, 'doc_gen') or workload.doc_gen == 'old':
            self.new_docs = NewDocument(workload.size)
        else:
            self.new_docs = NewNestedDocument(workload.size)
        self.items = workload.items

    def measure(self, client, metric):
        key = self.existing_keys.next(curr_items=self.items, curr_deletes=0)
        doc = self.new_docs.next(key)

        t0 = time()
        if metric == "latency_set":
            client.create(key, doc)
        elif metric == "latency_get":
            client.read(key)
        elif metric == "latency_cas":
            client.cas(key, doc)
        return 1000 * (time() - t0)  # Latency in ms

    def sample(self):
        for bucket, client in self.clients:
            samples = {}
            for metric in self.METRICS:
                samples[metric] = self.measure(client, metric)
            self.store.append(samples, cluster=self.cluster,
                              bucket=bucket, collector=self.COLLECTOR)
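The timing pattern in measure() generalizes to any client call: take a wall-clock timestamp before and after, and report the delta in milliseconds. The same idea as a self-contained helper (hypothetical, for illustration):

from time import time

def timed_ms(fn, *args, **kwargs):
    # Wall-clock latency of a single call, in milliseconds.
    t0 = time()
    fn(*args, **kwargs)
    return 1000 * (time() - t0)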
Example #11
def run(keys):
    docs = NewNestedDocument(avg_size=1024)
    for key in keys:
        docs.next(key)
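In the tests above, keys are 12-digit zero-padded strings, so a matching invocation of run would look like this (hypothetical driver code):

run('%012d' % i for i in range(1000))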
Example #12
 def test_deterministic(self):
     docgen = NewNestedDocument(avg_size=self.SIZE)
     d1 = docgen.next(key='mykey')
     d2 = docgen.next(key='mykey')
     d1['body'] = d2['body'] = None
     self.assertEqual(d1, d2)
Example #13
 def test_alphabet_size(self):
     docgen = NewNestedDocument(avg_size=self.SIZE)
     alphabet = docgen._build_alphabet('key')
     self.assertEqual(len(alphabet), 64)
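A deterministic 64-character alphabet per key can be built by concatenating two 32-character MD5 hex digests, e.g. of the key and its reverse. This is one way to satisfy the test, offered as an assumption about what _build_alphabet might do, not as its actual source:

from hashlib import md5

def build_alphabet(key):
    # Two 32-character hex digests -> 64 deterministic characters per key.
    return (md5(key.encode()).hexdigest() +
            md5(key[::-1].encode()).hexdigest())

assert len(build_alphabet('key')) == 64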
Example #14
    def __init__(self, workload_settings, target_settings,
                 shutdown_event=None):
        self.ws = workload_settings
        self.ts = target_settings
        self.shutdown_event = shutdown_event
        logger.setLevel(logging.INFO)

        self.existing_keys = ExistingKey(self.ws.working_set,
                                         self.ws.working_set_access,
                                         self.ts.prefix)
        self.new_keys = NewKey(self.ts.prefix, self.ws.expiration)
        self.keys_for_removal = KeyForRemoval(self.ts.prefix)

        if not hasattr(self.ws, 'doc_gen') or self.ws.doc_gen == 'old':
            extra_fields = False
            if (hasattr(self.ws, 'extra_doc_fields') and
                    self.ws.extra_doc_fields == 'yes'):
                extra_fields = True
            self.docs = NewDocument(self.ws.size, extra_fields)
        elif self.ws.doc_gen == 'new':
            self.docs = NewNestedDocument(self.ws.size)
        elif self.ws.doc_gen == 'merge':
            isRandom = True
            if self.ts.prefix == 'n1ql':
                isRandom = False
            self.docs = MergeDocument(self.ws.size,
                                      self.ws.doc_partitions,
                                      isRandom)
        elif self.ws.doc_gen == 'reverse_lookup':
            isRandom = True
            if self.ts.prefix == 'n1ql':
                isRandom = False
            self.docs = ReverseLookupDocument(self.ws.size,
                                              self.ws.doc_partitions,
                                              isRandom)
        elif self.ws.doc_gen == 'reverse_lookup_array_indexing':
            isRandom = True
            if self.ts.prefix == 'n1ql':
                isRandom = False
            if self.ws.updates:
                # plus 10 to all values in array when updating doc
                self.docs = ReverseLookupDocumentArrayIndexing(
                    self.ws.size, self.ws.doc_partitions, self.ws.items,
                    delta=random.randint(0, 10))
            else:
                self.docs = ReverseLookupDocumentArrayIndexing(
                    self.ws.size, self.ws.doc_partitions, self.ws.items)
        elif self.ws.doc_gen == 'spatial':
            self.docs = NewDocumentFromSpatialFile(
                self.ws.spatial.data,
                self.ws.spatial.dimensionality)
        elif self.ws.doc_gen == 'large_subdoc':
            self.docs = NewLargeDocument(self.ws.size)

        self.next_report = 0.05  # report after every 5% of completion

        host, port = self.ts.node.split(':')
        # Only FTS uses proxyPort and an authless bucket right now.
        # Instead of jumping through hoops to specify proxyPort in the
        # target iterator/settings, which only pass down very specific
        # attributes, just detect FTS instead. The following does not work
        # with an authless bucket: FTS's worker does its own
        # Couchbase.connect.
        if not (hasattr(self.ws, "fts") and
                hasattr(self.ws.fts, "doc_database_url")):
            # default sasl bucket
            self.init_db({'bucket': self.ts.bucket, 'host': host, 'port': port,
                          'username': self.ts.bucket,
                          'password': self.ts.password})

        self.fallingBehindCount = 0
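The if/elif chain above dispatches on self.ws.doc_gen; for the constructor-only cases it could equally be written as a lookup table. A sketch of that alternative (a refactoring idea, not the original code):

SIMPLE_GENERATORS = {
    'old': lambda ws: NewDocument(ws.size, False),
    'new': lambda ws: NewNestedDocument(ws.size),
    'large_subdoc': lambda ws: NewLargeDocument(ws.size),
}

def make_simple_docs(ws):
    # Fall back to 'old' when doc_gen is absent, as the original code does.
    return SIMPLE_GENERATORS[getattr(ws, 'doc_gen', 'old')](ws)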