def testDynamic(self):
    """Tests for dynamic stats."""
    DynamicRoot()
    self.assertEquals(100, scales.getStats()['dynamic']())
    DynamicRoot.value = 200
    self.assertEquals(200, scales.getStats()['dynamic']())

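# `DynamicRoot` is a test fixture defined elsewhere. A plausible minimal
# sketch, consistent with the assertions above -- the stat holds a callable,
# which is why the test invokes scales.getStats()['dynamic']() -- though the
# original fixture may differ:
class DynamicRoot(object):
    """Hypothetical root object exposing a dynamic (callable) stat."""
    value = 100
    dynamic = scales.Stat('dynamic')

    def __init__(self):
        scales.init(self)
        self.dynamic = lambda: DynamicRoot.value
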
def render_GET(self, request):
    """Renders a GET request by showing this node's stats and children."""
    fullPath = request.path.split('/')
    if not fullPath[-1]:
        fullPath = fullPath[:-1]
    parts = fullPath[2:]
    statDict = util.lookup(scales.getStats(), parts)
    if statDict is None:
        request.setResponseCode(404)
        return "Path not found."

    if 'query' in request.args:
        query = request.args['query'][0]
    else:
        query = None

    if 'format' in request.args and request.args['format'][0] == 'json':
        request.headers['content-type'] = 'text/javascript; charset=UTF-8'
        formats.jsonFormat(request, statDict, query)
    elif 'format' in request.args and request.args['format'][0] == 'prettyjson':
        request.headers['content-type'] = 'text/javascript; charset=UTF-8'
        formats.jsonFormat(request, statDict, query, pretty=True)
    else:
        formats.htmlHeader(request, '/' + '/'.join(parts), self.serverName, query)
        formats.htmlFormat(request, tuple(parts), statDict, query)
    return ''

def testIntDictStatsAggregation(self):
    """Tests for int dict stats."""
    root = AggregatingRoot()

    errorHolder = root.getChild(Child)
    errorHolder.errorsStat['400'] += 1
    errorHolder.errorsStat['400'] += 2
    errorHolder.errorsStat['404'] += 100
    errorHolder.errorsStat['400'] += 1

    self.assertEquals({
        'Root': {
            'errors': {'400': 4, '404': 100},
            'C': {
                'errors': {'400': 4, '404': 100}
            }
        }
    }, scales.getStats())

def get(self, path):  # pylint: disable=W0221
    """Renders a GET request by showing this node's stats and children."""
    path = path or ""
    path = path.lstrip("/")
    parts = path.split("/")
    if not parts[0]:
        parts = parts[1:]
    statDict = util.lookup(scales.getStats(), parts)
    if statDict is None:
        self.set_status(404)
        self.finish("Path not found.")
        return

    outputFormat = self.get_argument("format", default="html")
    query = self.get_argument("query", default=None)
    if outputFormat == "json":
        formats.jsonFormat(self, statDict, query)
    elif outputFormat == "prettyjson":
        formats.jsonFormat(self, statDict, query, pretty=True)
    else:
        formats.htmlHeader(self, "/" + path, self.serverName, query)
        formats.htmlFormat(self, tuple(parts), statDict, query)
    return None

def dump_stats(self):
    while not self.should_stop:
        try:
            data = scales.getStats()['celery']
            log.debug(data)
            metrics = {'celery.task.' + x: data.get(x, 0)
                       for x in ['started', 'succeeded', 'failed', 'retried']}
            metrics['celery.task.runtime'] = data.get('runtime', {}).get('median', -1)
            metrics['celery.task.queuetime'] = data.get('queuetime', {}).get('median', -1)

            discovery = []
            for key, value in data['queues'].items():
                metrics['celery.queue[%s]' % key] = value
                discovery.append({'{#QUEUENAME}': key})
            # See <https://www.zabbix.com/documentation/3.0/manual/discovery/low_level_discovery#creating_custom_lld_rules>
            # and <https://github.com/jbfavre/python-protobix/blob/1.0.1/protobix/datacontainer.py#L53>
            metrics['celery.discover.queues'] = json.dumps({'data': discovery})

            self._send_to_zabbix(metrics)
            log.debug('Dump thread going to sleep for %s seconds', self.dump_interval)
            time.sleep(self.dump_interval)
        except Exception:
            log.error('Uncaught exception, preventing thread from crashing.', exc_info=True)

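# For reference, a minimal sketch of the Zabbix low-level discovery payload
# that dump_stats() assembles above. The queue names here are illustrative,
# not taken from the original source:
import json

discovery = [{'{#QUEUENAME}': 'celery'}, {'{#QUEUENAME}': 'mail'}]
payload = json.dumps({'data': discovery})
print(payload)  # {"data": [{"{#QUEUENAME}": "celery"}, {"{#QUEUENAME}": "mail"}]}
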
def get(self, path):  # pylint: disable=W0221
    """Renders a GET request by showing this node's stats and children."""
    path = path or ''
    path = path.lstrip('/')
    parts = path.split('/')
    if not parts[0]:
        parts = parts[1:]
    statDict = util.lookup(scales.getStats(), parts)
    if statDict is None:
        self.set_status(404)
        self.finish('Path not found.')
        return

    outputFormat = self.get_argument('format', default='html')
    query = self.get_argument('query', default=None)
    if outputFormat == 'json':
        formats.jsonFormat(self, statDict, query)
    elif outputFormat == 'prettyjson':
        formats.jsonFormat(self, statDict, query, pretty=True)
    else:
        formats.htmlHeader(self, '/' + path, self.serverName, query)
        formats.htmlFormat(self, tuple(parts), statDict, query)
    return ''

def testIntDictStats(self):
    """Tests for int dict stats."""
    a = Root1()
    a.errorsStat['400'] += 1
    a.errorsStat['400'] += 2
    a.errorsStat['404'] += 100
    a.errorsStat['400'] -= 3

    a.activeUrlsStat['http://www.greplin.com'] += 1
    a.activeUrlsStat['http://www.google.com'] += 2
    a.activeUrlsStat['http://www.greplin.com'] -= 1

    self.assertEquals({
        'path': {
            'to': {
                'A': {
                    'errors': {'400': 0, '404': 100},
                    'activeUrls': {'http://www.google.com': 2}
                }
            }
        }
    }, scales.getStats())

def push(self, statsDict=None, prefix=None, path=None):
    """Push stat values out to Graphite."""
    if statsDict is None:
        statsDict = scales.getStats()
    prefix = prefix or self.prefix
    path = path or '/'
    for name, value in list(statsDict.items()):
        name = str(name)
        subpath = os.path.join(path, name)

        if self._pruned(subpath):
            continue

        if hasattr(value, '__call__'):
            try:
                value = value()
            except:  # pylint: disable=W0702
                value = None
                log.exception('Error when calling stat function for graphite push')

        if hasattr(value, 'items'):
            self.push(value, '%s%s.' % (prefix, self._sanitize(name)), subpath)
        elif self._forbidden(subpath, value):
            continue

        if six.PY3:
            type_values = (int, float)
        else:
            type_values = (int, long, float)
        if type(value) in type_values and len(name) < 500:
            self.graphite.log(prefix + self._sanitize(name), value)

def push(self, statsDict=None, prefix=None, path=None):
    """Push stat values out to Graphite."""
    if statsDict is None:
        statsDict = scales.getStats()
    prefix = prefix or self.prefix
    path = path or "/"
    for name, value in statsDict.iteritems():
        name = str(name)
        subpath = os.path.join(path, name)
        if hasattr(value, "iteritems"):
            self.push(value, "%s%s." % (prefix, self._sanitize(name)), subpath)
        else:
            if hasattr(value, "__call__"):
                try:
                    value = value()
                except:  # pylint: disable=W0702
                    value = None
                    logging.exception("Error when calling stat function for graphite push")
            if self._forbidden(subpath, value):
                continue
            else:
                if type(value) in [int, long, float] and len(name) < 500:
                    self.graphite.log(prefix + self._sanitize(name), value)

def push(self, statsDict=None, prefix=None, path=None):
    """Push stat values out."""
    if statsDict is None:
        statsDict = scales.getStats()
    prefix = prefix or self.prefix
    path = path or '/'
    for name, value in statsDict.items():
        name = str(name)
        subpath = os.path.join(path, name)

        if self._pruned(subpath):
            continue

        if hasattr(value, 'iteritems'):
            self.push(value, '%s%s.' % (prefix, self._sanitize(name)), subpath)
        else:
            if hasattr(value, '__call__'):
                try:
                    value = value()
                except:
                    value = None
                    log.exception('Error when calling stat function for push')
            if self._forbidden(subpath, value):
                continue
            elif type(value) in [int, long, float] and len(name) < 500:
                self.send(prefix + self._sanitize(name), value)

def bottlestats(server_name, path=''):
    """Renders a GET request by showing this node's stats and children."""
    path = path.lstrip('/')
    parts = path.split('/')
    if not parts[0]:
        parts = parts[1:]
    stat_dict = util.lookup(scales.getStats(), parts)
    if stat_dict is None:
        abort(404, "Not Found")
        return

    output = StringIO()
    output_format = request.query.get('format', 'html')
    query = request.query.get('query', None)
    if output_format == 'json':
        response.content_type = "application/json"
        formats.jsonFormat(output, stat_dict, query)
    elif output_format == 'prettyjson':
        formats.jsonFormat(output, stat_dict, query, pretty=True)
        response.content_type = "application/json"
    else:
        formats.htmlHeader(output, '/' + path, server_name, query)
        formats.htmlFormat(output, tuple(parts), stat_dict, query)
        response.content_type = "text/html"

    return output.getvalue()

def push(self, statsDict=None, prefix=None, path=None):
    """Push stat values out to Graphite."""
    if statsDict is None:
        statsDict = scales.getStats()
    prefix = prefix or self.prefix
    path = path or '/'
    for name, value in statsDict.items():
        name = str(name)
        subpath = os.path.join(path, name)

        if self._pruned(subpath):
            continue

        if hasattr(value, '__call__'):
            try:
                value = value()
            except:  # pylint: disable=W0702
                value = None
                logging.exception('Error when calling stat function for graphite push')

        if hasattr(value, 'iteritems'):
            self.push(value, '%s%s.' % (prefix, self._sanitize(name)), subpath)
        elif self._forbidden(subpath, value):
            continue
        elif type(value) in numeric_types and len(name) < 500:
            self.graphite.log(prefix + self._sanitize(name), value)

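# The push() variants above rely on a self._sanitize() helper that is not
# shown here. A minimal sketch of what such a sanitizer might do, assuming it
# only needs to keep names safe inside Graphite's dot-delimited metric paths
# (the real implementation may differ):
import re

def _sanitize(name):
    """Replace characters that are unsafe in a Graphite metric path segment."""
    return re.sub(r'[^\w.-]+', '_', name)
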
def testChildTypeStats(self):
    """Tests for child stats with typed children (auto-numbered)."""
    a = Root1()
    a.stateStat = 'abc'
    c = a.getChild(TypedChild)
    c.countStat += 1
    b = Root2()
    c = b.getChild(TypedChild)
    c.countStat += 2

    self.assertEquals({
        'path': {
            'to': {
                'A': {
                    'state': 'abc',
                    'C': {
                        '1': {'count': 1}
                    }
                }
            }
        },
        'B': {
            'C': {
                '2': {'count': 2}
            }
        }
    }, scales.getStats())

def testChildStats(self):
    """Tests for child scales."""
    a = Root1()
    a.stateStat = 'abc'
    c = a.getChild(Child)
    c.countStat += 1
    b = Root2()
    c = b.getChild(Child)
    c.countStat += 2

    self.assertEquals({
        'path': {
            'to': {
                'A': {
                    'state': 'abc',
                    'C': {'count': 1}
                }
            }
        },
        'B': {
            'C': {'count': 2}
        }
    }, scales.getStats())

def test_metrics_per_cluster(self):
    """
    Test to validate that metrics can be scoped to individual clusters

    @since 3.6.0
    @jira_ticket PYTHON-561
    @expected_result metrics should be scoped to a cluster level

    @test_category metrics
    """
    cluster2 = Cluster(metrics_enabled=True,
                       protocol_version=PROTOCOL_VERSION,
                       default_retry_policy=FallthroughRetryPolicy())
    cluster2.connect(self.ks_name, wait_for_all_pools=True)

    self.assertEqual(len(cluster2.metadata.all_hosts()), 3)

    query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name),
                            consistency_level=ConsistencyLevel.ALL)
    self.session.execute(query)

    # Pause node so it shows as unreachable to coordinator
    get_node(1).pause()

    try:
        # Test write
        query = SimpleStatement("INSERT INTO {0}.{0} (k, v) VALUES (2, 2)".format(self.ks_name),
                                consistency_level=ConsistencyLevel.ALL)
        with self.assertRaises(WriteTimeout):
            self.session.execute(query, timeout=None)
    finally:
        get_node(1).resume()

    # Change the scales stats_name of cluster2
    cluster2.metrics.set_stats_name('cluster2-metrics')

    stats_cluster1 = self.cluster.metrics.get_stats()
    stats_cluster2 = cluster2.metrics.get_stats()

    # Test direct access to stats
    self.assertEqual(1, self.cluster.metrics.stats.write_timeouts)
    self.assertEqual(0, cluster2.metrics.stats.write_timeouts)

    # Test direct access to a child stats
    self.assertNotEqual(0.0, self.cluster.metrics.request_timer['mean'])
    self.assertEqual(0.0, cluster2.metrics.request_timer['mean'])

    # Test access via metrics.get_stats()
    self.assertNotEqual(0.0, stats_cluster1['request_timer']['mean'])
    self.assertEqual(0.0, stats_cluster2['request_timer']['mean'])

    # Test access by stats_name
    self.assertEqual(0.0, scales.getStats()['cluster2-metrics']['request_timer']['mean'])

    cluster2.shutdown()

def jsonFormat(output, statDict=None, query=None, pretty=False):
    """Formats as JSON, writing to the given object."""
    statDict = statDict or scales.getStats()
    if query:
        statDict = runQuery(statDict, query)
    indent = (pretty and 2) or None
    json.dump(statDict, output, cls=scales.StatContainerEncoder, indent=indent)
    output.write('\n')

def test_collects_task_events(celery_worker):
    receiver = celery_zabbix.Receiver(celery_zabbix.conftest.CELERY)
    # X + task started + task succeeded
    thread = threading.Thread(target=lambda: receiver(limit=3))
    thread.start()
    celery_zabbix.conftest.celery_ping.delay().get()
    thread.join()
    assert scales.getStats()['celery'].get('succeeded') == 1

def jsonFormat(output, statDict=None, query=None, pretty=False):
    """Formats as JSON, writing to the given object."""
    statDict = statDict or scales.getStats()
    if query:
        statDict = runQuery(statDict, query)
    indent = 2 if pretty else None
    json.dump(statDict, output, cls=scales.StatContainerEncoder, indent=indent)
    output.write('\n')

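# A hypothetical usage example for jsonFormat(): dump the whole stats tree as
# pretty-printed JSON into an in-memory buffer. The module path matches the
# one used elsewhere in this file (greplin.scales.formats):
import io

from greplin.scales import formats

buf = io.StringIO()
formats.jsonFormat(buf, pretty=True)
print(buf.getvalue())
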
def helpTestStatSum(self, a):
    """Helps test summed stats."""
    c = a.getChild(Child)
    self.assertEquals({
        'Root': {
            'C': {}
        }
    }, scales.getStats())

    c.countStat += 2
    self.assertEquals({
        'Root': {
            'count': 2,
            'C': {'count': 2}
        }
    }, scales.getStats())

    d = a.getChild(Child, 'D')
    self.assertEquals({
        'Root': {
            'count': 2,
            'C': {'count': 2},
            'D': {}
        }
    }, scales.getStats())

    c.countStat -= 1
    d.countStat += 5
    self.assertEquals({
        'Root': {
            'count': 6,
            'C': {'count': 1},
            'D': {'count': 5}
        }
    }, scales.getStats())

def testStatHistogram(self):
    """Tests for stats aggregated into a histogram."""
    a = AggregatingRoot()
    c = a.getChild(Child)
    d = a.getChild(Child, 'D')

    # Do it twice to make sure it's idempotent.
    for _ in range(2):
        c.stateStat = 'good'
        d.stateStat = 'bad'
        self.assertEquals({
            'Root': {
                'state': {'good': 1, 'bad': 1},
                'C': {'state': 'good'},
                'D': {'state': 'bad'}
            }
        }, scales.getStats())

    c.stateStat = 'great'
    d.stateStat = 'great'
    self.assertEquals({
        'Root': {
            'state': {'great': 2, 'good': 0, 'bad': 0},
            'C': {'state': 'great'},
            'D': {'state': 'great'}
        }
    }, scales.getStats())

def get(self, path):
    """Return the `greplin.scales` stats collected so far."""
    path = path or ''
    path = path.lstrip('/')
    parts = path.split('/')
    if not parts[0]:
        parts = parts[1:]
    statDict = util.lookup(scales.getStats(), parts)
    serialized = json.dumps(statDict, cls=scales.StatContainerEncoder)
    self.set_header('Content-Type', 'application/json')
    self.finish(serialized)

def testCollection(self):
    """Tests for a stat collection."""
    collection = scales.collection('/thePath',
                                   scales.IntStat('count'),
                                   scales.IntDictStat('histo'))
    collection.count += 100
    collection.histo['cheese'] += 12300
    collection.histo['cheese'] += 45

    self.assertEquals({
        'thePath': {
            'count': 100,
            'histo': {'cheese': 12345}
        }
    }, scales.getStats())

def jsonFormat(output, statDict=None, query=None, pretty=False):
    """Formats as JSON, writing to the given object."""
    statDict = statDict or scales.getStats()
    if query:
        statDict = runQuery(statDict, query)
    indent = 2 if pretty else None

    # At first, assume that strings are in UTF-8. If this fails -- if, for example, we have
    # crazy binary data -- then in order to get *something* out, we assume ISO-8859-1,
    # which maps each byte to a unicode code point.
    # (Note: json.dumps() only accepts an `encoding` argument on Python 2.)
    try:
        serialized = json.dumps(statDict, cls=scales.StatContainerEncoder, indent=indent)
    except UnicodeDecodeError:
        serialized = json.dumps(statDict, cls=scales.StatContainerEncoder,
                                indent=indent, encoding='iso-8859-1')

    output.write(serialized)
    output.write('\n')

def testMultilevelChild(self):
    """Tests for multi-level child stats."""
    a = Root1()
    c = a.getChild(Child, 'sub/path')
    c.countStat += 1

    self.assertEquals({
        'path': {
            'to': {
                'A': {
                    'sub': {
                        'path': {'count': 1}
                    }
                }
            }
        }
    }, scales.getStats())

def get(self):
    parts = []
    statDict = greplin.scales.twistedweb.util.lookup(scales.getStats(), parts)
    if statDict is None:
        self.set_status(404)
        self.write("Path not found.")
        return

    query = self.get_argument('query', default=None)
    if self.get_argument('format', default=None) == 'json':
        self.set_header('content-type', 'text/javascript; charset=UTF-8')
        greplin.scales.formats.jsonFormat(self, statDict, query)
    elif self.get_argument('format', default=None) == 'prettyjson':
        self.set_header('content-type', 'text/javascript; charset=UTF-8')
        greplin.scales.formats.jsonFormat(self, statDict, query, pretty=True)
    else:
        greplin.scales.formats.htmlHeader(self, '/' + '/'.join(parts), 'svr', query)
        greplin.scales.formats.htmlFormat(self, tuple(parts), statDict, query)

def _push(self, statsDict=None, prefix=None, path=None):
    """Push stat values out to Graphite."""
    if statsDict is None:
        statsDict = scales.getStats()
    prefix = prefix or self.prefix
    path = path or '/'
    for name, value in statsDict.iteritems():
        name = str(name)
        subpath = os.path.join(path, name)

        if subpath in self.forbidden:
            continue

        if hasattr(value, 'iteritems'):
            self._push(value, '%s%s.' % (prefix, self._sanitize(name)), subpath)
        else:
            if hasattr(value, '__call__'):
                value = value()
            for rule in self.rules:
                if not rule(name, value):
                    break
            else:
                self.graphite.log(prefix + self._sanitize(name), value)

def statsHandler(serverName, path=''):
    """Renders a GET request by showing this node's stats and children."""
    path = path.lstrip('/')
    parts = path.split('/')
    if not parts[0]:
        parts = parts[1:]
    statDict = util.lookup(scales.getStats(), parts)
    if statDict is None:
        abort(404, 'No stats found with path /%s' % '/'.join(parts))

    output = StringIO()
    outputFormat = request.args.get('format', 'html')
    query = request.args.get('query', None)
    if outputFormat == 'json':
        formats.jsonFormat(output, statDict, query)
    elif outputFormat == 'prettyjson':
        formats.jsonFormat(output, statDict, query, pretty=True)
    else:
        formats.htmlHeader(output, '/' + path, serverName, query)
        formats.htmlFormat(output, tuple(parts), statDict, query)

    return output.getvalue()

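# Hypothetical wiring for the Flask handler above; the '/status/' prefix, the
# endpoint names, and the partial() binding are illustrative, not taken from
# the original source:
import functools

from flask import Flask

app = Flask(__name__)
handler = functools.partial(statsHandler, 'my-server')
app.add_url_rule('/status/', 'statsHandler', handler)
app.add_url_rule('/status/<path:path>', 'statsHandlerPath', handler)
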
def statsHandler(serverName, path=""):
    """Renders a GET request by showing this node's stats and children."""
    path = path.lstrip("/")
    parts = path.split("/")
    if not parts[0]:
        parts = parts[1:]
    statDict = util.lookup(scales.getStats(), parts)
    if statDict is None:
        abort(404)
        return

    output = StringIO()
    outputFormat = request.args.get("format", "html")
    query = request.args.get("query", None)
    if outputFormat == "json":
        formats.jsonFormat(output, statDict, query)
    elif outputFormat == "prettyjson":
        formats.jsonFormat(output, statDict, query, pretty=True)
    else:
        formats.htmlHeader(output, "/" + path, serverName, query)
        formats.htmlFormat(output, tuple(parts), statDict, query)

    return output.getvalue()

def htmlFormat(output, pathParts=(), statDict=None, query=None):
    """Formats as HTML, writing to the given object."""
    statDict = statDict or scales.getStats()
    if query:
        statDict = runQuery(statDict, query)
    _htmlRenderDict(pathParts, statDict, output)

def file_transmit(session, futures, tbl_entry, full_name):
    global file_counter
    trace(1, "Uploading file '" + full_name + "' ... ")

    cols = tbl_entry[2].split(",")
    keys = tbl_entry[3].split(",")
    fieldnames = tbl_entry[2].split(",")

    # Remove extra service column
    if options.mode == CassMode.MODE_PLAIN:
        cols.remove('hvr_is_deleted')

    col_names = ','.join([str(x) for x in cols])
    cols_placeholders = ','.join(['%s' for x in cols])
    keys_placeholders = ' AND '.join([x + '=%s' for x in keys])
    base_name = tbl_entry[0]

    # statistics information
    start = timer()
    row_count = 0
    prepare_query_time = 0.0
    read_file = 0.0
    row_size = 0

    with open(full_name) as csvfile:
        reader = csv.DictReader(csvfile, fieldnames=fieldnames)
        start_read = timer()
        for row in reader:
            # Sending a single row
            file_time_s = timer()
            read_file += (file_time_s - start_read)
            trace(3, "Sending row #{0}".format(row_count + 1))

            query = None
            values = []
            if options.mode == CassMode.MODE_PLAIN:
                if int(row['hvr_is_deleted']) == 1:
                    query = "DELETE FROM {0} WHERE {1}".format(base_name, keys_placeholders)
                    [values.append(str(row[k])) for k in keys]
                    for k in keys:
                        row_size += len(row[k])
                else:
                    query = "INSERT INTO {0} ({1}) VALUES ({2})".format(
                        base_name, col_names, cols_placeholders)
                    [values.append(str(row[c])) for c in cols]
            else:
                query = "INSERT INTO {0} ({1}) VALUES ({2})".format(
                    base_name, col_names, cols_placeholders)
                [values.append(str(row[c])) for c in cols]
                for c in cols:
                    row_size += len(row[c])
            prepare_query_time += (timer() - file_time_s)

            if row_count > 0 and row_count % (QUEUE_SIZE - 1) == 0:
                # clear the existing queue
                while True:
                    cass_e = timer()
                    try:
                        futures.get_nowait().result()
                    except queue.Empty:
                        break

            future = session.execute_async(query, values)
            futures.put_nowait(future)
            row_count = row_count + 1
            start_read = timer()

    # wait for the message transmitting process to finish
    while True:
        try:
            futures.get_nowait().result()
        except queue.Empty:
            break

    finish_time = timer()
    elapsed_time = finish_time - start
    cass_time = elapsed_time - prepare_query_time - read_file
    print("Uploading '{0}' is finished".format(full_name))
    print("Transmitted {0} rows".format(row_count))
    print("Spent time {0:.2f} secs".format(elapsed_time))
    trace(1, "Read file {0:.2f} secs".format(read_file))
    trace(1, "Prepare queries {0:.2f} secs".format(prepare_query_time))
    trace(1, "Put data into Cassandra {0:.2f} secs".format(cass_time))
    trace(1, "Speed {0:.2f} rows/secs".format(row_count / elapsed_time))
    trace(1, "Cass speed {0:.2f} rows/secs".format(row_count / cass_time))
    trace(1, "Avg row size {0:.0f} bytes".format(row_size / row_count))

    if options.trace >= 2:
        stats = scales.getStats()['cassandra']
        print("Connection errors: %d" % stats['connection_errors'])
        print("Write timeouts: %d" % stats['write_timeouts'])
        print("Read timeouts: %d" % stats['read_timeouts'])
        print("Unavailables: %d" % stats['unavailables'])
        print("Other errors: %d" % stats['other_errors'])
        print("Retries: %d" % stats['retries'])

        request_timer = stats['request_timer']
        print("Request latencies:")
        print("  count: %d" % request_timer['count'])
        print("  min: %0.4fs" % request_timer['min'])
        print("  max: %0.4fs" % request_timer['max'])
        print("  mean: %0.4fs" % request_timer['mean'])
        print("  stddev: %0.4fs" % request_timer['stddev'])
        print("  median: %0.4fs" % request_timer['median'])
        print("  75th: %0.4fs" % request_timer['75percentile'])
        print("  95th: %0.4fs" % request_timer['95percentile'])
        print("  98th: %0.4fs" % request_timer['98percentile'])
        print("  99th: %0.4fs" % request_timer['99percentile'])
        print("  99.9th: %0.4fs" % request_timer['999percentile'])

    # remove successfully transmitted file
    os.remove(full_name)
    file_counter = file_counter + 1

def benchmark(thread_class):
    options, args = parse_options()
    for conn_class in options.supported_reactors:
        setup(options)
        log.info("==== %s ====" % (conn_class.__name__,))

        kwargs = {'metrics_enabled': options.enable_metrics,
                  'connection_class': conn_class}
        if options.protocol_version:
            kwargs['protocol_version'] = options.protocol_version
        cluster = Cluster(options.hosts, **kwargs)
        session = cluster.connect(options.keyspace)

        log.debug("Sleeping for two seconds...")
        time.sleep(2.0)

        # Generate the query
        if options.read:
            query = "SELECT * FROM {0} WHERE thekey = '{{key}}'".format(TABLE)
        else:
            query = "INSERT INTO {0} (thekey".format(TABLE)
            for i in range(options.num_columns):
                query += ", col{0}".format(i)
            query += ") VALUES ('{key}'"
            for i in range(options.num_columns):
                query += ", {0}".format(COLUMN_VALUES[options.column_type])
            query += ")"

        values = None  # we don't use that anymore. Keeping it in case we go back to prepared statements.
        per_thread = options.num_ops // options.threads
        threads = []

        log.debug("Beginning {0}...".format('reads' if options.read else 'inserts'))
        start = time.time()
        try:
            for i in range(options.threads):
                thread = thread_class(i, session, query, values, per_thread,
                                      cluster.protocol_version, options.profile)
                thread.daemon = True
                threads.append(thread)

            for thread in threads:
                thread.start()

            for thread in threads:
                while thread.is_alive():
                    thread.join(timeout=0.5)
            end = time.time()
        finally:
            cluster.shutdown()
            teardown(options)

        total = end - start
        log.info("Total time: %0.2fs" % total)
        log.info("Average throughput: %0.2f/sec" % (options.num_ops / total))
        if options.enable_metrics:
            stats = scales.getStats()['cassandra']
            log.info("Connection errors: %d", stats['connection_errors'])
            log.info("Write timeouts: %d", stats['write_timeouts'])
            log.info("Read timeouts: %d", stats['read_timeouts'])
            log.info("Unavailables: %d", stats['unavailables'])
            log.info("Other errors: %d", stats['other_errors'])
            log.info("Retries: %d", stats['retries'])

            request_timer = stats['request_timer']
            log.info("Request latencies:")
            log.info("  min: %0.4fs", request_timer['min'])
            log.info("  max: %0.4fs", request_timer['max'])
            log.info("  mean: %0.4fs", request_timer['mean'])
            log.info("  stddev: %0.4fs", request_timer['stddev'])
            log.info("  median: %0.4fs", request_timer['median'])
            log.info("  75th: %0.4fs", request_timer['75percentile'])
            log.info("  95th: %0.4fs", request_timer['95percentile'])
            log.info("  98th: %0.4fs", request_timer['98percentile'])
            log.info("  99th: %0.4fs", request_timer['99percentile'])
            log.info("  99.9th: %0.4fs", request_timer['999percentile'])

def benchmark(thread_class):
    options, args = parse_options()
    for conn_class in options.supported_reactors:
        setup(options.hosts)
        log.info("==== %s ====" % (conn_class.__name__,))

        kwargs = {'metrics_enabled': options.enable_metrics,
                  'connection_class': conn_class}
        if options.protocol_version:
            kwargs['protocol_version'] = options.protocol_version
        cluster = Cluster(options.hosts, **kwargs)
        session = cluster.connect(KEYSPACE)

        log.debug("Sleeping for two seconds...")
        time.sleep(2.0)

        query = session.prepare("""
            INSERT INTO {table} (thekey, col1, col2) VALUES (?, ?, ?)
            """.format(table=TABLE))
        values = ('key', 'a', 'b')

        per_thread = options.num_ops // options.threads
        threads = []

        log.debug("Beginning inserts...")
        start = time.time()
        try:
            for i in range(options.threads):
                thread = thread_class(i, session, query, values, per_thread,
                                      cluster.protocol_version, options.profile)
                thread.daemon = True
                threads.append(thread)

            for thread in threads:
                thread.start()

            for thread in threads:
                while thread.is_alive():
                    thread.join(timeout=0.5)
            end = time.time()
        finally:
            cluster.shutdown()
            teardown(options.hosts)

        total = end - start
        log.info("Total time: %0.2fs" % total)
        log.info("Average throughput: %0.2f/sec" % (options.num_ops / total))
        if options.enable_metrics:
            stats = scales.getStats()['cassandra']
            log.info("Connection errors: %d", stats['connection_errors'])
            log.info("Write timeouts: %d", stats['write_timeouts'])
            log.info("Read timeouts: %d", stats['read_timeouts'])
            log.info("Unavailables: %d", stats['unavailables'])
            log.info("Other errors: %d", stats['other_errors'])
            log.info("Retries: %d", stats['retries'])

            request_timer = stats['request_timer']
            log.info("Request latencies:")
            log.info("  min: %0.4fs", request_timer['min'])
            log.info("  max: %0.4fs", request_timer['max'])
            log.info("  mean: %0.4fs", request_timer['mean'])
            log.info("  stddev: %0.4fs", request_timer['stddev'])
            log.info("  median: %0.4fs", request_timer['median'])
            log.info("  75th: %0.4fs", request_timer['75percentile'])
            log.info("  95th: %0.4fs", request_timer['95percentile'])
            log.info("  98th: %0.4fs", request_timer['98percentile'])
            log.info("  99th: %0.4fs", request_timer['99percentile'])
            log.info("  99.9th: %0.4fs", request_timer['999percentile'])

def get_stats(self):
    """Returns the metrics for the registered cluster instance."""
    return scales.getStats()[self.stats_name]

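# Hypothetical usage of get_stats(), assuming a cassandra-driver Cluster
# created with metrics_enabled=True and a reachable local node; the stat key
# names match those dumped elsewhere in this file ('request_timer', etc.):
from cassandra.cluster import Cluster

cluster = Cluster(['127.0.0.1'], metrics_enabled=True)
session = cluster.connect()
session.execute("SELECT release_version FROM system.local")
print(cluster.metrics.get_stats()['request_timer']['count'])
cluster.shutdown()
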
def benchmark(thread_class):
    options, args = parse_options()
    for conn_class in options.supported_reactors:
        setup(options.hosts)
        log.info("==== %s ====" % (conn_class.__name__,))

        cluster = Cluster(options.hosts, metrics_enabled=options.enable_metrics)
        cluster.connection_class = conn_class
        session = cluster.connect(KEYSPACE)

        log.debug("Sleeping for two seconds...")
        time.sleep(2.0)

        query = SimpleStatement("""
            INSERT INTO {table} (thekey, col1, col2)
            VALUES (%(key)s, %(a)s, %(b)s)
            """.format(table=TABLE))
        values = {"key": "key", "a": "a", "b": "b"}

        per_thread = options.num_ops / options.threads
        threads = []

        log.debug("Beginning inserts...")
        start = time.time()
        try:
            for i in range(options.threads):
                thread = thread_class(i, session, query, values, per_thread, options.profile)
                thread.daemon = True
                threads.append(thread)

            for thread in threads:
                thread.start()

            for thread in threads:
                while thread.is_alive():
                    thread.join(timeout=0.5)
            end = time.time()
        finally:
            teardown(options.hosts)

        total = end - start
        log.info("Total time: %0.2fs" % total)
        log.info("Average throughput: %0.2f/sec" % (options.num_ops / total))
        if options.enable_metrics:
            stats = scales.getStats()["cassandra"]
            log.info("Connection errors: %d", stats["connection_errors"])
            log.info("Write timeouts: %d", stats["write_timeouts"])
            log.info("Read timeouts: %d", stats["read_timeouts"])
            log.info("Unavailables: %d", stats["unavailables"])
            log.info("Other errors: %d", stats["other_errors"])
            log.info("Retries: %d", stats["retries"])

            request_timer = stats["request_timer"]
            log.info("Request latencies:")
            log.info("  min: %0.4fs", request_timer["min"])
            log.info("  max: %0.4fs", request_timer["max"])
            log.info("  mean: %0.4fs", request_timer["mean"])
            log.info("  stddev: %0.4fs", request_timer["stddev"])
            log.info("  median: %0.4fs", request_timer["median"])
            log.info("  75th: %0.4fs", request_timer["75percentile"])
            log.info("  95th: %0.4fs", request_timer["95percentile"])
            log.info("  98th: %0.4fs", request_timer["98percentile"])
            log.info("  99th: %0.4fs", request_timer["99percentile"])
            log.info("  99.9th: %0.4fs", request_timer["999percentile"])