def test_serialize(self):
  """Serializes a StringMetric into a MetricsCollection and dumps it.

  Returns the text-format proto as a list of lines so callers can golden-file
  compare it.
  """
  target = targets.DeviceTarget('reg', 'role', 'net', 'host')
  metric = metrics.StringMetric('test')
  metric.set('val')
  collection = metrics_pb2.MetricsCollection()
  metric.serialize_to(
      collection, 1234, (('bar', 1), ('baz', False)), metric.get(), target)
  return str(collection).splitlines()
def _wrap_proto(data):
  """Normalize MetricsData, list(MetricsData), and MetricsCollection.

  Args:
    data: A MetricsData, a list of MetricsData, or a MetricsCollection.

  Returns:
    A MetricsCollection with the appropriate data attribute set.
  """
  # Fix: the docstring documented this parameter as "input"; it is "data".
  if isinstance(data, metrics_pb2.MetricsCollection):
    # Already a collection; pass it through untouched.
    return data
  if isinstance(data, list):
    return metrics_pb2.MetricsCollection(data=data)
  # A single MetricsData: wrap it in a one-element collection.
  return metrics_pb2.MetricsCollection(data=[data])
def test_start_timestamp(self):
  """The start time (seconds) must appear in the proto as microseconds."""
  target = targets.DeviceTarget('reg', 'role', 'net', 'host')
  metric = metrics.CumulativeMetric('test', fields={'foo': 'bar'})
  metric.set(3.14)
  collection = metrics_pb2.MetricsCollection()
  metric.serialize_to(collection, 1234, (), metric.get(), target)
  self.assertEquals(1234000000, collection.data[0].start_timestamp_us)
def test_send(self, _discovery, _load_creds):
  """PubSubMonitor.send publishes every accepted payload shape to the topic."""
  mon = monitors.PubSubMonitor('/path/to/creds.p8.json', 'myproject',
                               'mytopic')
  mon._api = mock.MagicMock()
  topic = 'projects/myproject/topics/mytopic'

  # Send a single MetricsData, a list of them, and a whole collection.
  data1 = metrics_pb2.MetricsData(name='m1')
  mon.send(data1)
  data2 = metrics_pb2.MetricsData(name='m2')
  mon.send([data1, data2])
  coll = metrics_pb2.MetricsCollection(data=[data1, data2])
  mon.send(coll)

  def expected_body(pb):
    # Mirror the monitor's own wrapping and base64 encoding of the payload.
    wrapped = monitors.Monitor._wrap_proto(pb)
    return {
        'messages': [{
            'data': base64.b64encode(wrapped.SerializeToString())
        }]
    }

  publish = mon._api.projects.return_value.topics.return_value.publish
  publish.assert_has_calls([
      mock.call(topic=topic, body=expected_body(data1)),
      mock.call().execute(num_retries=5),
      mock.call(topic=topic, body=expected_body([data1, data2])),
      mock.call().execute(num_retries=5),
      mock.call(topic=topic, body=expected_body(coll)),
      mock.call().execute(num_retries=5),
  ])
def test_serialize_with_units(self):
  """A metric's units must survive serialization into the proto."""
  target = targets.DeviceTarget('reg', 'role', 'net', 'host')
  metric = metrics.GaugeMetric('test', units=metrics.MetricsDataUnits.SECONDS)
  metric.set(1)
  collection = metrics_pb2.MetricsCollection()
  metric.serialize_to(
      collection, 1234, (('bar', 1), ('baz', False)), metric.get(), target)
  self.assertEquals(collection.data[0].units,
                    metrics.MetricsDataUnits.SECONDS)
  return str(collection).splitlines()
def test_send_log(self):
  """DebugMonitor without a filename accepts all payload shapes (logs only)."""
  mon = monitors.DebugMonitor()
  data1 = metrics_pb2.MetricsData(name='m1')
  mon.send(data1)
  data2 = metrics_pb2.MetricsData(name='m2')
  mon.send([data1, data2])
  mon.send(metrics_pb2.MetricsCollection(data=[data1, data2]))
def test_start_timestamp(self):
  """Distribution metrics also serialize start time as microseconds."""
  target = targets.DeviceTarget('reg', 'role', 'net', 'host')
  metric = metrics.CumulativeDistributionMetric('test')
  for sample in (1, 5, 25):
    metric.add(sample)
  collection = metrics_pb2.MetricsCollection()
  metric.serialize_to(collection, 1234, (), metric.get(), target)
  self.assertEquals(1234000000, collection.data[0].start_timestamp_us)
def test_populate_target(self):
  """TaskTarget fills the proto's task submessage, task_num defaulting to 0."""
  collection = metrics_pb2.MetricsCollection()
  target = targets.TaskTarget('serv', 'job', 'reg', 'host')
  target.populate_target_pb(collection)
  task = collection.task
  self.assertEquals(task.service_name, 'serv')
  self.assertEquals(task.job_name, 'job')
  self.assertEquals(task.data_center, 'reg')
  self.assertEquals(task.host_name, 'host')
  self.assertEquals(task.task_num, 0)
def test_populate_target(self):
  """DeviceTarget fills network_device, with fixed realm and alertable."""
  collection = metrics_pb2.MetricsCollection()
  target = targets.DeviceTarget('reg', 'role', 'net', 'host')
  target.populate_target_pb(collection)
  device = collection.network_device
  self.assertEquals(device.metro, 'reg')
  self.assertEquals(device.role, 'role')
  self.assertEquals(device.hostgroup, 'net')
  self.assertEquals(device.hostname, 'host')
  self.assertEquals(device.realm, 'ACQ_CHROME')
  self.assertEquals(device.alertable, True)
def test_send_file(self):
  """DebugMonitor with a filename appends each payload's text proto to it."""
  with infra_libs.temporary_directory() as temp_dir:
    filename = os.path.join(temp_dir, 'out')
    mon = monitors.DebugMonitor(filename)
    data1 = metrics_pb2.MetricsData(name='m1')
    mon.send(data1)
    data2 = metrics_pb2.MetricsData(name='m2')
    mon.send([data1, data2])
    mon.send(metrics_pb2.MetricsCollection(data=[data1, data2]))
    with open(filename) as fh:
      output = fh.read()
  # m1 appears in all three sends, m2 only in the last two.
  self.assertEquals(output.count('data {\n  name: "m1"\n}'), 3)
  self.assertEquals(output.count('data {\n  name: "m2"\n}'), 2)
def flush():
  """Send all metrics that are registered in the application."""
  if not state.global_monitor or not state.target:
    raise errors.MonitoringNoConfiguredMonitorError(None)

  buf = metrics_pb2.MetricsCollection()
  for target, metric, start_time, fields_values in state.store.get_all():
    for fields, value in fields_values.iteritems():
      # Ship the batch before it grows past the per-request limit, then
      # reuse the same collection for the next batch.
      if len(buf.data) >= METRICS_DATA_LENGTH_LIMIT:
        state.global_monitor.send(buf)
        del buf.data[:]
      metric.serialize_to(buf, start_time, fields, value, target)
  # Send whatever remains (possibly an empty collection).
  state.global_monitor.send(buf)
def test_send_fails(self, _discovery, _load_creds):
  """Publish failures (ValueError, HttpError) are swallowed by send().

  Also checks that an exception type not in the 'except' clause propagates.
  """
  # Test for an occasional flake of .publish().execute().
  mon = monitors.PubSubMonitor('/path/to/creds.p8.json', 'myproject',
                               'mytopic')
  mon._api = mock.MagicMock()
  topic = 'projects/myproject/topics/mytopic'

  data1 = metrics_pb2.MetricsData(name='m1')
  mon.send(data1)

  publish = mon._api.projects.return_value.topics.return_value.publish

  publish.side_effect = ValueError()
  data2 = metrics_pb2.MetricsData(name='m2')
  mon.send([data1, data2])

  coll = metrics_pb2.MetricsCollection(data=[data1, data2])
  publish.side_effect = errors.HttpError(
      mock.Mock(status=404, reason='test'), '')
  mon.send(coll)

  # Test that all caught exceptions are specified without errors.
  # When multiple exceptions are specified in the 'except' clause,
  # they are evaluated lazily, and may contain syntax errors.
  # Throwing an uncaught exception forces all exception specs to be
  # evaluated, catching more runtime errors.
  publish.side_effect = Exception('uncaught')
  with self.assertRaises(Exception):
    mon.send(coll)

  def expected_body(pb):
    # Mirror the monitor's own wrapping and base64 encoding of the payload.
    wrapped = monitors.Monitor._wrap_proto(pb)
    return {
        'messages': [{
            'data': base64.b64encode(wrapped.SerializeToString())
        }]
    }

  # Only the first send reaches execute(); the failing sends stop at publish.
  publish.assert_has_calls([
      mock.call(topic=topic, body=expected_body(data1)),
      mock.call().execute(num_retries=5),
      mock.call(topic=topic, body=expected_body([data1, data2])),
      mock.call(topic=topic, body=expected_body(coll)),
  ])
def _test_send(self, http):
  """Shared driver: HttpsMonitor POSTs each payload shape to the endpoint."""
  mon = monitors.HttpsMonitor('endpoint', ':gce', http=http)
  ok_response = mock.MagicMock(spec=httplib2.Response, status=200)
  mon._http.request = mock.MagicMock(return_value=[ok_response, ""])

  data1 = metrics_pb2.MetricsData(name='m1')
  mon.send(data1)
  data2 = metrics_pb2.MetricsData(name='m2')
  mon.send([data1, data2])
  coll = metrics_pb2.MetricsCollection(data=[data1, data2])
  mon.send(coll)

  mon._http.request.assert_has_calls([
      mock.call('endpoint', method='POST', body=self.message(data1)),
      mock.call('endpoint', method='POST',
                body=self.message([data1, data2])),
      mock.call('endpoint', method='POST', body=self.message(coll)),
  ])
def flush():
  """Send all metrics that are registered in the application."""
  if not state.global_monitor or not state.target:
    raise errors.MonitoringNoConfiguredMonitorError(None)
  if not state.flush_enabled_fn():
    logging.debug('ts_mon: sending metrics is disabled.')
    return

  buf = metrics_pb2.MetricsCollection()
  for target, metric, start_time, fields_values in state.store.get_all():
    for fields, value in fields_values.iteritems():
      # Ship the batch before it grows past the per-request limit, then
      # reuse the same collection for the next batch.
      if len(buf.data) >= METRICS_DATA_LENGTH_LIMIT:
        state.global_monitor.send(buf)
        del buf.data[:]
      metric.serialize_to(buf, start_time, fields, value, target)
  # Send whatever remains (possibly an empty collection).
  state.global_monitor.send(buf)

  state.last_flushed = datetime.datetime.utcnow()
def test_convert(self): task = acquisition_task_pb2.Task(service_name='service') network_device = acquisition_network_device_pb2.NetworkDevice( hostname='host', alertable=True) metric1 = metrics_pb2.MetricsData( name='m1', counter=200, task=task, units=metrics_pb2.MetricsData.Units.Value('SECONDS')) metric2 = metrics_pb2.MetricsData(name='m2', network_device=network_device, cumulative_double_value=123.456) collection = metrics_pb2.MetricsCollection(data=[metric1, metric2], start_timestamp_us=12345) popo = pb_to_popo.convert(collection) expected = { 'data': [{ 'name': 'm1', 'counter': 200L, 'task': { 'service_name': 'service' }, 'units': 1 }, { 'name': 'm2', 'cumulative_double_value': 123.456, 'network_device': { 'hostname': 'host', 'alertable': True } }], 'start_timestamp_us': 12345L } self.assertDictEqual(expected, popo)