def test_send(self):
  """The abstract base Monitor must refuse both send() and failed()."""
  monitor = monitors.Monitor()
  payload = metrics_pb2.MetricsPayload()
  with self.assertRaises(NotImplementedError):
    monitor.send(payload)
  with self.assertRaises(NotImplementedError):
    monitor.failed()
def test_send(self):
  """NullMonitor swallows any payload and never reports failure."""
  monitor = monitors.NullMonitor()
  payload = metrics_pb2.MetricsPayload()
  data_set = payload.metrics_collection.add().metrics_data_set.add()
  data_set.metric_name = 'm1'
  monitor.send(payload)
  self.assertFalse(monitor.failed())
def test_send_log(self, mock_logging_info):
  """DebugMonitor with no filename logs the payload via logging.info."""
  monitor = monitors.DebugMonitor()
  payload = metrics_pb2.MetricsPayload()
  data_set = payload.metrics_collection.add().metrics_data_set.add()
  data_set.metric_name = 'm1'
  monitor.send(payload)
  # Exactly one log call, whose second positional arg is the text proto.
  self.assertEqual(1, mock_logging_info.call_count)
  logged_text = mock_logging_info.call_args[0][1]
  self.assertIn('metrics_data_set {\n metric_name: "m1"\n }', logged_text)
def _generate_proto():
  """Generate MetricsPayload protos for global_monitor.send().

  Walks every (target, metric, start_time, end_time, fields->value)
  entry in state.store and packs the resulting MetricsData messages
  into MetricsPayload protos, yielding a payload each time it reaches
  METRICS_DATA_LENGTH_LIMIT data entries, plus a final partial payload
  if any data remains.

  Yields:
    metrics_pb2.MetricsPayload messages, each with at most
    METRICS_DATA_LENGTH_LIMIT MetricsData entries.
  """
  proto = metrics_pb2.MetricsPayload()

  # Key: Target, value: MetricsCollection.
  collections = {}

  # Key: (Target, metric name) tuple, value: MetricsDataSet.
  data_sets = {}

  count = 0
  for (target, metric, start_time, end_time,
       fields_values) in state.store.get_all():
    # BUG FIX: dict.iteritems() exists only on Python 2 and raises
    # AttributeError on Python 3; dict.items() behaves identically here
    # on both versions.
    for fields, value in fields_values.items():
      if count >= METRICS_DATA_LENGTH_LIMIT:
        # The current payload is full: flush it and start a fresh one.
        yield proto
        proto = metrics_pb2.MetricsPayload()
        collections.clear()
        data_sets.clear()
        count = 0

      if target not in collections:
        collections[target] = proto.metrics_collection.add()
        target.populate_target_pb(collections[target])
      collection = collections[target]

      key = (target, metric.name)
      new_data_set = None
      if key not in data_sets:
        new_data_set = metrics_pb2.MetricsDataSet()
        metric.populate_data_set(new_data_set)

      data = metrics_pb2.MetricsData()
      metric.populate_data(data, start_time, end_time, fields, value)

      # All required data protos have been successfully populated. Now we
      # can insert them in the serialized proto and bookkeeping data
      # structures.
      if new_data_set is not None:
        collection.metrics_data_set.add().CopyFrom(new_data_set)
        data_sets[key] = collection.metrics_data_set[-1]
      data_sets[key].data.add().CopyFrom(data)
      count += 1

  if count > 0:
    yield proto
def test_send_file(self):
  """DebugMonitor constructed with a filename writes the payload there."""
  with infra_libs.temporary_directory() as temp_dir:
    filename = os.path.join(temp_dir, 'out')
    monitor = monitors.DebugMonitor(filename)
    payload = metrics_pb2.MetricsPayload()
    data_set = payload.metrics_collection.add().metrics_data_set.add()
    data_set.metric_name = 'm1'
    monitor.send(payload)
    with open(filename) as fh:
      output = fh.read()
    self.assertIn('metrics_data_set {\n metric_name: "m1"\n }', output)
def test_send_http_failure(self, _load_creds):
  """A transport-level exception inside send() must not propagate."""
  mon = monitors.HttpsMonitor(
      'endpoint',
      monitors.CredentialFactory.from_string('/path/to/creds.p8.json'))
  mon._http.request = mock.MagicMock(side_effect=ValueError())

  payload = metrics_pb2.MetricsPayload()
  payload.metrics_collection.add().metrics_data_set.add().metric_name = 'a'
  mon.send(payload)

  mon._http.request.assert_called_once_with(
      'endpoint',
      method='POST',
      body=self.message(payload),
      headers={'Content-Type': 'application/json'})
def _test_send(self, http):
  """Shared helper: push one payload through HttpsMonitor and verify the POST."""
  mon = monitors.HttpsMonitor(
      'endpoint', monitors.CredentialFactory.from_string(':gce'), http=http)
  ok_response = mock.MagicMock(spec=httplib2.Response, status=200)
  mon._http.request = mock.MagicMock(return_value=[ok_response, ""])

  payload = metrics_pb2.MetricsPayload()
  payload.metrics_collection.add().metrics_data_set.add().metric_name = 'a'
  mon.send(payload)

  expected_call = mock.call(
      'endpoint',
      method='POST',
      body=self.message(payload),
      headers={'Content-Type': 'application/json'})
  mon._http.request.assert_has_calls([expected_call])
def test_send_resp_failure(self, _load_creds):
  """A non-2xx HTTP response from the endpoint must not raise."""
  mon = monitors.HttpsMonitor(
      'endpoint',
      monitors.CredentialFactory.from_string('/path/to/creds.p8.json'))
  bad_response = mock.MagicMock(spec=httplib2.Response, status=400)
  mon._http.request = mock.MagicMock(return_value=[bad_response, ""])

  payload = metrics_pb2.MetricsPayload()
  payload.metrics_collection.add().metrics_data_set.add().metric_name = 'a'
  mon.send(payload)

  mon._http.request.assert_called_once_with(
      'endpoint',
      method='POST',
      body=self.message(payload),
      headers={'Content-Type': 'application/json'})
def _generate_proto():
  """Generate MetricsPayload protos for global_monitor.send().

  Walks every (target, metric, start_times, end_time, fields->value)
  entry in state.store and packs the resulting MetricsData messages
  into MetricsPayload protos, yielding a payload whenever it reaches
  METRICS_DATA_LENGTH_LIMIT data entries, plus a final partial payload
  if any data remains.

  Yields:
    metrics_pb2.MetricsPayload messages, each with at most
    METRICS_DATA_LENGTH_LIMIT MetricsData entries.
  """
  payload = metrics_pb2.MetricsPayload()

  # Maps Target -> MetricsCollection inside the current payload.
  collections_by_target = {}
  # Maps (Target, metric name) -> MetricsDataSet inside the current payload.
  data_sets_by_key = {}

  num_data = 0
  for (target, metric, start_times, end_time,
       fields_values) in state.store.get_all():
    for fields, value in six.iteritems(fields_values):
      # By default, the start time of every data point in a single stream
      # is the time of the first value change in that stream (tracked in
      # start_times), until metric.reset() is invoked.
      #
      # e.g.,
      # At 00:00.
      # {value: 1,
      #  fields: ('metric:result': 'success', 'metric:command': 'get_name'),
      #  start_timestamp=0, end_timestamp=0}
      #
      # At 00:01.
      # {value: 1,
      #  fields: ('metric:result': 'success', 'metric:command': 'get_name'),
      #  start_timestamp=0, end_timestamp=1}
      #
      # At 00:02.
      # {value: 2,
      #  fields: ('metric:result': 'success', 'metric:command': 'get_name'),
      #  start_timestamp=0, end_timestamp=2}
      #
      # This matters for cumulative metrics: the monitoring backend uses
      # the start time to detect restarts of a monitoring target and
      # insert a reset point so Delta()/Rate() computations stay accurate.

      # If the metric carries its own start_time (set via
      # metric.dangerously_set_start_time()), report every data point with
      # that metric-level start_time; otherwise use the stream's
      # first-change time, falling back to end_time.
      start_time = metric.start_time or start_times.get(fields, end_time)

      if num_data >= METRICS_DATA_LENGTH_LIMIT:
        # The current payload is full: flush it and begin a fresh one.
        yield payload
        payload = metrics_pb2.MetricsPayload()
        collections_by_target.clear()
        data_sets_by_key.clear()
        num_data = 0

      if target not in collections_by_target:
        collections_by_target[target] = payload.metrics_collection.add()
        if isinstance(target, tuple):
          _populate_root_labels(
              collections_by_target[target].root_labels, target)
        else:
          target.populate_target_pb(collections_by_target[target])
      collection = collections_by_target[target]

      key = (target, metric.name)
      pending_data_set = None
      if key not in data_sets_by_key:
        pending_data_set = metrics_pb2.MetricsDataSet()
        metric.populate_data_set(pending_data_set)

      data = metrics_pb2.MetricsData()
      metric.populate_data(data, start_time, end_time, fields, value)

      # All required protos populated successfully; only now mutate the
      # payload and the bookkeeping maps.
      if pending_data_set is not None:
        collection.metrics_data_set.add().CopyFrom(pending_data_set)
        data_sets_by_key[key] = collection.metrics_data_set[-1]
      data_sets_by_key[key].data.add().CopyFrom(data)
      num_data += 1

  if num_data > 0:
    yield payload