def test_break_at_tag_boundary(self):
    """The sender splits into two requests when a second tag won't fit."""
    mock_client = _create_mock_client()
    # Tag name lengths chosen so one tag fits in a request but two do not.
    # Note that tag names appear in both `Tag.name` and the summary metadata.
    first_tag = "a" * 384
    second_tag = "b" * 384
    event = event_pb2.Event(step=1)
    event.summary.value.add(tag=first_tag, simple_value=1.0)
    event.summary.value.add(tag=second_tag, simple_value=2.0)
    sender = _create_request_sender("123", mock_client)
    sender.send_requests({"train": [event]})

    actual = [call[0][0] for call in mock_client.WriteScalar.call_args_list]
    for req in actual:
        _clear_wall_times(req)

    # Expect one request per tag, each carrying a single point.
    expected = [
        write_service_pb2.WriteScalarRequest(experiment_id="123")
        for _ in range(2)
    ]
    (
        expected[0]
        .runs.add(name="train")
        .tags.add(name=first_tag, metadata=test_util.scalar_metadata(first_tag))
        .points.add(step=1, value=1.0)
    )
    (
        expected[1]
        .runs.add(name="train")
        .tags.add(
            name=second_tag, metadata=test_util.scalar_metadata(second_tag)
        )
        .points.add(step=1, value=2.0)
    )
    self.assertEqual(actual, expected)
def test_break_at_run_boundary(self):
    """The sender splits into two requests when a second run won't fit."""
    mock_client = _create_mock_client()
    # Run name lengths chosen so one run fits in a request but two do not.
    first_run = "A" * 768
    second_run = "B" * 768
    event_1 = event_pb2.Event(step=1)
    event_1.summary.value.add(tag="foo", simple_value=1.0)
    event_2 = event_pb2.Event(step=2)
    event_2.summary.value.add(tag="bar", simple_value=-2.0)
    sender = _create_request_sender("123", mock_client)
    sender.send_requests(
        collections.OrderedDict(
            [(first_run, [event_1]), (second_run, [event_2])]
        )
    )

    actual = [call[0][0] for call in mock_client.WriteScalar.call_args_list]
    for req in actual:
        _clear_wall_times(req)

    # Expect one request per run, each carrying a single tag and point.
    expected = [
        write_service_pb2.WriteScalarRequest(experiment_id="123")
        for _ in range(2)
    ]
    (
        expected[0]
        .runs.add(name=first_run)
        .tags.add(name="foo", metadata=test_util.scalar_metadata("foo"))
        .points.add(step=1, value=1.0)
    )
    (
        expected[1]
        .runs.add(name=second_run)
        .tags.add(name="bar", metadata=test_util.scalar_metadata("bar"))
        .points.add(step=2, value=-2.0)
    )
    self.assertEqual(actual, expected)
def test_prunes_tags_and_runs(self):
    """Partially-filled runs/tags are pruned when a point fails to fit."""
    mock_client = _create_mock_client()
    event_1 = event_pb2.Event(step=1)
    event_1.summary.value.add(tag="foo", simple_value=1.0)
    event_2 = event_pb2.Event(step=2)
    event_2.summary.value.add(tag="bar", simple_value=-2.0)
    run_to_events = collections.OrderedDict(
        [("train", [event_1]), ("test", [event_2])]
    )

    real_create_point = uploader_lib._ScalarBatchedRequestSender._create_point
    # Mutable counter shared with the closure below.
    call_count = {"n": 0}

    def fake_create_point(sender_self, *args, **kwargs):
        # Simulate an out-of-space error the first time that we try to
        # store the second point.
        call_count["n"] += 1
        if call_count["n"] == 2:
            raise uploader_lib._OutOfSpaceError()
        return real_create_point(sender_self, *args, **kwargs)

    with mock.patch.object(
        uploader_lib._ScalarBatchedRequestSender,
        "_create_point",
        fake_create_point,
    ):
        builder = uploader_lib._BatchedRequestSender(
            "123", mock_client, util.RateLimiter(0)
        )
        builder.send_requests(run_to_events)

    actual = [call[0][0] for call in mock_client.WriteScalar.call_args_list]
    for req in actual:
        _clear_wall_times(req)

    expected = [
        write_service_pb2.WriteScalarRequest(experiment_id="123")
        for _ in range(2)
    ]
    (
        expected[0]
        .runs.add(name="train")
        .tags.add(name="foo", metadata=test_util.scalar_metadata("foo"))
        .points.add(step=1, value=1.0)
    )
    (
        expected[1]
        .runs.add(name="test")
        .tags.add(name="bar", metadata=test_util.scalar_metadata("bar"))
        .points.add(step=2, value=-2.0)
    )
    self.assertEqual(expected, actual)
def _new_request(self):
    """Allocates a fresh request proto and refreshes the byte budget."""
    # Drop the cached run/tag protos that pointed into the old request.
    self._runs.clear()
    self._tags.clear()
    request = write_service_pb2.WriteScalarRequest()
    request.experiment_id = self._experiment_id
    self._request = request
    # Reset the budget after the experiment ID is set so its bytes are
    # accounted for.
    self._byte_budget_manager.reset(request)
def _new_request(self):
    """Allocates a fresh request proto and refreshes the byte budget."""
    request = write_service_pb2.WriteScalarRequest()
    request.experiment_id = self._experiment_id
    self._request = request
    # The budget counts down from the maximum request size; the experiment
    # ID is already serialized into the request, so charge for it up front.
    self._byte_budget = _MAX_REQUEST_LENGTH_BYTES - request.ByteSize()
    if self._byte_budget < 0:
        raise RuntimeError("Byte budget too small for experiment ID")
def test_start_uploading(self):
    """start_uploading keeps looping until the builder aborts the upload."""
    mock_client = self._create_mock_client()
    mock_rate_limiter = mock.create_autospec(util.RateLimiter)
    uploader = uploader_lib.TensorBoardUploader(
        mock_client, "/logs/foo", mock_rate_limiter
    )
    uploader.create_experiment()
    mock_builder = mock.create_autospec(uploader_lib._RequestBuilder)
    request = write_service_pb2.WriteScalarRequest()
    # Two batches of requests, then an abort on the third iteration.
    mock_builder.build_requests.side_effect = [
        iter([request] * 2),
        iter([request] * 5),
        AbortUploadError,
    ]
    # pylint: disable=g-backslash-continuation
    with mock.patch.object(uploader, "_upload") as mock_upload, \
        mock.patch.object(uploader, "_request_builder", mock_builder), \
        self.assertRaises(AbortUploadError):
        uploader.start_uploading()
    # pylint: enable=g-backslash-continuation
    # Every yielded request is uploaded and every iteration ticks the limiter.
    self.assertEqual(7, mock_upload.call_count)
    self.assertEqual(2 + 5 + 1, mock_rate_limiter.tick.call_count)
def test_upload_full_logdir(self):
    """End-to-end upload of a logdir across three successive upload rounds.

    Round 1 writes scalars to the root run and a subdirectory run and
    expects a single WriteScalar RPC carrying both runs; round 2 appends
    new data (including a new run) and expects only the new data to be
    sent; round 3 has no new data and must not trigger an RPC.
    """
    logdir = self.get_temp_dir()
    mock_client = _create_mock_client()
    mock_rate_limiter = mock.create_autospec(util.RateLimiter)
    uploader = uploader_lib.TensorBoardUploader(
        mock_client, logdir, mock_rate_limiter
    )
    uploader.create_experiment()

    # Convenience helpers for constructing expected requests.
    run = write_service_pb2.WriteScalarRequest.Run
    tag = write_service_pb2.WriteScalarRequest.Tag
    point = scalar_pb2.ScalarPoint

    # First round
    writer = tb_test_util.FileWriter(logdir)
    writer.add_test_summary("foo", simple_value=5.0, step=1)
    writer.add_test_summary("foo", simple_value=6.0, step=2)
    writer.add_test_summary("foo", simple_value=7.0, step=3)
    writer.add_test_summary("bar", simple_value=8.0, step=3)
    writer.flush()
    writer_a = tb_test_util.FileWriter(os.path.join(logdir, "a"))
    writer_a.add_test_summary("qux", simple_value=9.0, step=2)
    writer_a.flush()
    uploader._upload_once()
    self.assertEqual(1, mock_client.WriteScalar.call_count)
    request1 = mock_client.WriteScalar.call_args[0][0]
    # Wall times are nondeterministic; strip them before comparing.
    _clear_wall_times(request1)
    expected_request1 = write_service_pb2.WriteScalarRequest(
        experiment_id="123",
        runs=[
            run(
                name=".",
                tags=[
                    tag(
                        name="foo",
                        metadata=test_util.scalar_metadata("foo"),
                        points=[
                            point(step=1, value=5.0),
                            point(step=2, value=6.0),
                            point(step=3, value=7.0),
                        ],
                    ),
                    tag(
                        name="bar",
                        metadata=test_util.scalar_metadata("bar"),
                        points=[point(step=3, value=8.0)],
                    ),
                ],
            ),
            run(
                name="a",
                tags=[
                    tag(
                        name="qux",
                        metadata=test_util.scalar_metadata("qux"),
                        points=[point(step=2, value=9.0)],
                    )
                ],
            ),
        ],
    )
    self.assertProtoEquals(expected_request1, request1)
    mock_client.WriteScalar.reset_mock()

    # Second round
    writer.add_test_summary("foo", simple_value=10.0, step=5)
    writer.add_test_summary("baz", simple_value=11.0, step=1)
    writer.flush()
    writer_b = tb_test_util.FileWriter(os.path.join(logdir, "b"))
    writer_b.add_test_summary("xyz", simple_value=12.0, step=1)
    writer_b.flush()
    uploader._upload_once()
    self.assertEqual(1, mock_client.WriteScalar.call_count)
    request2 = mock_client.WriteScalar.call_args[0][0]
    _clear_wall_times(request2)
    # Only data written since the first round should be uploaded.
    expected_request2 = write_service_pb2.WriteScalarRequest(
        experiment_id="123",
        runs=[
            run(
                name=".",
                tags=[
                    tag(
                        name="foo",
                        metadata=test_util.scalar_metadata("foo"),
                        points=[point(step=5, value=10.0)],
                    ),
                    tag(
                        name="baz",
                        metadata=test_util.scalar_metadata("baz"),
                        points=[point(step=1, value=11.0)],
                    ),
                ],
            ),
            run(
                name="b",
                tags=[
                    tag(
                        name="xyz",
                        metadata=test_util.scalar_metadata("xyz"),
                        points=[point(step=1, value=12.0)],
                    )
                ],
            ),
        ],
    )
    self.assertProtoEquals(expected_request2, request2)
    mock_client.WriteScalar.reset_mock()

    # Empty third round
    uploader._upload_once()
    mock_client.WriteScalar.assert_not_called()
def testWriteScalar(self):
    """Smoke test: the stub accepts an empty WriteScalarRequest."""
    request = write_service_pb2.WriteScalarRequest()
    self._stub.WriteScalar(request)