def test_batch_request_with_component_errors(self, spy_plugin_controller, test_data, batch_request):
    """One component renders fine while the other reports a component-level
    error: the failing entry should carry a HypernovaError and fall back
    to blank (client-side) markup.
    """
    data = test_data[0]
    token_1 = batch_request.render('MyComponent1.js', data[0])
    token_2 = batch_request.render('MyComponent2.js', data[1])
    job_2 = Job(name='MyComponent2.js', data=data[1])

    stub_json = {
        'error': None,
        'results': {
            token_1.identifier: {
                'error': None,
                'html': '<div>wow such SSR</div>',
            },
            token_2.identifier: {
                'error': {
                    'name': 'SomeError',
                    'message': 'we goofed',
                    'stack': ['line 1', 'line 2']
                },
                'html': None,
            }
        }
    }

    with mock.patch('fido.fetch') as fetch_mock:
        fetch_mock.return_value.wait.return_value.json.return_value = stub_json
        response = batch_request.submit()

    if batch_request.max_batch_size is None:
        expected_fetches = 1
    else:
        # Ceil-divide job count by batch size to get the number of HTTP calls.
        expected_fetches = -(-len(batch_request.jobs) // batch_request.max_batch_size)
    assert fetch_mock.call_count == expected_fetches

    assert response == {
        token_1.identifier: JobResult(
            error=None,
            html='<div>wow such SSR</div>',
            job=Job(name='MyComponent1.js', data=data[0]),
        ),
        token_2.identifier: JobResult(
            error=HypernovaError(
                name='SomeError',
                message='we goofed',
                stack=['line 1', 'line 2'],
            ),
            html=render_blank_markup(token_2.identifier, job_2, True, batch_request.json_encoder),
            job=job_2,
        ),
    }
def _parse_response(self, response_json):
    """Turn the service's raw JSON payload into a dict of job results.

    Component-level errors are wrapped in HypernovaError and reported to
    the plugin controller; missing HTML falls back to blank markup so the
    client can render instead.

    :rtype: Dict[str, JobResult]
    """
    response = {}
    for identifier, result in response_json['results'].items():
        job = self.jobs[identifier]
        raw_error = result['error']
        if raw_error:
            error = HypernovaError(
                name=raw_error['name'],
                message=raw_error['message'],
                stack=raw_error['stack'],
            )
            # Let plugins observe per-component failures.
            self.plugin_controller.on_error(error, {identifier: job}, self.pyramid_request)
        else:
            error = None
        # Falsy HTML (None / empty) means we fall back to client rendering.
        html = result['html'] or render_blank_markup(identifier, job, True, self.json_encoder)
        response[identifier] = JobResult(error=error, html=html, job=job)
    return response
def test_calls_after_response(self, spy_plugin_controller, test_data, batch_request):
    """Submitting a batch should run the plugin controller's after_response
    hook and return its (possibly transformed) result.
    """
    data = test_data[0]
    ssr_token = batch_request.render('MySsrComponent.js', data[0])
    stub_json = {
        'error': None,
        'results': {
            ssr_token.identifier: {
                'error': None,
                'html': '<div>wow such SSR</div>',
            }
        }
    }

    with mock.patch('fido.fetch') as fetch_mock:
        fetch_mock.return_value.wait.return_value.json.return_value = stub_json
        response = batch_request.submit()

    assert spy_plugin_controller.after_response.called

    parsed = {
        ssr_token.identifier: JobResult(
            error=None,
            html='<div>wow such SSR</div>',
            job=batch_request.jobs[ssr_token.identifier],
        ),
    }
    # The submitted response must match what the hook produces for the
    # same parsed input.
    assert response == spy_plugin_controller.after_response(parsed)
def test_batch_request_with_unhealthy_service(self, spy_plugin_controller, test_data, batch_request):
    """A network failure while querying the service should surface as a
    HypernovaError wrapping the exception, with blank fallback markup.
    """
    data = test_data[0]
    job = Job(name='MyComponent.js', data=data[0])
    token = batch_request.render('MyComponent.js', data[0])

    with mock.patch('fido.fetch') as fetch_mock:
        fetch_mock.return_value.wait.return_value.json.side_effect = NetworkError('oh no')
        response = batch_request.submit()

    if batch_request.max_batch_size is None:
        expected_fetches = 1
    else:
        # Ceil division gives the number of batched HTTP calls.
        expected_fetches = -(-len(batch_request.jobs) // batch_request.max_batch_size)
    assert fetch_mock.call_count == expected_fetches

    assert response == {
        token.identifier: JobResult(
            error=HypernovaError(
                name="<class 'fido.exceptions.NetworkError'>",
                message='oh no',
                stack=mock.ANY,
            ),
            html=render_blank_markup(token.identifier, job, True, batch_request.json_encoder),
            job=job,
        ),
    }
def mock_setup(self):
    """Wire up mocks for the tween under test: a handler whose response
    body is a render token, a registry carrying hypernova settings, and a
    batch request factory whose submit() resolves that token to HTML.
    """
    self.token = RenderToken('my-unique-id')

    handler = mock.Mock()
    handler.return_value = mock.Mock(text=str(self.token))

    self.mock_json_encoder = mock.Mock()
    self.mock_batch_request_factory = mock.Mock()
    self.mock_batch_request_factory.return_value.submit.return_value = {
        'my-unique-id': JobResult(
            error=None,
            html='<div>REACT!</div>',
            job=None,
        )
    }

    self.mock_registry = mock.Mock()
    self.mock_registry.settings = {
        'pyramid_hypernova.get_batch_url': mock.Mock(return_value='http://localhost:8888/batch'),
        'pyramid_hypernova.batch_request_factory': self.mock_batch_request_factory,
        'pyramid_hypernova.json_encoder': self.mock_json_encoder,
    }

    self.tween = hypernova_tween_factory(handler, self.mock_registry)
    self.mock_request = mock.Mock()
def test_batch_request_with_application_error(self, spy_plugin_controller, batch_request):
    """A top-level (application) error from the service should mark the
    job as errored and substitute blank client-side markup.
    """
    job = Job(name='MyComponent.js', data={'foo': 'bar'})
    token = batch_request.render('MyComponent.js', {'foo': 'bar'})
    stub_json = {
        'error': {
            'name': 'SomeError',
            'message': 'yikes',
            'stack': ['line 1', 'line 2']
        }
    }

    with mock.patch('fido.fetch') as fetch_mock:
        fetch_mock.return_value.wait.return_value.json.return_value = stub_json
        response = batch_request.submit()

    if batch_request.max_batch_size is None:
        expected_fetches = 1
    else:
        # Ceil division: number of batches needed to cover all jobs.
        expected_fetches = -(-len(batch_request.jobs) // batch_request.max_batch_size)
    assert fetch_mock.call_count == expected_fetches

    assert response == {
        token.identifier: JobResult(
            error=HypernovaError(
                name='SomeError',
                message='yikes',
                stack=['line 1', 'line 2'],
            ),
            html=render_blank_markup(token.identifier, job, True),
            job=job,
        ),
    }
def create_fallback_response(jobs, throw_client_error, error=None):
    """Build a client-side-rendering fallback result for every job.

    Each job maps to a JobResult whose HTML is blank markup (so the client
    renders instead) and whose error is the optional shared ``error``.

    :rtype: Dict[str, JobResult]
    """
    response = {}
    for identifier, job in jobs.items():
        response[identifier] = JobResult(
            error=error,
            html=render_blank_markup(identifier, job, throw_client_error),
            job=job,
        )
    return response
def test_create_fallback_response(jobs, throw_client_error, json_encoder):
    """create_fallback_response should emit one error-free JobResult with
    blank markup per supplied job.
    """
    expected = {}
    for identifier, job in jobs.items():
        expected[identifier] = JobResult(
            error=None,
            html=render_blank_markup(identifier, job, throw_client_error, json_encoder),
            job=job,
        )
    assert create_fallback_response(jobs, throw_client_error, json_encoder) == expected
def test_batch_request_with_application_error(
    self,
    spy_plugin_controller,
    test_data,
    batch_request,
    mock_hypernova_query,
):
    """An application-level error should propagate to the job as a
    HypernovaError with blank fallback markup.
    """
    data = test_data[0]
    job = Job(name='MyComponent.js', data=data[0], context={})
    token = batch_request.render('MyComponent.js', data[0])
    mock_hypernova_query.return_value.json.return_value = {
        'error': {
            'name': 'SomeError',
            'message': 'yikes',
            'stack': ['line 1', 'line 2']
        }
    }

    response = batch_request.submit()

    if batch_request.max_batch_size is None:
        assert mock_hypernova_query.call_count == 1
    else:
        # Ceil division: how many batches are needed to cover all jobs.
        size = batch_request.max_batch_size
        num_batches = -(-len(batch_request.jobs) // size)
        assert mock_hypernova_query.call_count == num_batches
        mock_hypernova_query.assert_called_with(mock.ANY, mock.ANY, mock.ANY, num_batches == 1, {})

    assert response == {
        token.identifier: JobResult(
            error=HypernovaError(
                name='SomeError',
                message='yikes',
                stack=['line 1', 'line 2'],
            ),
            html=render_blank_markup(token.identifier, job, True, batch_request.json_encoder),
            job=job,
        ),
    }
def create_fallback_response(jobs, throw_client_error, json_encoder, error=None):
    """Create a response dict for falling back to client-side rendering.

    Each job maps to a JobResult whose HTML is blank markup (so the client
    renders the component instead) and whose error is the optional shared
    ``error``.

    :param jobs: mapping of identifier -> Job to build fallbacks for
    :param throw_client_error: forwarded to render_blank_markup
    :param json_encoder: encoder forwarded to render_blank_markup
    :param error: optional error attached to every JobResult
    :rtype: Dict[str, JobResult]
    """
    return {
        identifier: JobResult(
            error=error,
            html=render_blank_markup(identifier, job, throw_client_error, json_encoder),
            job=job,
        )
        for identifier, job in jobs.items()
    }
def test_hypernova_token_replacement():
    """The context manager should replace render tokens in the body with
    the submitted batch's rendered HTML.
    """
    token = RenderToken('my-unique-id')

    batch = mock.Mock()
    batch.submit.return_value = {
        'my-unique-id': JobResult(
            error=None,
            html='<div>REACT!</div>',
            job=None,
        )
    }

    with hypernova_token_replacement(batch) as body:
        body['content'] = str(token)

    assert batch.submit.called
    assert body['content'] == '<div>REACT!</div>'
def test_batch_request_with_unhealthy_service(
    self,
    spy_plugin_controller,
    test_data,
    batch_request,
    mock_hypernova_query,
):
    """A query failure (HypernovaQueryError) should surface as a
    HypernovaError and fall back to blank client-side markup.
    """
    data = test_data[0]
    job = Job(name='MyComponent.js', data=data[0], context={})
    token = batch_request.render('MyComponent.js', data[0])
    mock_hypernova_query.return_value.json.side_effect = HypernovaQueryError('oh no')

    response = batch_request.submit()

    if batch_request.max_batch_size is None:
        assert mock_hypernova_query.call_count == 1
    else:
        # Ceil division: batches needed to cover every job.
        size = batch_request.max_batch_size
        num_batches = -(-len(batch_request.jobs) // size)
        assert mock_hypernova_query.call_count == num_batches
        mock_hypernova_query.assert_called_with(mock.ANY, mock.ANY, mock.ANY, num_batches == 1, {})

    assert response == {
        token.identifier: JobResult(
            error=HypernovaError(
                name='HypernovaQueryError',
                message='oh no',
                stack=mock.ANY,
            ),
            html=render_blank_markup(token.identifier, job, True, batch_request.json_encoder),
            job=job,
        ),
    }
def test_tween_replaces_tokens():
    """The tween should construct the batch request with the configured
    settings, submit it, and swap render tokens in the response body for
    the rendered HTML.
    """
    token = RenderToken('my-unique-id')

    handler = mock.Mock()
    handler.return_value = mock.Mock(text=str(token))

    factory = mock.Mock()
    factory.return_value.submit.return_value = {
        'my-unique-id': JobResult(
            error=None,
            html='<div>REACT!</div>',
            job=None,
        )
    }
    json_encoder = mock.Mock()

    registry = mock.Mock()
    registry.settings = {
        'pyramid_hypernova.get_batch_url': mock.Mock(return_value='http://localhost:8888/batch'),
        'pyramid_hypernova.batch_request_factory': factory,
        'pyramid_hypernova.json_encoder': json_encoder,
    }

    tween = hypernova_tween_factory(handler, registry)
    response = tween(mock.Mock())

    factory.assert_called_once_with(
        batch_url='http://localhost:8888/batch',
        plugin_controller=mock.ANY,
        json_encoder=json_encoder,
    )
    assert factory.return_value.submit.called
    assert response.text == '<div>REACT!</div>'
def test_create_fallback_response(throw_client_error):
    """Fallback responses should contain blank markup and no error for
    each supplied job.
    """
    jobs = {
        'some-unique-id': Job(
            name='FooBar.js',
            data={'baz': 1234},
        ),
        'some-other-unique-id': Job(
            name='MyComponent.js',
            data={'title': 'sup'},
        ),
    }

    expected = {}
    for identifier, job in jobs.items():
        expected[identifier] = JobResult(
            error=None,
            html=render_blank_markup(identifier, job, throw_client_error),
            job=job,
        )

    assert create_fallback_response(jobs, throw_client_error) == expected
def test_successful_batch_request(self, spy_plugin_controller, test_data, batch_request):
    """Happy path: every component renders successfully and the response
    maps each token to its HTML.
    """
    data = test_data[0]
    names = ('component-1.js', 'component-2.js', 'component-3.js')
    tokens = [batch_request.render(name, datum) for name, datum in zip(names, data)]

    assert batch_request.jobs == {
        token.identifier: Job(name=name, data=datum)
        for token, name, datum in zip(tokens, names, data)
    }

    stub_json = {
        'error': None,
        'results': {
            tokens[0].identifier: {
                'error': None,
                'html': '<div>component 1</div>',
            },
            tokens[1].identifier: {
                'error': None,
                'html': '<div>component 2</div>',
            },
            tokens[2].identifier: {
                'error': None,
                'html': '<div>component 3</div>',
            },
        }
    }

    with mock.patch('fido.fetch') as fetch_mock:
        fetch_mock.return_value.wait.return_value.json.return_value = stub_json
        response = batch_request.submit()

    if batch_request.max_batch_size is None:
        expected_fetches = 1
    else:
        # Ceil division: number of batched HTTP calls needed for all jobs.
        expected_fetches = -(-len(batch_request.jobs) // batch_request.max_batch_size)
    assert fetch_mock.call_count == expected_fetches

    assert response == {
        tokens[0].identifier: JobResult(
            error=None,
            html='<div>component 1</div>',
            job=Job(name='component-1.js', data=data[0]),
        ),
        tokens[1].identifier: JobResult(
            error=None,
            html='<div>component 2</div>',
            job=Job(name='component-2.js', data=data[1]),
        ),
        tokens[2].identifier: JobResult(
            error=None,
            html='<div>component 3</div>',
            job=Job(name='component-3.js', data=data[2]),
        ),
    }