def _verify_transcript(
    self,
    *,
    variables: VariableMap,
    transcript: Transcript,
    lsp_id_map: "_LspIdMap",
) -> Iterable["_ErrorDescription"]:
    """Compare the observed transcript against this spec's messages.

    Yields one error description per mismatch; yields nothing when the
    transcript satisfies every expectation.
    """
    # Transcript keys we have checked (or deliberately ignored); anything
    # left over is reported at the end by _flag_unhandled_messages.
    handled_entries = set()
    for message in self._messages:
        lsp_id = lsp_id_map[message]
        if isinstance(message, _RequestSpec):
            transcript_id = LspCommandProcessor._client_request_id(lsp_id)
            handled_entries.add(transcript_id)
            assert transcript_id in transcript, (
                f"Expected message with ID {lsp_id!r} "
                + f"to have an entry in the transcript "
                + f"under key {transcript_id!r}, "
                + f"but it was not found. Transcript: {transcript!r}"
            )
            entry = transcript[transcript_id]
            # A request may or may not produce an error; only yield when it does.
            error_description = self._verify_request(
                variables=variables, entry=entry, lsp_id=lsp_id, request=message
            )
            if error_description is not None:
                yield error_description
        elif isinstance(message, _DebugRequestSpec):
            transcript_id = LspCommandProcessor._client_request_id(lsp_id)
            handled_entries.add(transcript_id)
            assert transcript_id in transcript, (
                f"Expected message with ID {lsp_id!r} "
                + f"to have an entry in the transcript "
                + f"under key {transcript_id!r}, "
                + f"but it was not found. Transcript: {transcript!r}"
            )
            entry = transcript[transcript_id]
            # Debug requests always render their telemetry-rage result,
            # unconditionally, unlike ordinary requests above.
            error_description = self._render_telemetry_rage(
                debug_request=message, result=entry.received["result"]
            )
            yield error_description
        elif isinstance(message, _NotificationSpec):
            # Nothing needs to be done here, since we sent the notification
            # and don't expect a response.
            pass
        elif isinstance(
            message,
            (
                _WaitForRequestSpec,
                _WaitForNotificationSpec,
                _WaitForResponseSpec,
                _WaitForHhServerReadySpec,
            ),
        ):
            # Nothing needs to be done here -- if we failed to wait for the
            # message, an exception will have been thrown at the
            # `LspCommandProcessor` layer.
            pass
        else:
            raise ValueError(f"unhandled message type {message.__class__.__name__}")
    # Entries the spec explicitly ignores count as handled too.
    handled_entries |= set(self._find_ignored_transcript_ids(transcript))
    yield from self._flag_unhandled_messages(
        handled_entries, variables, transcript, lsp_id_map
    )
def main():
    """Parse CLI options, replay the given LSP command files, and print the transcript.

    Reads commands from the files named on the command line (stdin by
    default), sends them through an LspCommandProcessor, and prints the
    resulting transcript unless --silent was given.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--request_timeout",
        type=int,
        action="store",
        default=30,
        help="duration to wait for request responses, in seconds.",
    )
    arg_parser.add_argument(
        "--notify_timeout",
        type=int,
        action="store",
        default=1,
        help="duration to wait for notify responses, in seconds.",
    )
    arg_parser.add_argument(
        "--verbose",
        action="store_true",
        default=False,
        help="display diagnostic information while reading/writing.",
    )
    arg_parser.add_argument(
        "--silent",
        action="store_true",
        default=False,
        help="suppresses printing of transcript, but not diagnostics.",
    )
    arg_parser.add_argument(
        "files",
        metavar="FILE",
        nargs="*",
        default=["-"],
        help="list of files to read, if empty, stdin is used.",
    )
    options = arg_parser.parse_args()
    parsed_commands = LspCommandProcessor.parse_commands(read_commands(options.files))
    with LspCommandProcessor.create() as processor:
        result_transcript = processor.communicate(
            parsed_commands,
            request_timeout=options.request_timeout,
            notify_timeout=options.notify_timeout,
            verbose=options.verbose,
        )
        if not options.silent:
            print_transcript(processor, result_transcript)
def run_lsp_test(self, test_name, test, expected, wait_for_server):
    """Drive one LSP test and assert the observed responses match `expected`."""
    if wait_for_server:
        # hh_server needs to be up before the LSP client connects.
        self.run_check()
    with LspCommandProcessor.create(self.test_env) as lsp:
        observed_transcript = lsp.communicate(test)
    self.write_observed(test_name, observed_transcript)
    expected_items = self.prepare_responses(expected)
    observed_items = self.prepare_responses(
        list(self.get_important_received_items(observed_transcript))
    )
    # If the server's busy, maybe the machine's just under too much pressure
    # to give results in a timely fashion. Doing a retry would only defer
    # the question of what to do in that case, so instead we'll just skip.
    self.throw_on_skip(observed_transcript)
    # First make sure the counts line up, then compare item by item.
    self.assertEqual(
        len(expected_items),
        len(observed_items),
        "Wrong count. Observed this:\n"
        + json.dumps(observed_transcript, indent=2, separators=(",", ": ")),
    )
    for observed, wanted in zip(observed_items, expected_items):
        self.assertEqual(observed, wanted)
def run_lsp_test(self, test_name, test, expected):
    """Run one LSP test and compare the observed responses against `expected`.

    Fix: removed the unreachable `return` that followed
    `raise unittest.SkipTest(...)` — a raise already exits the function,
    so the return was dead code.
    """
    self.run_check()  # wait until hh_server is ready before starting lsp
    with LspCommandProcessor.create(self.test_env) as lsp:
        observed_transcript = lsp.communicate(test)
    self.write_observed(test_name, observed_transcript)
    expected_items = self.prepare_responses(expected)
    observed_items = self.prepare_responses(
        self.get_received_items(observed_transcript))
    # If the server's busy, maybe the machine's just under too much pressure
    # to give results in a timely fashion. Doing a retry would only defer
    # the question of what to do in that case, so instead we'll just skip.
    if "'message': 'Server busy'" in str(observed_transcript):
        raise unittest.SkipTest('Hack server busy')
    # validation checks that the number of items matches and that
    # the responses are exactly identical to what we expect
    self.assertEqual(
        len(expected_items), len(observed_items),
        'Wrong count. Observed this:\n' +
        json.dumps(observed_transcript, indent=2, separators=(',', ': ')))
    for i in range(len(expected_items)):
        self.assertEqual(observed_items[i], expected_items[i])
def run(
    self, lsp_command_processor: LspCommandProcessor, variables: VariableMap
) -> Tuple[Transcript, Optional[str]]:
    """Run the test given the LSP command processor.

    Returns the observed transcript together with a human-readable error
    summary, or None in place of the summary when the test passed.
    """
    (json_commands, lsp_id_map) = self._get_json_commands(variables=variables)
    transcript = lsp_command_processor.communicate(json_commands=json_commands)
    errors = list(
        self._verify_transcript(
            variables=variables, transcript=transcript, lsp_id_map=lsp_id_map
        )
    )
    if not errors:
        return (transcript, None)
    # Render every failure into a single report so one run surfaces all problems.
    num_errors = len(errors)
    report = [f"Test case {self.name} failed with {num_errors} errors:\n\n"]
    for i, error in enumerate(errors, 1):
        report.append(f"Error {i}/{num_errors}:\n")
        report.append(str(error) + "\n")
    report.append(
        """\
If you want to examine the raw LSP logs, you can check the `.sent.log` and
`.received.log` files that were generated in the template repo for this test."""
    )
    return (transcript, "".join(report))
def _find_previous_request(
    self,
    transcript: Transcript,
    # pyre-fixme[11]: Annotation `_LspIdMap` is not defined as a type.
    lsp_id_map: _LspIdMap,
    current_id: str,
) -> Optional["_RequestSpec"]:
    """Return the _RequestSpec for the last request sent before `current_id`.

    Returns None when no client-to-server request precedes the current entry.
    """
    # Walk the transcript in order, stopping just before the current entry,
    # and remember the most recent client-to-server request seen on the way.
    last_request_json = None
    for entry_id, entry in transcript.items():
        if entry_id == current_id:
            break
        if entry.sent is not None and LspCommandProcessor._is_request(entry.sent):
            last_request_json = entry.sent
    if last_request_json is None:
        return None
    target_lsp_id = last_request_json["id"]
    # Exactly one spec must map to that LSP id; unpacking enforces this.
    [previous_request] = [
        spec
        for spec, spec_lsp_id in lsp_id_map.items()
        if spec_lsp_id == target_lsp_id
    ]
    assert isinstance(
        previous_request, _RequestSpec
    ), "We should have identified a client-to-server request at this point"
    return previous_request
def main():
    """Read lines from stdin/files and forward each built command to the LSP process."""
    with LspCommandProcessor.create() as processor:
        for raw_line in fileinput.input():
            built = processor.build_command(raw_line)
            if not built:
                continue
            cmd, rw = built
            process_command(processor, cmd, rw)
def run_lsp_test(self, test_name, test, expected, wait_for_server):
    """Execute an LSP test scenario and verify the received messages."""
    if wait_for_server:
        # hh_server must be ready before we start the LSP conversation.
        self.run_check()
    with LspCommandProcessor.create(self.test_env) as processor:
        transcript = processor.communicate(test)
    self.write_observed(test_name, transcript)
    wanted = self.prepare_responses(expected)
    got = self.prepare_responses(
        list(self.get_important_received_items(transcript))
    )
    # If the server's busy, maybe the machine's just under too much pressure
    # to give results in a timely fashion. Doing a retry would only defer
    # the question of what to do in that case, so instead we'll just skip.
    self.throw_on_skip(transcript)
    # The counts must match, and each response must be exactly as expected.
    self.assertEqual(
        len(wanted),
        len(got),
        "Wrong count. Observed this:\n"
        + json.dumps(transcript, indent=2, separators=(",", ": ")),
    )
    for index in range(len(wanted)):
        self.assertEqual(got[index], wanted[index])
def main():
    """Interactively send each input line as an LSP command and echo the reply."""
    with LspCommandProcessor.create() as proc:
        for input_line in fileinput.input():
            cmd = proc.build_command(input_line)
            if not cmd:
                continue
            print_section("SENDING:", cmd)
            reply = proc.send(cmd)
            print_section("LSP SAID:", reply.decode())
def run_lsp_test(self, test_name, test, expected, generate):
    """Run an LSP test; either validate against `expected` or regenerate it."""
    commands = LspCommandProcessor.parse_commands(test)
    with LspCommandProcessor.create(self.test_env) as processor:
        transcript = processor.communicate(commands)
    if generate:
        # Record the observed transcript as the new expected output.
        self.generate_expected(test_name, transcript)
        return
    wanted = self.prepare_responses(json.loads(expected))
    got = self.prepare_responses(self.get_received_items(transcript))
    # validation checks that the number of items matches and that
    # the responses are exactly identical to what we expect
    self.assertEqual(len(wanted), len(got))
    for got_item, wanted_item in zip(got, wanted):
        self.assertEqual(got_item, wanted_item)
def run_lsp_test(
    self,
    test_name: str,
    test: Json,
    expected: Json,
    wait_for_server: bool,
    use_serverless_ide: bool,
) -> None:
    """Run an LSP test against hh_server and/or serverless IDE and validate responses.

    Fixes: the assertion message was missing a space before "(If you're
    writing..."; the final assertEqual arguments now follow the
    (observed, expected) order used by the sibling implementations.
    """
    if wait_for_server:
        assert not use_serverless_ide, (
            "Warning: both `wait_for_server` and `use_serverless_ide` "
            + "were set to `True` for testing in "
            + self.run_lsp_test.__name__
            + ". "
            + "While this is a possible test case, it hasn't been written yet, "
            + "so it's more likely that this is a mistake "
            + "and you're accidentally relying on hh_server to fulfill "
            + "serverless IDE requests. "
            + "(If you're writing that test, "
            + "then it's time to remove this assertion.)"
        )
        # wait until hh_server is ready before starting lsp
        self.test_driver.run_check()
    elif use_serverless_ide:
        self.test_driver.stop_hh_server()
    with LspCommandProcessor.create(
        self.test_driver.test_env, use_serverless_ide=use_serverless_ide
    ) as lsp:
        observed_transcript = lsp.communicate(test)
    self.write_observed(test_name, observed_transcript)
    expected_items = self.prepare_responses(expected)
    observed_items = self.prepare_responses(
        list(self.get_important_received_items(observed_transcript))
    )
    if not use_serverless_ide:
        # If the server's busy, maybe the machine's just under too much
        # pressure to give results in a timely fashion. Doing a retry would
        # only defer the question of what to do in that case, so instead
        # we'll just skip.
        self.throw_on_skip(observed_transcript)
    # validation checks that the number of items matches and that
    # the responses are exactly identical to what we expect
    self.assertEqual(
        len(expected_items),
        len(observed_items),
        "Wrong count. Observed this:\n"
        + json.dumps(observed_transcript, indent=2, separators=(",", ": ")),
    )
    for i in range(len(expected_items)):
        self.assertEqual(observed_items[i], expected_items[i])
def main():
    """Entry point: parse arguments, replay LSP commands, print the transcript."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '--request_timeout',
        type=int,
        action='store',
        default=30,
        help='duration to wait for request responses, in seconds.')
    cli.add_argument(
        '--notify_timeout',
        type=int,
        action='store',
        default=1,
        help='duration to wait for notify responses, in seconds.')
    cli.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help='display diagnostic information while reading/writing.')
    cli.add_argument(
        '--silent',
        action='store_true',
        default=False,
        help='suppresses printing of transcript, but not diagnostics.')
    cli.add_argument(
        'files',
        metavar='FILE',
        nargs='*',
        default=['-'],
        help='list of files to read, if empty, stdin is used.')
    opts = cli.parse_args()
    commands = LspCommandProcessor.parse_commands(read_commands(opts.files))
    with LspCommandProcessor.create() as processor:
        transcript = processor.communicate(
            commands,
            request_timeout=opts.request_timeout,
            notify_timeout=opts.notify_timeout,
            verbose=opts.verbose)
        if not opts.silent:
            print_transcript(processor, transcript)
def test_init_shutdown(self):
    """Smoke-test the LSP initialize/shutdown handshake against a clean repo."""
    self.write_load_config()
    self.check_cmd(['No errors!'])
    # Template variables substituted into the canned request/response data.
    substitutions = {
        'root_path': LspCommandProcessor.path_expand(self.repo_dir)
    }
    case_name = 'initialize_shutdown'
    test, expected = self.load_test_data(case_name, substitutions)
    self.run_lsp_test(
        test_name=case_name, test=test, expected=expected, generate=False
    )
def run_lsp_test(self, test_name, test, expected):
    """Run an LSP conversation and check the responses match `expected` exactly."""
    with LspCommandProcessor.create(self.test_env) as processor:
        transcript = processor.communicate(test)
    self.write_observed(test_name, transcript)
    wanted = self.prepare_responses(expected)
    got = self.prepare_responses(self.get_received_items(transcript))
    # Counts must agree, and each response must be exactly what we expect.
    self.assertEqual(len(wanted), len(got))
    for got_item, wanted_item in zip(got, wanted):
        self.assertEqual(got_item, wanted_item)
def main():
    """Read LSP command files, send them to the server, and show the transcript."""
    ap = argparse.ArgumentParser()
    # Flag definitions are tabulated so the option set is easy to scan.
    for flag, extra in (
        ('--request_timeout',
         dict(type=int, action='store', default=30,
              help='duration to wait for request responses, in seconds.')),
        ('--notify_timeout',
         dict(type=int, action='store', default=1,
              help='duration to wait for notify responses, in seconds.')),
        ('--verbose',
         dict(action='store_true', default=False,
              help='display diagnostic information while reading/writing.')),
        ('--silent',
         dict(action='store_true', default=False,
              help='suppresses printing of transcript, but not diagnostics.')),
    ):
        ap.add_argument(flag, **extra)
    ap.add_argument('files', metavar='FILE', nargs='*', default=['-'],
                    help='list of files to read, if empty, stdin is used.')
    parsed = ap.parse_args()
    commands = LspCommandProcessor.parse_commands(read_commands(parsed.files))
    with LspCommandProcessor.create() as proc:
        transcript = proc.communicate(commands,
                                      request_timeout=parsed.request_timeout,
                                      notify_timeout=parsed.notify_timeout,
                                      verbose=parsed.verbose)
        if not parsed.silent:
            print_transcript(proc, transcript)
def run_lsp_test(self, test_name, test, expected):
    """Run an LSP test and assert the observed responses equal `expected`."""
    with LspCommandProcessor.create(self.test_env) as lsp:
        observed = lsp.communicate(test)
    self.write_observed(test_name, observed)
    want = self.prepare_responses(expected)
    have = self.prepare_responses(self.get_received_items(observed))
    # validation checks that the number of items matches and that
    # the responses are exactly identical to what we expect
    self.assertEqual(len(want), len(have))
    for idx, wanted_item in enumerate(want):
        self.assertEqual(have[idx], wanted_item)
def run_spec(
    self,
    spec: LspTestSpec,
    variables: Mapping[str, str],
    wait_for_server: bool,
    use_serverless_ide: bool,
) -> None:
    """Run an LspTestSpec, dump sent/received logs, and raise on failure.

    Fixes:
    * the assertion message named `run_lsp_test` even though this method is
      `run_spec`; it now reports the correct method name;
    * the message was missing a space before "(If you're writing...";
    * the duplicated sent/received log-dumping code is collapsed into one loop.
    """
    if wait_for_server:
        assert not use_serverless_ide, (
            "Warning: both `wait_for_server` and `use_serverless_ide` "
            + "were set to `True` for testing in "
            + self.run_spec.__name__
            + ". "
            + "While this is a possible test case, it hasn't been written yet, "
            + "so it's more likely that this is a mistake "
            + "and you're accidentally relying on hh_server to fulfill "
            + "serverless IDE requests. "
            + "(If you're writing that test, "
            + "then it's time to remove this assertion.)"
        )
        # wait until hh_server is ready before starting lsp
        self.test_driver.run_check()
    elif use_serverless_ide:
        self.test_driver.stop_hh_server()
    with LspCommandProcessor.create(
        self.test_driver.test_env, use_serverless_ide=use_serverless_ide
    ) as lsp_command_processor:
        (observed_transcript, error_details) = spec.run(
            lsp_command_processor=lsp_command_processor, variables=variables
        )
    # Dump both halves of the conversation for post-mortem debugging.
    # Transcript entries are (sent, received) pairs; index 0 is sent, 1 is received.
    for suffix, index in ((".sent.log", 0), (".received.log", 1)):
        log_path = os.path.join(self.test_driver.template_repo, spec.name + suffix)
        messages = [
            entry[index]
            for entry in observed_transcript.values()
            if entry[index] is not None
        ]
        with open(log_path, "w") as f:
            f.write(json.dumps(messages, indent=2))
    if not use_serverless_ide:
        # If the server's busy, maybe the machine's just under too much
        # pressure to give results in a timely fashion. Doing a retry would
        # only defer the question of what to do in that case, so instead
        # we'll just skip.
        self.throw_on_skip(observed_transcript)
    if error_details is not None:
        raise AssertionError(error_details)
def main():
    """Read LSP commands, run them against a fresh LSP process, print the transcript."""
    with LspCommandProcessor.create() as processor:
        # Renamed locals: the original bound the parsed commands to `json`,
        # shadowing the module name within this function.
        commands = processor.parse_commands(read_commands())
        transcript = processor.communicate(commands)
        print_transcript(processor, transcript)