def run_lsp_test(self, test_name, test, expected, wait_for_server):
    """Run an LSP session with `test` and check its transcript against `expected`.

    When `wait_for_server` is set, a typecheck is run first so hh_server is
    fully ready before the LSP channel opens.
    """
    if wait_for_server:
        # hh_server must be ready before starting lsp
        self.run_check()
    with LspCommandProcessor.create(self.test_env) as processor:
        transcript = processor.communicate(test)
    self.write_observed(test_name, transcript)
    want = self.prepare_responses(expected)
    got = self.prepare_responses(
        list(self.get_important_received_items(transcript))
    )
    # If the server's busy, maybe the machine's just under too much pressure
    # to give results in a timely fashion. Doing a retry would only defer
    # the question of what to do in that case, so instead we'll just skip.
    self.throw_on_skip(transcript)
    # Validation: the item counts must match, and each response must be
    # exactly identical to what we expect.
    self.assertEqual(
        len(want),
        len(got),
        "Wrong count. Observed this:\n"
        + json.dumps(transcript, indent=2, separators=(",", ": ")),
    )
    for index, want_item in enumerate(want):
        self.assertEqual(got[index], want_item)
def main():
    """Read lines of input, build LSP commands from them, and process each."""
    with LspCommandProcessor.create() as lsp_proc:
        for raw_line in fileinput.input():
            built = lsp_proc.build_command(raw_line)
            # build_command yields a falsy value for lines that aren't commands
            if built:
                cmd, rw = built
                process_command(lsp_proc, cmd, rw)
def run_lsp_test(self, test_name, test, expected):
    """Run an LSP session with `test` and check its transcript against `expected`.

    Raises unittest.SkipTest (instead of failing or retrying) when hh_server
    reports itself busy.
    """
    self.run_check()  # wait until hh_server is ready before starting lsp
    with LspCommandProcessor.create(self.test_env) as lsp:
        observed_transcript = lsp.communicate(test)
    self.write_observed(test_name, observed_transcript)
    expected_items = self.prepare_responses(expected)
    observed_items = self.prepare_responses(
        self.get_received_items(observed_transcript))
    # If the server's busy, maybe the machine's just under too much pressure
    # to give results in a timely fashion. Doing a retry would only defer
    # the question of what to do in that case, so instead we'll just skip.
    # (Fix: the original had an unreachable `return` directly after this
    # raise; it has been removed.)
    if "'message': 'Server busy'" in str(observed_transcript):
        raise unittest.SkipTest('Hack server busy')
    # validation checks that the number of items matches and that
    # the responses are exactly identical to what we expect
    self.assertEqual(
        len(expected_items),
        len(observed_items),
        'Wrong count. Observed this:\n' +
        json.dumps(observed_transcript, indent=2, separators=(',', ': ')))
    for i in range(len(expected_items)):
        self.assertEqual(observed_items[i], expected_items[i])
def run_lsp_test(self, test_name, test, expected, wait_for_server):
    """Drive an LSP session and assert its responses equal `expected`.

    `wait_for_server` forces a typecheck first, so hh_server is warm before
    the LSP conversation starts.
    """
    if wait_for_server:
        # wait until hh_server is ready before starting lsp
        self.run_check()
    with LspCommandProcessor.create(self.test_env) as lsp_session:
        observed = lsp_session.communicate(test)
    self.write_observed(test_name, observed)
    expected_responses = self.prepare_responses(expected)
    observed_responses = self.prepare_responses(
        list(self.get_important_received_items(observed))
    )
    # If the server's busy, maybe the machine's just under too much pressure
    # to give results in a timely fashion. Doing a retry would only defer
    # the question of what to do in that case, so instead we'll just skip.
    self.throw_on_skip(observed)
    mismatch_message = "Wrong count. Observed this:\n" + json.dumps(
        observed, indent=2, separators=(",", ": ")
    )
    # validation checks that the number of items matches and that
    # the responses are exactly identical to what we expect
    self.assertEqual(len(expected_responses), len(observed_responses), mismatch_message)
    for i in range(len(expected_responses)):
        self.assertEqual(observed_responses[i], expected_responses[i])
def main():
    """Interactive driver: for each input line, show the command sent and the
    LSP server's raw reply."""
    with LspCommandProcessor.create() as lsp_proc:
        for raw in fileinput.input():
            cmd = lsp_proc.build_command(raw)
            if cmd:
                print_section("SENDING:", cmd)
                reply = lsp_proc.send(cmd)
                print_section("LSP SAID:", reply.decode())
def run_lsp_test(
    self,
    test_name: str,
    test: Json,
    expected: Json,
    wait_for_server: bool,
    use_serverless_ide: bool,
) -> None:
    """Run an LSP session with `test` and check its transcript against `expected`.

    `wait_for_server` runs a typecheck first so hh_server is ready;
    `use_serverless_ide` instead stops hh_server so requests are served
    without it. The two flags are mutually exclusive (asserted below).
    """
    if wait_for_server:
        # Fix: the original message read "...requests.(If you're..." with no
        # space between sentences; a space has been added.
        assert not use_serverless_ide, (
            "Warning: both `wait_for_server` and `use_serverless_ide` "
            + "were set to `True` for testing in "
            + self.run_lsp_test.__name__
            + ". "
            + "While this is a possible test case, it hasn't been written yet, "
            + "so it's more likely that this is a mistake "
            + "and you're accidentally relying on hh_server to fulfill "
            + "serverless IDE requests. "
            + "(If you're writing that test, "
            + "then it's time to remove this assertion.)"
        )
        # wait until hh_server is ready before starting lsp
        self.test_driver.run_check()
    elif use_serverless_ide:
        self.test_driver.stop_hh_server()
    with LspCommandProcessor.create(
        self.test_driver.test_env, use_serverless_ide=use_serverless_ide
    ) as lsp:
        observed_transcript = lsp.communicate(test)
    self.write_observed(test_name, observed_transcript)
    expected_items = self.prepare_responses(expected)
    observed_items = self.prepare_responses(
        list(self.get_important_received_items(observed_transcript))
    )
    if not use_serverless_ide:
        # If the server's busy, maybe the machine's just under too much
        # pressure to give results in a timely fashion. Doing a retry would
        # only defer the question of what to do in that case, so instead
        # we'll just skip.
        self.throw_on_skip(observed_transcript)
    # validation checks that the number of items matches and that
    # the responses are exactly identical to what we expect
    self.assertEqual(
        len(expected_items),
        len(observed_items),
        "Wrong count. Observed this:\n"
        + json.dumps(observed_transcript, indent=2, separators=(",", ": ")),
    )
    for i in range(len(expected_items)):
        self.assertEqual(expected_items[i], observed_items[i])
def run_lsp_test(self, test_name, test, expected):
    """Run an LSP session with `test` and assert the received responses equal
    `expected`, item by item."""
    with LspCommandProcessor.create(self.test_env) as lsp:
        transcript = lsp.communicate(test)
    self.write_observed(test_name, transcript)
    want = self.prepare_responses(expected)
    got = self.prepare_responses(self.get_received_items(transcript))
    # validation checks that the number of items matches and that
    # the responses are exactly identical to what we expect
    self.assertEqual(len(want), len(got))
    for got_item, want_item in zip(got, want):
        self.assertEqual(got_item, want_item)
def main():
    """Parse CLI flags, run the given command files through an LSP processor,
    and (unless --silent) print the resulting transcript."""
    parser = argparse.ArgumentParser()
    add_arg = parser.add_argument
    add_arg(
        "--request_timeout",
        type=int,
        action="store",
        default=30,
        help="duration to wait for request responses, in seconds.",
    )
    add_arg(
        "--notify_timeout",
        type=int,
        action="store",
        default=1,
        help="duration to wait for notify responses, in seconds.",
    )
    add_arg(
        "--verbose",
        action="store_true",
        default=False,
        help="display diagnostic information while reading/writing.",
    )
    add_arg(
        "--silent",
        action="store_true",
        default=False,
        help="suppresses printing of transcript, but not diagnostics.",
    )
    add_arg(
        "files",
        metavar="FILE",
        nargs="*",
        default=["-"],
        help="list of files to read, if empty, stdin is used.",
    )
    args = parser.parse_args()
    commands = LspCommandProcessor.parse_commands(read_commands(args.files))
    with LspCommandProcessor.create() as lsp_proc:
        transcript = lsp_proc.communicate(
            commands,
            request_timeout=args.request_timeout,
            notify_timeout=args.notify_timeout,
            verbose=args.verbose,
        )
        if not args.silent:
            print_transcript(lsp_proc, transcript)
def run_lsp_test(self, test_name, test, expected):
    """Run `test` through an LSP session; each received response must exactly
    match the corresponding entry of `expected`."""
    with LspCommandProcessor.create(self.test_env) as session:
        observed = session.communicate(test)
    self.write_observed(test_name, observed)
    expected_responses = self.prepare_responses(expected)
    observed_responses = self.prepare_responses(
        self.get_received_items(observed)
    )
    # validation checks that the number of items matches and that
    # the responses are exactly identical to what we expect
    self.assertEqual(len(expected_responses), len(observed_responses))
    for index, expected_response in enumerate(expected_responses):
        self.assertEqual(observed_responses[index], expected_response)
def run_lsp_test(self, test_name, test, expected, generate):
    """Run the LSP commands in `test`.

    In generate mode, record the observed transcript as the new expectation;
    otherwise, validate the transcript against `expected` (a JSON string).
    """
    commands = LspCommandProcessor.parse_commands(test)
    with LspCommandProcessor.create(self.test_env) as lsp:
        transcript = lsp.communicate(commands)
    if generate:
        self.generate_expected(test_name, transcript)
        return
    want = self.prepare_responses(json.loads(expected))
    got = self.prepare_responses(self.get_received_items(transcript))
    # validation checks that the number of items matches and that
    # the responses are exactly identical to what we expect
    self.assertEqual(len(want), len(got))
    for i, want_item in enumerate(want):
        self.assertEqual(got[i], want_item)
def main():
    """Parse CLI flags, feed the command files to an LSP processor, and print
    the transcript unless --silent was given."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--request_timeout',
        type=int,
        action='store',
        default=30,
        help='duration to wait for request responses, in seconds.',
    )
    parser.add_argument(
        '--notify_timeout',
        type=int,
        action='store',
        default=1,
        help='duration to wait for notify responses, in seconds.',
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help='display diagnostic information while reading/writing.',
    )
    parser.add_argument(
        '--silent',
        action='store_true',
        default=False,
        help='suppresses printing of transcript, but not diagnostics.',
    )
    parser.add_argument(
        'files',
        metavar='FILE',
        nargs='*',
        default=['-'],
        help='list of files to read, if empty, stdin is used.',
    )
    args = parser.parse_args()
    parsed = LspCommandProcessor.parse_commands(read_commands(args.files))
    with LspCommandProcessor.create() as lsp_proc:
        result = lsp_proc.communicate(
            parsed,
            request_timeout=args.request_timeout,
            notify_timeout=args.notify_timeout,
            verbose=args.verbose,
        )
        if not args.silent:
            print_transcript(lsp_proc, result)
def main():
    """Read command files (or stdin), run them through the LSP processor, and
    optionally print the transcript."""
    # Flag definitions: timeouts for the two message classes, plus verbosity
    # and output suppression.
    option_specs = [
        (('--request_timeout',),
         dict(type=int, action='store', default=30,
              help='duration to wait for request responses, in seconds.')),
        (('--notify_timeout',),
         dict(type=int, action='store', default=1,
              help='duration to wait for notify responses, in seconds.')),
        (('--verbose',),
         dict(action='store_true', default=False,
              help='display diagnostic information while reading/writing.')),
        (('--silent',),
         dict(action='store_true', default=False,
              help='suppresses printing of transcript, but not diagnostics.')),
        (('files',),
         dict(metavar='FILE', nargs='*', default=['-'],
              help='list of files to read, if empty, stdin is used.')),
    ]
    parser = argparse.ArgumentParser()
    for names, options in option_specs:
        parser.add_argument(*names, **options)
    args = parser.parse_args()
    commands = LspCommandProcessor.parse_commands(read_commands(args.files))
    with LspCommandProcessor.create() as lsp_proc:
        transcript = lsp_proc.communicate(
            commands,
            request_timeout=args.request_timeout,
            notify_timeout=args.notify_timeout,
            verbose=args.verbose,
        )
        if not args.silent:
            print_transcript(lsp_proc, transcript)
def run_spec(
    self,
    spec: LspTestSpec,
    variables: Mapping[str, str],
    wait_for_server: bool,
    use_serverless_ide: bool,
) -> None:
    """Execute an LspTestSpec, log both sides of the transcript, and raise
    AssertionError if the spec reported any mismatch.

    `wait_for_server` runs a typecheck first so hh_server is ready;
    `use_serverless_ide` instead stops hh_server. The two are mutually
    exclusive (asserted below).
    """
    if wait_for_server:
        # Fixes vs. original: the message named `run_lsp_test` even though
        # this method is `run_spec` (copy-paste error), and was missing a
        # space between "requests." and "(If".
        assert not use_serverless_ide, (
            "Warning: both `wait_for_server` and `use_serverless_ide` "
            + "were set to `True` for testing in "
            + self.run_spec.__name__
            + ". "
            + "While this is a possible test case, it hasn't been written yet, "
            + "so it's more likely that this is a mistake "
            + "and you're accidentally relying on hh_server to fulfill "
            + "serverless IDE requests. "
            + "(If you're writing that test, "
            + "then it's time to remove this assertion.)"
        )
        # wait until hh_server is ready before starting lsp
        self.test_driver.run_check()
    elif use_serverless_ide:
        self.test_driver.stop_hh_server()

    with LspCommandProcessor.create(
        self.test_driver.test_env, use_serverless_ide=use_serverless_ide
    ) as lsp_command_processor:
        (observed_transcript, error_details) = spec.run(
            lsp_command_processor=lsp_command_processor, variables=variables
        )

    def _dump_log(suffix: str, messages) -> None:
        # Persist one direction of the transcript next to the test template.
        path = os.path.join(self.test_driver.template_repo, spec.name + suffix)
        with open(path, "w") as f:
            f.write(json.dumps(messages, indent=2))

    _dump_log(
        ".sent.log",
        [
            sent
            for sent, _received in observed_transcript.values()
            if sent is not None
        ],
    )
    _dump_log(
        ".received.log",
        [
            received
            for _sent, received in observed_transcript.values()
            if received is not None
        ],
    )
    if not use_serverless_ide:
        # If the server's busy, maybe the machine's just under too much
        # pressure to give results in a timely fashion. Doing a retry would
        # only defer the question of what to do in that case, so instead
        # we'll just skip.
        self.throw_on_skip(observed_transcript)
    if error_details is not None:
        raise AssertionError(error_details)
def main():
    """Read LSP commands, run them through the processor, print the transcript.

    Fix: the local holding the parsed commands was named `json`, shadowing the
    stdlib `json` module; it has been renamed to `commands`.
    """
    with LspCommandProcessor.create() as lsp_proc:
        commands = lsp_proc.parse_commands(read_commands())
        transcript = lsp_proc.communicate(commands)
        print_transcript(lsp_proc, transcript)