def test_GetCompletions_CacheIsNotValid_DifferentForceSemantic( self, app, candidates_list, *args ):
  """Forcing semantic completion on one of two otherwise identical requests
  must invalidate the completion cache, so the completer is queried twice."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    # First request: semantic completion is explicitly forced.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.attr',
                            line_num = 1,
                            column_num = 12,
                            force_semantic = True )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeA' ) ) )

    # Second request: same position and contents, but nothing forced.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.attr',
                            line_num = 1,
                            column_num = 12 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeB' ) ) )

    # The differing force_semantic flag invalidated the cache, so candidates
    # were computed for each request.
    assert_that( candidates_list.call_count, equal_to( 2 ) )
def test_GetCompletions_CacheIsNotValid_DifferentNumberOfLines( self, app, candidates_list, *args ):
  """A change in the total number of lines in the file — with the cursor and
  current line unchanged — must invalidate the completion cache."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    # Two-line buffer.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.attr\nobjectB.attr',
                            line_num = 1,
                            column_num = 12 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeA' ) ) )

    # One-line buffer; cursor position and current line are identical.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.attr',
                            line_num = 1,
                            column_num = 12 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeB' ) ) )

    # The differing line count invalidated the cache, so the completer was
    # asked for candidates twice.
    assert_that( candidates_list.call_count, equal_to( 2 ) )
def GetCompletions_CacheIsValid_test( app, candidates_list, *args ):
  """Typing one more character of the same identifier on the same line must
  reuse the cached candidates instead of querying the completer again."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    # Initial request fills the cache.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'object.attr',
                            line_num = 1,
                            column_num = 12 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attribute' ) ) )

    # Same completion start, one character more typed: served from the cache.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'object.attri',
                            line_num = 1,
                            column_num = 13 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attribute' ) ) )

    # Only the first request reached the completer.
    assert_that( candidates_list.call_count, equal_to( 1 ) )
def GetCompletions_CacheIsNotValid_DifferentContents_test( candidates_list, should_use, app ):
  """Changing the file contents on a line other than the cursor line must
  still invalidate the completion cache."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    # First version of the buffer.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA = foo\nobjectA.attr',
                            line_num = 2,
                            column_num = 12 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeA' ) ) )

    # Same cursor position and current line, but line 1 changed.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA = bar\nobjectA.attr',
                            line_num = 2,
                            column_num = 12 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeB' ) ) )

    # The contents change invalidated the cache: two completer queries.
    assert_that( candidates_list.call_count, equal_to( 2 ) )
def GetCompletions_CacheIsNotValid_DifferentCompletionType_test( app, candidates_list, *args ):
  """Two requests identical in position and contents must still bypass the
  cache when the completer reports different completion types (the patched
  completer controls the type via the mocks in *args)."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.attr',
                            line_num = 1,
                            column_num = 12 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeA' ) ) )

    # Byte-identical request; only the completion type differs.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.attr',
                            line_num = 1,
                            column_num = 12 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeB' ) ) )

    # The differing completion type invalidated the cache.
    assert_that( candidates_list.call_count, equal_to( 2 ) )
def Diagnostics_DoesWork_test( app, *args ):
  """/detailed_diagnostic returns the patched completer's diagnostic text."""
  with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
    diag_request = BuildRequest( contents = 'foo = 5',
                                 filetype = 'dummy_filetype' )
    assert_that( app.post_json( '/detailed_diagnostic', diag_request ).json,
                 MessageMatcher( 'detailed diagnostic' ) )
def test_GetCompletions_CacheIsNotValid_DifferentExtraConfData( self, app, candidates_list, *args ):
  """Supplying extra_conf_data on an otherwise identical request must
  invalidate the completion cache."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    # No extra conf data on the first request.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.attr',
                            line_num = 1,
                            column_num = 12 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeA' ) ) )

    # Identical request except for the extra conf data.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.attr',
                            line_num = 1,
                            column_num = 12,
                            extra_conf_data = { 'key': 'value' } )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeB' ) ) )

    # The extra conf data invalidated the cache: two completer queries.
    assert_that( candidates_list.call_count, equal_to( 2 ) )
def GetCompletions_ForceSemantic_Works_test( app, *args ):
  """force_semantic makes the semantic completer produce candidates even
  without a trigger at the cursor."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    request = BuildRequest( filetype = 'dummy_filetype',
                            force_semantic = True )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'foo' ),
                            CompletionEntryMatcher( 'bar' ),
                            CompletionEntryMatcher( 'qux' ) ) )
def GetCompletions_SemanticCompleter_WorksWhenTriggerIsIdentifier_test( candidates, app ):
  """The semantic completer fires even when the trigger is an identifier
  character rather than punctuation."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'some_can',
                            column_num = 9 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'some_candidate' ) ) )
def Diagnostics_DoesntWork_test( app ):
  """/detailed_diagnostic returns an internal-server-error response wrapping
  NoDiagnosticSupport when the completer has no diagnostic support.

  Uses the hamcrest assert_that/equal_to style of the sibling tests instead
  of nose's eq_ for consistency.
  """
  with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
    diag_data = BuildRequest( contents = "foo = 5",
                              line_num = 2,
                              filetype = 'dummy_filetype' )
    # expect_errors lets us inspect the error response instead of raising.
    response = app.post_json( '/detailed_diagnostic',
                              diag_data,
                              expect_errors = True )
    assert_that( response.status_code,
                 equal_to( requests.codes.internal_server_error ) )
    assert_that( response.json, ErrorMatcher( NoDiagnosticSupport ) )
def MiscHandlers_SignatureHelp_DefaultEmptyResponse_test( app, *args ):
  """/signature_help defaults to an empty signature set with no errors."""
  with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
    request_data = BuildRequest( filetype = 'dummy_filetype' )
    response = app.post_json( '/signature_help', request_data ).json
    expected_help = has_entries( {
      'activeSignature': 0,
      'activeParameter': 0,
      'signatures': empty()
    } )
    assert_that( response, has_entries( { 'signature_help': expected_help,
                                          'errors': empty() } ) )
def MiscHandlers_SignatureHelp_ComputeSignatureThrows_test( app, *args ):
  """When signature computation raises, /signature_help still returns the
  empty default payload, with the exception reported in 'errors'.

  Removed a leftover debug print of the response.
  """
  with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
    request_data = BuildRequest( filetype = 'dummy_filetype' )
    response = app.post_json( '/signature_help', request_data ).json
    assert_that( response, has_entries( {
      'signature_help': has_entries( {
        'activeSignature': 0,
        'activeParameter': 0,
        'signatures': empty()
      } ),
      'errors': contains( ErrorMatcher( RuntimeError, '' ) )
    } ) )
def test_GetCompletions_CacheIsNotValid_DifferentFileData( self, app, candidates_list, *args ):
  """Changing the contents of a *different* file in file_data must invalidate
  the completion cache even though the current file is unchanged."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    # First request: /bar defines objectA as foo.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.attr',
                            line_num = 1,
                            column_num = 12,
                            file_data = {
                              '/bar': {
                                'contents': 'objectA = foo',
                                'filetypes': [ 'dummy_filetype' ]
                              }
                            } )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeA' ) ) )

    # Second request: identical except /bar now defines objectA as bar.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.attr',
                            line_num = 1,
                            column_num = 12,
                            file_data = {
                              '/bar': {
                                'contents': 'objectA = bar',
                                'filetypes': [ 'dummy_filetype' ]
                              }
                            } )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions,
                 has_items( CompletionEntryMatcher( 'attributeB' ) ) )

    # The other file's change invalidated the cache: two completer queries.
    assert_that( candidates_list.call_count, equal_to( 2 ) )
def GetCompletions_FilterThenReturnFromCache_test( app, candidates_list, *args ):
  """Filtering cached candidates (which round-trips them through bytes) must
  not corrupt the cache: a later unfiltered request still gets str results."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    # Empty query: fills the cache with the full candidate set.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.',
                            line_num = 1,
                            column_num = 9 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions, has_items( CompletionEntryMatcher( 'aba' ),
                                         CompletionEntryMatcher( 'cbc' ) ) )

    # Non-empty query filters the cached set, converting entries to bytes
    # and back in the process.
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.c',
                            line_num = 1,
                            column_num = 10 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions, has_items( CompletionEntryMatcher( 'cbc' ) ) )

    # Repeat the original empty query: the full set must come back intact
    # (proper results, not bytes objects).
    request = BuildRequest( filetype = 'dummy_filetype',
                            contents = 'objectA.',
                            line_num = 1,
                            column_num = 9 )
    completions = app.post_json( '/completions', request ).json[ 'completions' ]
    assert_that( completions, has_items( CompletionEntryMatcher( 'aba' ),
                                         CompletionEntryMatcher( 'cbc' ) ) )

    # All three requests were served by a single completer query.
    assert_that( candidates_list.call_count, equal_to( 1 ) )
def MiscHandlers_SemanticCompletionAvailable_test( app ):
  """/semantic_completion_available is true when a completer is registered
  for the request's filetype."""
  with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
    request_data = BuildRequest( filetype = 'dummy_filetype' )
    response = app.post_json( '/semantic_completion_available', request_data )
    assert_that( response.json, equal_to( True ) )
def MiscHandlers_Ready_Subserver_test( app ):
  """/ready with a subserver parameter reports the patched completer ready."""
  with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
    response = app.get( '/ready', { 'subserver': 'dummy_filetype' } )
    assert_that( response.json, equal_to( True ) )
def MiscHandlers_SignatureHelpAvailable_Subserver_test( app ):
  """/signature_help_available reports 'NO' for the dummy completer."""
  with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
    response = app.get( '/signature_help_available',
                        { 'subserver': 'dummy_filetype' } )
    assert_that( response.json, SignatureAvailableMatcher( 'NO' ) )
def MiscHandlers_ReceiveMessages_NotSupportedByCompleter_test( app ):
  """/receive_messages returns False when the completer lacks support."""
  with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
    request_data = BuildRequest( filetype = 'dummy_filetype' )
    response = app.post_json( '/receive_messages', request_data )
    assert_that( response.json, equal_to( False ) )
def Subcommands_Basic_test( get_subcmd_map, app ):
  """/defined_subcommands lists the completer's subcommands for an explicit
  completer_target."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    subcommands_data = BuildRequest( completer_target = 'dummy_filetype' )
    response = app.post_json( '/defined_subcommands', subcommands_data )
    assert_that( response.json, contains_exactly( 'A', 'B', 'C' ) )
def Subcommands_NoExplicitCompleterTargetSpecified_test( get_subcmd_map, app ):
  """Without an explicit completer_target, the request's filetype selects the
  completer whose subcommands are listed."""
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    subcommands_data = BuildRequest( filetype = 'dummy_filetype' )
    response = app.post_json( '/defined_subcommands', subcommands_data )
    assert_that( response.json, contains_exactly( 'A', 'B', 'C' ) )
def Subcommands_Basic_test( app, *args ):
  """/defined_subcommands lists the completer's subcommands for an explicit
  completer_target.

  Uses hamcrest assert_that/equal_to instead of nose's eq_ for consistency
  with the other tests in this file.
  """
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    subcommands_data = BuildRequest( completer_target = 'dummy_filetype' )
    assert_that( app.post_json( '/defined_subcommands', subcommands_data ).json,
                 equal_to( [ 'A', 'B', 'C' ] ) )
def Subcommands_NoExplicitCompleterTargetSpecified_test( app, *args ):
  """Without an explicit completer_target, the request's filetype selects the
  completer whose subcommands are listed.

  Uses hamcrest assert_that/equal_to instead of nose's eq_ for consistency
  with the other tests in this file.
  """
  with PatchCompleter( DummyCompleter, 'dummy_filetype' ):
    subcommands_data = BuildRequest( filetype = 'dummy_filetype' )
    assert_that( app.post_json( '/defined_subcommands', subcommands_data ).json,
                 equal_to( [ 'A', 'B', 'C' ] ) )
def test_MiscHandlers_Healthy_Subserver( self, app ):
  """/healthy with a subserver parameter reports the patched completer
  healthy."""
  with PatchCompleter( DummyCompleter, filetype = 'dummy_filetype' ):
    response = app.get( '/healthy', { 'subserver': 'dummy_filetype' } )
    assert_that( response.json, equal_to( True ) )