def test_parse_utf8_data(self):
    """A UTF-8 encoded CSV stream is decoded and parsed row by row."""
    # Note: not named `csv` to avoid shadowing the stdlib module name.
    raw_text = u"a,b\nà,ù"
    stream = _string_io(raw_text, "utf-8")
    parsed = parse_csv(stream)
    assert_that(parsed, contains(contains(["a", "b"], [u"à", u"ù"])))
def DebugInfo_Initialized_test( app ):
  """After initialization, /debug_info reports a running clangd server."""
  request_data = BuildRequest( filepath = PathToTestFile( 'basic.cpp' ),
                               filetype = 'cpp' )
  # Wait for clangd to finish initializing before querying debug info.
  RunAfterInitialized( app, { 'request': request_data } )
  assert_that(
    app.post_json( '/debug_info', request_data ).json,
    has_entry( 'completer', has_entries( {
      'name': 'clangd',
      'servers': contains( has_entries( {
        'name': 'clangd',
        'is_running': True,
        'extras': contains(
          has_entries( { 'key': 'Server State',
                         'value': 'Initialized', } ),
          has_entries( { 'key': 'Project Directory',
                         'value': PathToTestFile(), } ),
          has_entries( { 'key': 'Settings',
                         'value': '{}', } ),
        ),
      } ) ),
      'items': empty()
    } ) ) )
def Diagnostics_ZeroBasedLineAndColumn_test( app ):
  """Diagnostics are reported with 1-based line/column positions."""
  # NOTE(review): multiline source reconstructed so that `baz` starts at
  # line 3, column 10 and the "foo" literal spans columns 16-21 — matching
  # the asserted locations below; confirm against the original file.
  contents = """
void foo() {
  double baz = "foo";
}
// Padding to 5 lines
// Padding to 5 lines
"""
  event_data = BuildRequest( compilation_flags = [ '-x', 'c++' ],
                             event_name = 'FileReadyToParse',
                             contents = contents,
                             filepath = 'foo',
                             filetype = 'cpp' )
  results = app.post_json( '/event_notification', event_data ).json
  assert_that( results, contains( has_entries( {
    'kind': equal_to( 'ERROR' ),
    'text': contains_string( 'cannot initialize' ),
    'ranges': contains( RangeMatcher( 'foo', ( 3, 16 ), ( 3, 21 ) ) ),
    'location': LocationMatcher( 'foo', 3, 10 ),
    'location_extent': RangeMatcher( 'foo', ( 3, 10 ), ( 3, 13 ) )
  } ) ) )
def RefactorRename_MultipleFiles_OnFileReadyToParse_test( self ):
  """RefactorRename updates identifiers across files loaded both eagerly
  (via .tern-project) and via the FileReadyToParse event."""
  file1 = self._PathToTestFile( 'file1.js' )
  file2 = self._PathToTestFile( 'file2.js' )
  file3 = self._PathToTestFile( 'file3.js' )

  # This test is roughly the same as the previous one, except here file4.js is
  # pushed into the Tern engine via 'opening it in the editor' (i.e.
  # FileReadyToParse event). The first 3 are loaded into the tern server
  # because they are listed in the .tern-project file's loadEagerly option.
  file4 = self._PathToTestFile( 'file4.js' )

  # Read the buffer contents with a context manager so the file handle is
  # closed deterministically (the original `open( file4 ).read()` leaked it).
  with open( file4 ) as file4_handle:
    file4_contents = file4_handle.read()

  self._app.post_json( '/event_notification',
                       self._BuildRequest( **{
                         'filetype': 'javascript',
                         'event_name': 'FileReadyToParse',
                         'contents': file4_contents,
                         'filepath': file4,
                       } ),
                       expect_errors = False )

  self._RunTest( {
    'description': 'FileReadyToParse loads files into tern server',
    'request': {
      'command': 'RefactorRename',
      'arguments': [ 'a-quite-long-string' ],
      'filepath': file1,
      'line_num': 3,
      'column_num': 14,
    },
    'expect': {
      'response': httplib.OK,
      'data': {
        'fixits': contains( has_entries( {
          'chunks': contains(
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file1, 1, 5 ),
                          LocationMatcher( file1, 1, 11 ) ),
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file1, 3, 14 ),
                          LocationMatcher( file1, 3, 19 ) ),
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file2, 2, 14 ),
                          LocationMatcher( file2, 2, 19 ) ),
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file3, 3, 12 ),
                          LocationMatcher( file3, 3, 17 ) ),
            ChunkMatcher( 'a-quite-long-string',
                          LocationMatcher( file4, 4, 22 ),
                          LocationMatcher( file4, 4, 28 ) ) ),
          'location': LocationMatcher( file1, 3, 14 )
        } ) )
      }
    }
  } )
def Diagnostics_ZeroBasedLineAndColumn_test( app ):
  """clangd diagnostics come back with 1-based line/column positions."""
  # NOTE(review): multiline source reconstructed so that `baz` starts at
  # line 3, column 10 — matching the asserted locations below; confirm
  # against the original file.
  contents = """
void foo() {
  double baz = "foo";
}
// Padding to 5 lines
// Padding to 5 lines
"""
  filepath = PathToTestFile( 'foo.cc' )
  diag_request = { 'contents': contents,
                   'filepath': filepath,
                   'filetype': 'cpp' }
  results = RunAfterInitialized( app, { 'request': diag_request,
                                        'route': '/receive_messages' } )
  assert_that( results, contains( has_entries( {
    'diagnostics': contains( has_entries( {
      'kind': equal_to( 'ERROR' ),
      'text': contains_string( 'Cannot initialize' ),
      'ranges': contains( RangeMatcher( filepath, ( 3, 10 ), ( 3, 13 ) ) ),
      'location': LocationMatcher( filepath, 3, 10 ),
      'location_extent': RangeMatcher( filepath, ( 3, 10 ), ( 3, 13 ) )
    } ) )
  } ) ) )
def Diagnostics_MaximumDiagnosticsNumberExceeded_test( app ):
  """Hitting the diagnostics cap appends a sentinel 'exceeded' diagnostic."""
  filepath = PathToTestFile( 'max_diagnostics.cc' )
  event_data = BuildRequest( contents = ReadFile( filepath ),
                             event_name = 'FileReadyToParse',
                             filetype = 'cpp',
                             filepath = filepath,
                             compilation_flags = [ '-x', 'c++' ] )
  response = app.post_json( '/event_notification', event_data ).json
  pprint( response )

  # The one real diagnostic that fits under the cap.
  redefinition_diag = has_entries( {
    'kind': equal_to( 'ERROR' ),
    'location': LocationMatcher( filepath, 3, 9 ),
    'location_extent': RangeMatcher( filepath, ( 3, 9 ), ( 3, 13 ) ),
    'ranges': empty(),
    'text': equal_to( "redefinition of 'test'" ),
    'fixit_available': False
  } )
  # The sentinel diagnostic signalling that the cap was reached.
  limit_diag = has_entries( {
    'kind': equal_to( 'ERROR' ),
    'location': LocationMatcher( filepath, 1, 1 ),
    'location_extent': RangeMatcher( filepath, ( 1, 1 ), ( 1, 1 ) ),
    'ranges': contains( RangeMatcher( filepath, ( 1, 1 ), ( 1, 1 ) ) ),
    'text': equal_to( 'Maximum number of diagnostics exceeded.' ),
    'fixit_available': False
  } )
  assert_that( response, contains( redefinition_diag, limit_diag ) )
def FixIt_Check_cpp11_Note( results ):
  """Verify the two note fixits: parenthesize the assignment, or use ==."""
  # First note: put parens around it
  parens_fixit = has_entries( {
    'text': contains_string( 'parentheses around the assignment' ),
    'chunks': contains(
      ChunkMatcher( '(', LineColMatcher( 59, 8 ), LineColMatcher( 59, 8 ) ),
      ChunkMatcher( ')', LineColMatcher( 61, 12 ), LineColMatcher( 61, 12 ) )
    ),
    'location': LineColMatcher( 60, 8 ),
  } )
  # Second note: change to ==
  equality_fixit = has_entries( {
    'text': contains_string( '==' ),
    'chunks': contains(
      ChunkMatcher( '==', LineColMatcher( 60, 8 ), LineColMatcher( 60, 9 ) )
    ),
    'location': LineColMatcher( 60, 8 ),
  } )
  assert_that( results, has_entries( {
    'fixits': contains( parens_fixit, equality_fixit )
  } ) )
def DebugInfo_ServerIsRunning_test( app ):
  """Once ready, /debug_info reports a running OmniSharp server."""
  filepath = PathToTestFile( 'testy', 'Program.cs' )
  app.post_json( '/event_notification',
                 BuildRequest( filepath = filepath,
                               filetype = 'cs',
                               contents = ReadFile( filepath ),
                               event_name = 'FileReadyToParse' ) )
  WaitUntilCompleterServerReady( app, 'cs' )

  request_data = BuildRequest( filepath = filepath, filetype = 'cs' )
  assert_that(
    app.post_json( '/debug_info', request_data ).json,
    has_entry( 'completer', has_entries( {
      'name': 'C#',
      'servers': contains( has_entries( {
        'name': 'OmniSharp',
        'is_running': True,
        'executable': instance_of( str ),
        'pid': instance_of( int ),
        'address': instance_of( str ),
        'port': instance_of( int ),
        'logfiles': contains( instance_of( str ), instance_of( str ) ),
        'extras': contains( has_entries( {
          'key': 'solution',
          'value': instance_of( str )
        } ) )
      } ) ),
      'items': empty()
    } ) ) )
def EventNotification_FileReadyToParse_SyntaxKeywords_ClearCacheIfRestart_test(
    ycm, *args ):
  """Syntax keywords are sent again after a server restart clears the cache."""
  buf = VimBuffer( name = 'current_buffer', filetype = 'some_filetype' )

  def AssertKeywordsWereSent( call_args ):
    # Positional arguments passed to PostDataToHandlerAsync.
    assert_that( call_args[ 0 ],
                 contains( has_entry( 'syntax_keywords',
                                      has_items( 'foo', 'bar' ) ),
                           'event_notification' ) )

  with patch( 'ycm.client.event_notification.EventNotification.'
              'PostDataToHandlerAsync' ) as post_data_to_handler_async:
    with MockVimBuffers( [ buf ], [ buf ] ):
      ycm.OnFileReadyToParse()
      AssertKeywordsWereSent( post_data_to_handler_async.call_args )

      # Send again the syntax keywords after restarting the server.
      ycm.RestartServer()
      WaitUntilReady()
      ycm.OnFileReadyToParse()
      AssertKeywordsWereSent( post_data_to_handler_async.call_args )
def test_parse_xlsx_handle_empty_cells_and_lines(self):
    """Empty cells and fully-empty rows come back as None values."""
    parsed = self._parse_excel("empty_cell_and_row.xlsx")
    expected_rows = [
        ["Next cell is none", None, "Previous cell is none"],
        [None, None, None],
        ["The above row", "is full", "of nones"],
    ]
    assert_that(parsed, contains(contains(*expected_rows)))
def EventNotification_FileReadyToParse_SyntaxKeywords_SeedWithCache_test(
    ycm, *args ):
  """Syntax keywords are sent on the first parse only, then cached."""
  buf = VimBuffer( name = 'current_buffer', filetype = 'some_filetype' )
  with patch( 'ycm.client.event_notification.EventNotification.'
              'PostDataToHandlerAsync' ) as post_data_to_handler_async:
    with MockVimBuffers( [ buf ], [ buf ] ):
      ycm.OnFileReadyToParse()
      # Positional arguments passed to PostDataToHandlerAsync.
      assert_that( post_data_to_handler_async.call_args[ 0 ],
                   contains( has_entry( 'syntax_keywords',
                                        has_items( 'foo', 'bar' ) ),
                             'event_notification' ) )

      # Do not send again syntax keywords in subsequent requests.
      ycm.OnFileReadyToParse()
      assert_that( post_data_to_handler_async.call_args[ 0 ],
                   contains( is_not( has_key( 'syntax_keywords' ) ),
                             'event_notification' ) )
def test_display_with_a_type_only(self):
    """Headers, types and values stay column-aligned in lookup results."""
    result = self.lookup('lice', 'test')

    assert_that(result['column_headers'],
                contains('fn', 'ln', 'Empty', None, 'Default'))
    assert_that(result['column_types'],
                contains('firstname', None, None, 'status', None))

    first_row = result['results'][0]
    assert_that(first_row['column_values'],
                contains('Alice', 'AAA', None, None, 'Default'))
def test_partially_execute_actions_that_are_started(self):
    """An action already executing at the cutoff is partially executed and
    counted; an action not yet started is not executed at all."""
    # given
    end_time = 20
    # Action that started one tick before its end time, i.e. mid-execution.
    mid_execution_action = Mock(name="mid_execution", end_time=end_time,
                                start_time=end_time-1)
    pre_start_action = Mock(name="not_started")
    execution_state = [
        ActionState(mid_execution_action, None, ExecutionState.executing),
        ActionState(pre_start_action, None, ExecutionState.pre_start)
    ]
    # The patched PriorityQueue presents both actions as queued.
    self.PriorityQueue().empty.return_value = False
    self.PriorityQueue().queue = tuple(execution_state)
    observation_time = 0
    # First pass yields an observation at time 0; second pass yields nothing,
    # which ends the run.
    self.execute_action_queue.side_effect = [
        (ExecutionResult([], {observation_time}, observation_time), {}),
        (ExecutionResult([], set(), None), {})
    ]
    self.execute_partial_actions.return_value = [mid_execution_action]
    model = Mock(name="model")
    execution_extension = 100
    plan = [mid_execution_action, pre_start_action]

    # when
    actual = simulator.run_plan(model, plan, Mock(name="sim_time"),
                                execution_extension)

    # then
    assert_that(actual.executed, contains(mid_execution_action))
    assert_that(actual.observations, contains(observation_time))
    assert_that(actual.simulation_time, equal_to(end_time))
    # Partial execution is extended from the observation time by the
    # configured extension.
    self.execute_partial_actions.assert_called_once_with(
        self.get_executing_actions.return_value, model,
        observation_time+execution_extension)
def Subcommands_RefactorRename_Unicode_test( app ):
  """RefactorRename handles unicode identifiers correctly."""
  filepath = PathToTestFile( 'unicode.js' )
  renamed_chunks = contains(
    ChunkMatcher( '†es†',
                  LocationMatcher( filepath, 5, 5 ),
                  LocationMatcher( filepath, 5, 13 ) ),
    ChunkMatcher( '†es†',
                  LocationMatcher( filepath, 9, 1 ),
                  LocationMatcher( filepath, 9, 9 ) ),
    ChunkMatcher( '†es†',
                  LocationMatcher( filepath, 11, 1 ),
                  LocationMatcher( filepath, 11, 9 ) )
  )
  RunTest( app, {
    'description': 'RefactorRename works with unicode identifiers',
    'request': {
      'command': 'RefactorRename',
      'arguments': [ '†es†' ],
      'filepath': filepath,
      'line_num': 11,
      'column_num': 3,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'fixits': contains( has_entries( {
          'chunks': renamed_chunks,
          'location': LocationMatcher( filepath, 11, 3 )
        } ) )
      } )
    }
  } )
def has_joel_miller():
    """Matcher for a payload whose identities are exactly Joel Miller."""
    joel = has_entries(id="0000000123456789",
                       firstName="Joel",
                       lastName="Miller",
                       type="individual",
                       works=contains(has_entries(title="Chevere!")))
    return has_entry("identities", contains(joel))
def CppBindings_IdentifierCompleter_test():
  """Exercise the C++ IdentifierCompleter bindings: add, query, clear."""
  identifier_completer = ycm_core.IdentifierCompleter()

  first_batch = ycm_core.StringVector()
  for ident in ( 'foo', 'bar', 'baz' ):
    first_batch.append( ToCppStr( ident ) )
  identifier_completer.AddIdentifiersToDatabase( first_batch,
                                                 ToCppStr( 'foo' ),
                                                 ToCppStr( 'file' ) )
  # The completer must not depend on the Python-side vector staying alive.
  del first_batch

  assert_that( identifier_completer.CandidatesForQueryAndType(
                 ToCppStr( 'fo' ), ToCppStr( 'foo' ), 10 ),
               contains( 'foo' ) )
  assert_that( identifier_completer.CandidatesForQueryAndType(
                 ToCppStr( 'fo' ), ToCppStr( 'foo' ) ),
               contains( 'foo' ) )
  assert_that( identifier_completer.CandidatesForQueryAndType(
                 ToCppStr( 'a' ), ToCppStr( 'foo' ) ),
               contains( 'bar', 'baz' ) )

  # Replacing the identifiers for the file drops the old candidates.
  second_batch = ycm_core.StringVector()
  for ident in ( 'oof', 'rab', 'zab' ):
    second_batch.append( ToCppStr( ident ) )
  identifier_completer.ClearForFileAndAddIdentifiersToDatabase(
    second_batch,
    ToCppStr( 'foo' ),
    ToCppStr( 'file' ) )
  assert_that( identifier_completer.CandidatesForQueryAndType(
                 ToCppStr( 'a' ), ToCppStr( 'foo' ) ),
               contains( 'rab', 'zab' ) )
def test_good_gotoassignment_do_not_follow_imports():
    """gotoassignment resolves to the import site with and without
    follow_imports=False."""
    app = TestApp(handlers.app)
    filepath = fixture_filepath('follow_imports', 'importer.py')
    request_data = {
        'source': read_file(filepath),
        'line': 3,
        'col': 9,
        'source_path': filepath
    }
    expected_definition = {
        'module_path': filepath,
        'name': 'imported_function',
        'type': 'function',
        'in_builtin_module': False,
        'line': 1,
        'column': 21,
        'docstring': 'imported_function()\n\n',
        'description': 'def imported_function',
        'full_name': 'imported.imported_function',
        'is_keyword': False
    }

    def goto_definitions():
        return app.post_json('/gotoassignment', request_data) \
                  .json['definitions']

    assert_that(goto_definitions(), contains(expected_definition))

    # Explicitly disabling follow_imports yields the same definition.
    request_data['follow_imports'] = False
    assert_that(goto_definitions(), contains(expected_definition))
def test_dashboard_with_section_slug_returns_module_and_children(self):
    """Requesting a section slug returns that section and its child modules."""
    dashboard = DashboardFactory(slug='my-first-slug')
    dashboard.owners.add(self.user)
    module_type = ModuleTypeFactory()
    parent = ModuleFactory(
        type=module_type,
        slug='section-we-want',
        order=1,
        dashboard=dashboard
    )
    ModuleFactory(
        type=module_type,
        slug='module-we-want',
        order=2,
        dashboard=dashboard,
        parent=parent)

    resp = self.client.get(
        '/public/dashboards',
        {'slug': 'my-first-slug/section-we-want'})
    data = json.loads(resp.content)

    modules = data['modules']
    assert_that(modules, contains(has_entry('slug', 'section-we-want')))
    assert_that(len(modules), equal_to(1))
    assert_that(modules[0]['modules'],
                contains(has_entry('slug', 'module-we-want')))
    assert_that(data, has_entry('page-type', 'module'))
def test_dashboard_with_tab_slug_only_returns_tab(self):
    """Requesting a tab slug returns only that tab, with the module's info
    and a combined 'module - tab' title."""
    dashboard = DashboardFactory(slug='my-first-slug')
    dashboard.owners.add(self.user)
    module_type = ModuleTypeFactory()
    tab_options = {
        'tabs': [
            {'slug': 'tab-we-want', 'title': 'tab-title'},
            {'slug': 'tab-we-dont-want'},
        ]
    }
    ModuleFactory(
        type=module_type,
        dashboard=dashboard,
        slug='module-we-want',
        info=['module-info'],
        title='module-title',
        options=tab_options)
    ModuleFactory(
        type=module_type,
        dashboard=dashboard,
        slug='module-we-dont-want')

    resp = self.client.get(
        '/public/dashboards',
        {'slug': 'my-first-slug/module-we-want/module-we-want-tab-we-want'}
    )
    data = json.loads(resp.content)

    assert_that(data['modules'], contains(
        has_entries({'slug': 'tab-we-want',
                     'info': contains('module-info'),
                     'title': 'module-title - tab-title'})))
    assert_that(data, has_entry('page-type', 'module'))
def DebugInfo_test( app ):
  """/debug_info for javascript describes the Tern server."""
  request_data = BuildRequest( filetype = 'javascript' )
  server_extras = contains(
    has_entries( { 'key': 'configuration file',
                   'value': instance_of( str ) } ),
    has_entries( { 'key': 'working directory',
                   'value': instance_of( str ) } )
  )
  assert_that(
    app.post_json( '/debug_info', request_data ).json,
    has_entry( 'completer', has_entries( {
      'name': 'JavaScript',
      'servers': contains( has_entries( {
        'name': 'Tern',
        'is_running': instance_of( bool ),
        'executable': instance_of( str ),
        'pid': instance_of( int ),
        'address': instance_of( str ),
        'port': instance_of( int ),
        'logfiles': contains( instance_of( str ), instance_of( str ) ),
        'extras': server_extras,
      } ) ),
      'items': empty()
    } ) ) )
def Subcommands_OrganizeImports_test( app ):
  """OrganizeImports rewrites the import block and removes stale lines."""
  filepath = PathToTestFile( 'imports.js' )
  rewritten_imports = ChunkMatcher(
    matches_regexp( 'import \* as lib from "library";\r?\n'
                    'import func, { func1, func2 } from "library";\r?\n' ),
    LocationMatcher( filepath, 1, 1 ),
    LocationMatcher( filepath, 2, 1 ) )
  RunTest( app, {
    'description': 'OrganizeImports removes unused imports, '
                   'coalesces imports from the same module, and sorts them',
    'request': {
      'command': 'OrganizeImports',
      'filepath': filepath,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'fixits': contains( has_entries( {
          'chunks': contains(
            rewritten_imports,
            # The now-redundant import lines are deleted.
            ChunkMatcher( '',
                          LocationMatcher( filepath, 5, 1 ),
                          LocationMatcher( filepath, 6, 1 ) ),
            ChunkMatcher( '',
                          LocationMatcher( filepath, 9, 1 ),
                          LocationMatcher( filepath, 10, 1 ) ),
          )
        } ) )
      } )
    }
  } )
def _FixIt_Check_cpp11_Repl(results):
    """One fixit replacing columns 6-9 on line 40 with "foo"."""
    replacement_chunk = has_entries({
        "replacement_text": equal_to("foo"),
        "range": has_entries({
            "start": has_entries({"line_num": 40, "column_num": 6}),
            "end": has_entries({"line_num": 40, "column_num": 9}),
        }),
    })
    assert_that(results, has_entries({
        "fixits": contains(has_entries({
            "chunks": contains(replacement_chunk),
            "location": has_entries({"line_num": 40, "column_num": 6}),
        }))
    }))
def _FixIt_Check_objc(results):
    """One fixit inserting "id" at line 5, column 3 (zero-width range)."""
    insertion_chunk = has_entries({
        "replacement_text": equal_to("id"),
        "range": has_entries({
            "start": has_entries({"line_num": 5, "column_num": 3}),
            "end": has_entries({"line_num": 5, "column_num": 3}),
        }),
    })
    assert_that(results, has_entries({
        "fixits": contains(has_entries({
            "chunks": contains(insertion_chunk),
            "location": has_entries({"line_num": 5, "column_num": 3}),
        }))
    }))
def DebugInfo_test( app ):
  """/debug_info for java describes the jdt.ls language server."""
  request_data = BuildRequest( filetype = 'java' )
  server_extras = contains(
    has_entries( { 'key': 'Startup Status',
                   'value': instance_of( str ) } ),
    has_entries( { 'key': 'Java Path',
                   'value': instance_of( str ) } ),
    has_entries( { 'key': 'Launcher Config.',
                   'value': instance_of( str ) } ),
    has_entries( { 'key': 'Project Directory',
                   'value': instance_of( str ) } ),
    has_entries( { 'key': 'Workspace Path',
                   'value': instance_of( str ) } )
  )
  assert_that(
    app.post_json( '/debug_info', request_data ).json,
    has_entry( 'completer', has_entries( {
      'name': 'Java',
      'servers': contains( has_entries( {
        'name': 'jdt.ls Java Language Server',
        'is_running': instance_of( bool ),
        'executable': instance_of( str ),
        'pid': instance_of( int ),
        'logfiles': contains( instance_of( str ), instance_of( str ) ),
        'extras': server_extras
      } ) )
    } ) ) )
def test_basic_query_with_time_limits(self):
    """Records are filtered by start_at / end_at time limits."""
    self._save_all('foo_bar',
                   {'_timestamp': d_tz(2012, 12, 12)},
                   {'_timestamp': d_tz(2012, 12, 14)},
                   {'_timestamp': d_tz(2012, 12, 11)})

    def run_query(**limits):
        return self.engine.execute_query('foo_bar', Query.create(**limits))

    # start at
    assert_that(run_query(start_at=d_tz(2012, 12, 12, 13)),
                contains(has_entry('_timestamp', d_tz(2012, 12, 14))))
    # end at
    assert_that(run_query(end_at=d_tz(2012, 12, 11, 13)),
                contains(has_entry('_timestamp', d_tz(2012, 12, 11))))
    # both
    assert_that(run_query(start_at=d_tz(2012, 12, 11, 12),
                          end_at=d_tz(2012, 12, 12, 12)),
                contains(has_entry('_timestamp', d_tz(2012, 12, 12))))
def LanguageServerCompleter_Diagnostics_PercentEncodeCannonical_test():
  """Diagnostics are matched to the right file even when the server uses a
  different percent-encoding case in the URI ('%3f' vs '%3F')."""
  completer = MockCompleter()
  filepath = os.path.realpath( '/foo?' )
  uri = lsp.FilePathToUri( filepath )
  # The '?' in the path must be percent-encoded (upper-case) in our URI.
  assert_that( uri, ends_with( '%3F' ) )
  request_data = RequestWrap( BuildRequest( line_num = 1,
                                            column_num = 1,
                                            filepath = filepath,
                                            contents = '' ) )
  notification = {
    'jsonrpc': '2.0',
    'method': 'textDocument/publishDiagnostics',
    'params': {
      # Same file, but with lower-case percent-encoding from the server.
      'uri': uri.replace( '%3F', '%3f' ),
      'diagnostics': [ {
        'range': {
          'start': { 'line': 3, 'character': 10 },
          'end': { 'line': 3, 'character': 11 }
        },
        'severity': 1,
        'message': 'First error'
      } ]
    }
  }
  completer.GetConnection()._notifications.put( notification )
  completer.HandleNotificationInPollThread( notification )
  with patch.object( completer, 'ServerIsReady', return_value = True ):
    completer.SendInitialize(
      request_data,
      completer._GetSettingsFromExtraConf( request_data ) )
    # Simulate receipt of response and initialization complete
    initialize_response = { 'result': { 'capabilities': {} } }
    completer._HandleInitializeInPollThread( initialize_response )
    # LSP positions are 0-based; ycmd diagnostics are 1-based, hence 4/11.
    diagnostics = contains( has_entries( {
      'kind': equal_to( 'ERROR' ),
      'location': LocationMatcher( filepath, 4, 11 ),
      'location_extent': RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ),
      'ranges': contains( RangeMatcher( filepath, ( 4, 11 ), ( 4, 12 ) ) ),
      'text': equal_to( 'First error' ),
      'fixit_available': False
    } ) )
    assert_that( completer.OnFileReadyToParse( request_data ), diagnostics )
    assert_that( completer.PollForMessages( request_data ),
                 contains( has_entries( { 'diagnostics': diagnostics,
                                          'filepath': filepath } ) ) )
def Subcommands_FixIt_Unity_test( app ):
  """FixIt works in a unity build: inserting the missing ';'."""
  file_path = PathToTestFile( 'unitya.cc' )
  args = {
    'filetype'         : 'cpp',
    'completer_target' : 'filetype_default',
    'contents'         : ReadFile( file_path ),
    'filepath'         : file_path,
    'command_arguments': [ 'FixIt' ],
    'line_num'         : 11,
    'column_num'       : 17,
  }
  app.post_json( '/load_extra_conf_file', {
    'filepath': PathToTestFile( '.ycm_extra_conf.py' ),
  } )

  # Get the diagnostics for the file.
  results = app.post_json( '/run_completer_command',
                           BuildRequest( **args ) ).json
  pprint( results )

  assert_that( results, has_entries( {
    'fixits': contains( has_entries( {
      'text': contains_string( "expected ';' after expression" ),
      'chunks': contains(
        ChunkMatcher( ';',
                      LocationMatcher( file_path, 11, 18 ),
                      LocationMatcher( file_path, 11, 18 ) ),
      ),
      'location': LocationMatcher( file_path, 11, 18 ),
    } ) )
  } ) )
def _FixIt_Check_cpp11_Del(results):
    """One fixit deleting columns 7-9 on line 35 (removal of ::)."""
    deletion_chunk = has_entries({
        "replacement_text": equal_to(""),
        "range": has_entries({
            "start": has_entries({"line_num": 35, "column_num": 7}),
            "end": has_entries({"line_num": 35, "column_num": 9}),
        }),
    })
    assert_that(results, has_entries({
        "fixits": contains(has_entries({
            "chunks": contains(deletion_chunk),
            "location": has_entries({"line_num": 35, "column_num": 7}),
        }))
    }))
def test_period_group_query_adds_missing_periods_in_correct_order(self):
    """Weeks absent from storage are zero-filled, and each group's values
    stay in chronological order."""
    self.mock_storage.execute_query.return_value = [
        {'some_group': 'val1', '_week_start_at': d(2013, 1, 14), '_count': 23},
        {'some_group': 'val1', '_week_start_at': d(2013, 1, 21), '_count': 41},
        {'some_group': 'val2', '_week_start_at': d(2013, 1, 14), '_count': 31},
        {'some_group': 'val2', '_week_start_at': d(2013, 1, 28), '_count': 12},
    ]

    data = self.data_set.execute_query(
        Query.create(period=WEEK,
                     group_by=['some_group'],
                     start_at=d_tz(2013, 1, 7, 0, 0, 0),
                     end_at=d_tz(2013, 2, 4, 0, 0, 0)))

    def group_with_weekly_counts(group, day_counts):
        # day_counts: ordered (January day of week start, expected count).
        return has_entries({
            "some_group": group,
            "values": contains(*[
                has_entries({"_start_at": d_tz(2013, 1, day),
                             "_count": count})
                for day, count in day_counts
            ]),
        })

    assert_that(data, has_item(group_with_weekly_counts(
        "val1", [(7, 0), (14, 23), (21, 41), (28, 0)])))
    assert_that(data, has_item(group_with_weekly_counts(
        "val2", [(7, 0), (14, 31), (21, 0), (28, 12)])))
def test_display_on_headers_with_no_title(self):
    """Column headers and types are returned for a display with no title."""
    result = self.headers('test')

    assert_that(result['column_headers'],
                contains('fn', 'ln', 'Empty', None, 'Default'))
    assert_that(result['column_types'],
                contains('firstname', None, None, 'status', None))
def test_find_multiple_values(self):
    """find() returns the indices of rows matching any of the given values."""
    series = pd.Series(
        ["hostile", "friendly", "friendly", "not_friendly"],
        index=["wolf", "cat", "dog", "mouse"])

    matches = pandas_util.find(series, ["friendly", "not_friendly"])

    assert_that(matches, contains("cat", "dog", "mouse"))
def test_find_one_value(self):
    """find() returns the indices of rows matching a single value."""
    series = pd.Series(
        ["friendly", "friendly", "not_friendly"],
        index=["cat", "dog", "mouse"])

    matches = pandas_util.find(series, "friendly")

    assert_that(matches, contains("cat", "dog"))
def Subcommands_RefactorRename_MultipleFiles_OnFileReadyToParse_test( app ):
  """RefactorRename covers files loaded eagerly and via FileReadyToParse."""
  WaitUntilCompleterServerReady( app, 'javascript' )

  file1 = PathToTestFile( 'file1.js' )
  file2 = PathToTestFile( 'file2.js' )
  file3 = PathToTestFile( 'file3.js' )

  # This test is roughly the same as the previous one, except here file4.js is
  # pushed into the Tern engine via 'opening it in the editor' (i.e.
  # FileReadyToParse event). The first 3 are loaded into the tern server
  # because they are listed in the .tern-project file's loadEagerly option.
  file4 = PathToTestFile( 'file4.js' )

  app.post_json( '/event_notification',
                 BuildRequest( **{
                   'filetype': 'javascript',
                   'event_name': 'FileReadyToParse',
                   'contents': ReadFile( file4 ),
                   'filepath': file4,
                 } ),
                 expect_errors = False )

  def Renamed( target, line, start_col, end_col ):
    # Chunk replacing [start_col, end_col) on |line| of |target|.
    return ChunkMatcher( 'a-quite-long-string',
                         LocationMatcher( target, line, start_col ),
                         LocationMatcher( target, line, end_col ) )

  RunTest( app, {
    'description': 'FileReadyToParse loads files into tern server',
    'request': {
      'command': 'RefactorRename',
      'arguments': [ 'a-quite-long-string' ],
      'filepath': file1,
      'line_num': 3,
      'column_num': 14,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'fixits': contains( has_entries( {
          'chunks': contains( Renamed( file1, 1, 5, 11 ),
                              Renamed( file1, 3, 14, 20 ),
                              Renamed( file2, 2, 14, 20 ),
                              Renamed( file3, 3, 12, 18 ),
                              Renamed( file4, 4, 22, 28 ) ),
          'location': LocationMatcher( file1, 3, 14 )
        } ) )
      } )
    }
  } )
def GetCompletions_ClientDataGivenToExtraConf_Cache_test(app):
    """Completions are recomputed when the client's extra conf data changes
    between requests at the same position."""
    app.post_json(
        '/load_extra_conf_file',
        {'filepath': PathToTestFile('client_data', '.ycm_extra_conf.py')})

    filepath = PathToTestFile('client_data', 'macro.cpp')
    request = {
        'filetype': 'cpp',
        'filepath': filepath,
        'contents': ReadFile(filepath),
        'line_num': 11,
        'column_num': 8
    }

    def complete_with(extra):
        return app.post_json('/completions',
                             CombineRequest(request, extra)).json

    # Complete with flags from the client.
    assert_that(
        complete_with({'extra_conf_data': {'flags': ['-DSOME_MACRO']}}),
        has_entries({
            'completions': has_item(CompletionEntryMatcher('macro_defined')),
            'errors': empty()
        }))

    # Complete at the same position but for a different set of flags from the
    # client.
    assert_that(
        complete_with({'extra_conf_data': {'flags': ['-Wall']}}),
        has_entries({
            'completions': has_item(
                CompletionEntryMatcher('macro_not_defined')),
            'errors': empty()
        }))

    # Finally, complete once again at the same position but no flags are given
    # by the client. An empty list of flags is returned by the extra conf file
    # in that case.
    assert_that(
        complete_with({}),
        has_entries({
            'completions': empty(),
            'errors': contains(
                ErrorMatcher(RuntimeError,
                             'Still no compile flags, no completions yet.'))
        }))
def check_unchanged_capacity(context):
    """Every resource group still has its original target capacity."""
    current_capacities = [
        group.target_capacity
        for group in context.pool_manager.resource_groups.values()
    ]
    assert_that(current_capacities, contains(*context.original_capacities))
def test_find_all_by_no_ivr(self):
    """find_all_by yields an empty result when no IVR matches."""
    assert_that(ivr_dao.find_all_by(description='toto'), contains())
def SignatureHelp_MultipleSignatures_test(app):
    """activeParameter follows the cursor through an overloaded call."""
    filepath = PathToTestFile('testy', 'ContinuousTest.cs')
    contents = ReadFile(filepath)
    request = BuildRequest(line_num=18,
                           column_num=15,
                           filetypes=['cs'],
                           filepath=filepath,
                           contents=contents)

    # Both overloads are offered regardless of the cursor position.
    overloaded_signatures = contains(
        SignatureMatcher('void ContinuousTest.Overloaded(int i, int a)',
                         [ParameterMatcher(31, 36),
                          ParameterMatcher(38, 43)]),
        SignatureMatcher('void ContinuousTest.Overloaded(string s)',
                         [ParameterMatcher(31, 39)]),
    )

    def check_signature_help(active_parameter):
        with WrapOmniSharpServer(app, filepath):
            response = app.post_json('/signature_help', request).json
            LOGGER.debug('response = %s', response)
            assert_that(
                response,
                has_entries({
                    'errors': empty(),
                    'signature_help': has_entries({
                        'activeSignature': 0,
                        'activeParameter': active_parameter,
                        'signatures': overloaded_signatures
                    })
                }))

    # Cursor on the first argument.
    check_signature_help(0)
    # Cursor moved to the second argument.
    request['column_num'] = 20
    check_signature_help(1)
def EventNotification_OnBufferUnload_CloseFile_test(app):
    """Unloading a modified buffer discards its dirty contents: completions
    fall back to the on-disk version of imported.ts."""
    # Open main.ts file in a buffer.
    main_filepath = PathToTestFile('buffer_unload', 'main.ts')
    main_contents = ReadFile(main_filepath)
    event_data = BuildRequest(filepath=main_filepath,
                              filetype='typescript',
                              contents=main_contents,
                              event_name='BufferVisit')
    app.post_json('/event_notification', event_data)

    # Complete in main.ts buffer an object defined in imported.ts.
    completion_data = BuildRequest(filepath=main_filepath,
                                   filetype='typescript',
                                   contents=main_contents,
                                   line_num=3,
                                   column_num=10)
    response = app.post_json('/completions', completion_data)
    assert_that(
        response.json,
        has_entries(
            {'completions': contains(CompletionEntryMatcher('method'))}))

    # Open imported.ts file in another buffer.
    imported_filepath = PathToTestFile('buffer_unload', 'imported.ts')
    imported_contents = ReadFile(imported_filepath)
    event_data = BuildRequest(filepath=imported_filepath,
                              filetype='typescript',
                              contents=imported_contents,
                              event_name='BufferVisit')
    app.post_json('/event_notification', event_data)

    # Modify imported.ts buffer without writing the changes to disk.
    modified_imported_contents = imported_contents.replace(
        'method', 'modified_method')

    # FIXME: TypeScript completer should not rely on the FileReadyToParse
    # events to synchronize the contents of dirty buffers but use instead the
    # file_data field of the request.
    event_data = BuildRequest(filepath=imported_filepath,
                              filetype='typescript',
                              contents=modified_imported_contents,
                              event_name='FileReadyToParse')
    app.post_json('/event_notification', event_data)

    # Complete at same location in main.ts buffer; the dirty contents of
    # imported.ts are supplied through file_data.
    imported_data = {
        imported_filepath: {
            'filetypes': ['typescript'],
            'contents': modified_imported_contents
        }
    }
    completion_data = BuildRequest(filepath=main_filepath,
                                   filetype='typescript',
                                   contents=main_contents,
                                   line_num=3,
                                   column_num=10,
                                   file_data=imported_data)
    response = app.post_json('/completions', completion_data)
    assert_that(
        response.json,
        has_entries({
            'completions': contains(CompletionEntryMatcher('modified_method'))
        }))

    # Unload imported.ts buffer.
    event_data = BuildRequest(filepath=imported_filepath,
                              filetype='typescript',
                              contents=imported_contents,
                              event_name='BufferUnload')
    app.post_json('/event_notification', event_data)

    # Complete at same location in main.ts buffer: the on-disk contents of
    # imported.ts are used again, so the original identifier is back.
    completion_data = BuildRequest(filepath=main_filepath,
                                   filetype='typescript',
                                   contents=main_contents,
                                   line_num=3,
                                   column_num=10)
    response = app.post_json('/completions', completion_data)
    assert_that(
        response.json,
        has_entries(
            {'completions': contains(CompletionEntryMatcher('method'))}))
def has_args_containing(message):
    """Matcher for an object whose ``args`` sequence is exactly (message,)."""
    args_matcher = contains(message)
    return has_property("args", args_matcher)
MAIN_FILEPATH = PathToTestFile('common', 'src', 'main.rs')

# Expected diagnostics for main.rs, keyed by file path.
DIAG_MATCHERS_PER_FILE = {
    MAIN_FILEPATH: contains_inanyorder(
        has_entries({
            'kind': 'ERROR',
            'text': 'no field `build_` on type `test::Builder`'
                    '\n\nunknown field',
            'location': LocationMatcher(MAIN_FILEPATH, 14, 13),
            'location_extent': RangeMatcher(MAIN_FILEPATH,
                                            (14, 13), (14, 19)),
            'ranges': contains(RangeMatcher(MAIN_FILEPATH,
                                            (14, 13), (14, 19))),
            'fixit_available': False
        }))
}


@SharedYcmd
def Diagnostics_FileReadyToParse_test(app):
    filepath = PathToTestFile('common', 'src', 'main.rs')
    contents = ReadFile(filepath)

    # It can take a while for the diagnostics to be ready.
    results = WaitForDiagnosticsToBeReady(app, filepath, contents, 'rust')
    print('completer response: {}'.format(pformat(results)))
    # NOTE(review): no assertion against DIAG_MATCHERS_PER_FILE appears here —
    # the function looks truncated; confirm that the original test asserts on
    # |results| after the print.
def test_tenant_segregation(self):
    """Check tenant visibility rules for every kind of scoping tenant."""
    # This test will use the following tenant structure
    #     top
    #    / | \
    #   a  e  h
    #  / \ |
    # d  b f
    #   /\
    #  g  c
    top_uuid = self._top_tenant_uuid()
    a_uuid = self._create_tenant(name='a', parent_uuid=top_uuid)
    e_uuid = self._create_tenant(name='e', parent_uuid=top_uuid)
    h_uuid = self._create_tenant(name='h', parent_uuid=top_uuid)
    d_uuid = self._create_tenant(name='d', parent_uuid=a_uuid)
    b_uuid = self._create_tenant(name='b', parent_uuid=a_uuid)
    f_uuid = self._create_tenant(name='f', parent_uuid=e_uuid)
    g_uuid = self._create_tenant(name='g', parent_uuid=b_uuid)
    c_uuid = self._create_tenant(name='c', parent_uuid=b_uuid)

    # No scoping tenant returns all tenants
    result = self._tenant_dao.list_visible_tenants()
    assert_that(
        result,
        contains_inanyorder(
            has_properties(uuid=top_uuid),
            has_properties(uuid=a_uuid),
            has_properties(uuid=b_uuid),
            has_properties(uuid=c_uuid),
            has_properties(uuid=d_uuid),
            has_properties(uuid=e_uuid),
            has_properties(uuid=f_uuid),
            has_properties(uuid=g_uuid),
            has_properties(uuid=h_uuid),
        ),
    )

    # Top tenant sees everyone
    result = self._tenant_dao.list_visible_tenants(
        scoping_tenant_uuid=top_uuid)
    assert_that(
        result,
        contains_inanyorder(
            has_properties(uuid=top_uuid),
            has_properties(uuid=a_uuid),
            has_properties(uuid=b_uuid),
            has_properties(uuid=c_uuid),
            has_properties(uuid=d_uuid),
            has_properties(uuid=e_uuid),
            has_properties(uuid=f_uuid),
            has_properties(uuid=g_uuid),
            has_properties(uuid=h_uuid),
        ),
    )

    # Leaves can see themselves only
    result = self._tenant_dao.list_visible_tenants(
        scoping_tenant_uuid=c_uuid)
    assert_that(result, contains(has_properties(uuid=c_uuid)))

    # An unknown tenant returns nothing
    result = self._tenant_dao.list_visible_tenants(
        scoping_tenant_uuid=constants.UNKNOWN_UUID)
    assert_that(result, empty())

    # A tenant sees all of its subtenant and itself
    result = self._tenant_dao.list_visible_tenants(
        scoping_tenant_uuid=a_uuid)
    assert_that(
        result,
        contains_inanyorder(
            has_properties(uuid=a_uuid),
            has_properties(uuid=b_uuid),
            has_properties(uuid=c_uuid),
            has_properties(uuid=d_uuid),
            has_properties(uuid=g_uuid),
        ),
    )
def __init__(self, *element_matchers):
    """Precompute the delegate hamcrest matchers for the given elements.

    Builds an all-of / any-of pair over per-element ``has_item`` matchers,
    plus an exact-order ``contains`` matcher over the elements themselves.
    """
    item_matchers = list(map(hc.has_item, element_matchers))
    self.matcher_all = hc.all_of(*item_matchers)
    self.matcher_any = hc.any_of(*item_matchers)
    self.matcher_order = hc.contains(*element_matchers)
    # Lazily-populated ordering state; None until first use.
    self.order_seq = None
def Subcommands_RefactorRename_MultiFile_test(app):
    """RefactorRename on a symbol used across several C# files returns one
    fixit whose chunks cover every occurrence in every file."""
    continuous_test = PathToTestFile('testy', 'ContinuousTest.cs')
    fixit_test = PathToTestFile('testy', 'FixItTestCase.cs')
    get_type_test = PathToTestFile('testy', 'GetTypeTestCase.cs')
    goto_test = PathToTestFile('testy', 'GotoTestCase.cs')
    import_test = PathToTestFile('testy', 'ImportTest.cs')
    program = PathToTestFile('testy', 'Program.cs')
    get_doc_test = PathToTestFile('testy', 'GetDocTestCase.cs')
    unicode_test = PathToTestFile('testy', 'Unicode.cs')

    with WrapOmniSharpServer(app, continuous_test):
        contents = ReadFile(continuous_test)

        # Rename the symbol at 3:11 to 'x'.
        request = BuildRequest(completer_target='filetype_default',
                               command_arguments=['RefactorRename', 'x'],
                               line_num=3,
                               column_num=11,
                               contents=contents,
                               filetype='cs',
                               filepath=continuous_test)
        response = app.post_json('/run_completer_command', request).json
        print('response = ', response)

        # One chunk per file where the symbol occurs.
        assert_that(
            response,
            has_entries({
                'fixits': contains(
                    has_entries({
                        'location': LocationMatcher(continuous_test, 3, 11),
                        'chunks': contains(
                            has_entries({
                                'replacement_text': 'x',
                                'range': RangeMatcher(continuous_test,
                                                      (3, 11), (3, 16))
                            }),
                            has_entries({
                                'replacement_text': 'x',
                                'range': RangeMatcher(fixit_test,
                                                      (1, 11), (1, 16))
                            }),
                            has_entries({
                                'replacement_text': 'x',
                                'range': RangeMatcher(get_doc_test,
                                                      (4, 11), (4, 16))
                            }),
                            has_entries({
                                'replacement_text': 'x',
                                'range': RangeMatcher(get_type_test,
                                                      (2, 11), (2, 16))
                            }),
                            has_entries({
                                'replacement_text': 'x',
                                'range': RangeMatcher(goto_test,
                                                      (4, 11), (4, 16))
                            }),
                            has_entries({
                                'replacement_text': 'x',
                                'range': RangeMatcher(import_test,
                                                      (3, 11), (3, 16))
                            }),
                            has_entries({
                                'replacement_text': 'x',
                                'range': RangeMatcher(program,
                                                      (3, 11), (3, 16))
                            }),
                            has_entries({
                                'replacement_text': 'x',
                                'range': RangeMatcher(unicode_test,
                                                      (4, 11), (4, 16))
                            }),
                        )
                    }))
            }))
def step_impl(context):
    """Behave step: read the encrypted output file and check it is non-trivial
    and does not contain the plaintext test data."""
    with open(context.test_file_out, "rb") as encrypted_file:
        context.encrypted_file_contents = encrypted_file.read()

    # Sanity check: something substantial was actually written.
    assert_that(len(context.encrypted_file_contents), greater_than(10))
    # The plaintext must not appear in the ciphertext.
    assert_that(context.encrypted_file_contents,
                not_(contains(context.test_data)))
def test_is_initially_emtpy():
    """A freshly-constructed ProjectHistory contains no elements."""
    # NOTE(review): "emtpy" in the name is a typo for "empty"; kept as-is to
    # preserve the public test identifier.
    fresh_history = ProjectHistory()
    assert_that(fresh_history, contains(), "history with no element")
    assert_that(fresh_history, empty(), "empty history")
def test_split_caller_callee_cels_no_cels(self):
    """Splitting an empty CEL list yields two empty groups."""
    no_cels = []
    result = self.cel_dispatcher.split_caller_callee_cels(no_cels)
    # Expect a pair of empty sequences: (caller cels, callee cels).
    assert_that(result, contains(contains(), contains()))
def test_that_listing_personal_with_profile_empty_returns_empty_list(self):
    """Listing personal contacts with the default profile yields no results."""
    response = self.get_personal_with_profile('default')
    assert_that(response['results'], contains())
def test_that_listing_empty_personal_returns_empty_list(self):
    """Listing personal contacts when none exist yields an empty item list."""
    response = self.list_personal()
    assert_that(response['items'], contains())
def test_lookup_should_work_without_unique_column(self):
    """A partial-string search matches the expected contact even when the
    backend has no unique column configured."""
    search_results = self.backend.search('lice')
    assert_that(search_results, contains(has_entries(**self._alice)))
def test_lookup_with_accents_in_the_result(self):
    """Searching returns the expected contact whose fields contain accents."""
    search_results = self.backend.search('lol')
    assert_that(search_results, contains(has_entries(**self._pepe)))
def user_hint_updated():
    """Poll helper: assert the user's mobile custom device state is now
    UNAVAILABLE (closes over ``self`` and ``user_uuid``)."""
    variable = f'DEVICE_STATE(Custom:{user_uuid}-mobile)'
    result = self.amid.action('Getvar', {'Variable': variable})
    expected_entry = has_entries(
        Response='Success',
        Value='UNAVAILABLE',
    )
    assert_that(result, contains(expected_entry))
def _MakeRelativePathsInFlagsAbsoluteTest(test):
    """Run one table-driven case for flags._MakeRelativePathsInFlagsAbsolute.

    ``test`` is a dict with keys:
      'flags'  - input flag list,
      'expect' - expected output flags (matched in order),
      'wd'     - optional working directory (defaults to '/not_test').
    """
    # dict.get avoids the LBYL double lookup ('wd' in test, then test['wd']).
    wd = test.get('wd', '/not_test')
    assert_that(flags._MakeRelativePathsInFlagsAbsolute(test['flags'], wd),
                contains(*test['expect']))
def Subcommands_Format_WholeFile_Tabs_test( app ):
  """Whole-file Format with insert_spaces=False: leading runs of spaces are
  replaced by tabs; each expected chunk is one indentation rewrite."""
  filepath = PathToTestFile( 'test.ts' )
  RunTest( app, {
    'description': 'Formatting is applied on the whole file '
                   'with tabs composed of 2 spaces',
    'request': {
      'command': 'Format',
      'filepath': filepath,
      'options': {
        'tab_size': 4,
        'insert_spaces': False
      }
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'fixits': contains( has_entries( {
          'chunks': contains(
            ChunkMatcher( '\t',
                          LocationMatcher( filepath, 3, 1 ),
                          LocationMatcher( filepath, 3, 3 ) ),
            ChunkMatcher( '\t',
                          LocationMatcher( filepath, 4, 1 ),
                          LocationMatcher( filepath, 4, 3 ) ),
            ChunkMatcher( ' ',
                          LocationMatcher( filepath, 4, 14 ),
                          LocationMatcher( filepath, 4, 14 ) ),
            ChunkMatcher( '\t',
                          LocationMatcher( filepath, 5, 1 ),
                          LocationMatcher( filepath, 5, 3 ) ),
            ChunkMatcher( ' ',
                          LocationMatcher( filepath, 5, 14 ),
                          LocationMatcher( filepath, 5, 14 ) ),
            ChunkMatcher( '\t',
                          LocationMatcher( filepath, 6, 1 ),
                          LocationMatcher( filepath, 6, 3 ) ),
            ChunkMatcher( '\t\t',
                          LocationMatcher( filepath, 7, 1 ),
                          LocationMatcher( filepath, 7, 5 ) ),
            ChunkMatcher( '\t\t\t',
                          LocationMatcher( filepath, 8, 1 ),
                          LocationMatcher( filepath, 8, 7 ) ),
            ChunkMatcher( '\t\t\t',
                          LocationMatcher( filepath, 9, 1 ),
                          LocationMatcher( filepath, 9, 7 ) ),
            ChunkMatcher( '\t\t',
                          LocationMatcher( filepath, 10, 1 ),
                          LocationMatcher( filepath, 10, 5 ) ),
            ChunkMatcher( '\t',
                          LocationMatcher( filepath, 11, 1 ),
                          LocationMatcher( filepath, 11, 3 ) ),
            ChunkMatcher( ' ',
                          LocationMatcher( filepath, 11, 6 ),
                          LocationMatcher( filepath, 11, 6 ) ),
            ChunkMatcher( '\t',
                          LocationMatcher( filepath, 27, 1 ),
                          LocationMatcher( filepath, 27, 3 ) ),
            ChunkMatcher( '\t ',
                          LocationMatcher( filepath, 28, 1 ),
                          LocationMatcher( filepath, 28, 4 ) ),
            ChunkMatcher( '\t ',
                          LocationMatcher( filepath, 29, 1 ),
                          LocationMatcher( filepath, 29, 4 ) ),
            ChunkMatcher( '\t',
                          LocationMatcher( filepath, 30, 1 ),
                          LocationMatcher( filepath, 30, 3 ) ),
            ChunkMatcher( ' ',
                          LocationMatcher( filepath, 30, 17 ),
                          LocationMatcher( filepath, 30, 17 ) ),
          )
        } ) )
      } )
    }
  } )
def test_that_list_returns_a_contact(self):
    """Listing by ids returns only the known contact; unknown ids are ignored."""
    unknown_id = '12'
    requested_ids = [self._benoit['id'], unknown_id]
    result = self.backend.list(requested_ids)
    assert_that(result, contains(has_entries(**self._benoit)))
def test_given_no_voicemails_then_returns_empty_list(self):
    """find_all_by_voicemail_id returns an empty list when nothing matches."""
    found = user_voicemail_dao.find_all_by_voicemail_id(1)
    assert_that(found, contains())
def FixIt_Check_cpp11_MultiSecond(results):
    """Assert the FixIt response for line 54 contains exactly two fix-its:
    a one-chunk 'foo' replacement and a two-chunk delete-then-insert-'~'
    edit, both anchored at 54,51."""
    assert_that(
        results,
        has_entries({
            'fixits': contains(
                # first fix-it at 54,16
                has_entries({
                    'chunks': contains(
                        has_entries({
                            'replacement_text': equal_to('foo'),
                            'range': has_entries({
                                'start': has_entries({
                                    'line_num': 54,
                                    'column_num': 16
                                }),
                                'end': has_entries({
                                    'line_num': 54,
                                    'column_num': 19
                                }),
                            }),
                        })),
                    'location': has_entries({
                        'line_num': 54,
                        'column_num': 51
                    })
                }),
                # second fix-it at 54,52
                has_entries({
                    'chunks': contains(
                        # Deletes the character at 54,52.
                        has_entries({
                            'replacement_text': equal_to(''),
                            'range': has_entries({
                                'start': has_entries({
                                    'line_num': 54,
                                    'column_num': 52
                                }),
                                'end': has_entries({
                                    'line_num': 54,
                                    'column_num': 53
                                }),
                            }),
                        }),
                        # Inserts '~' at 54,58 (zero-width range).
                        has_entries({
                            'replacement_text': equal_to('~'),
                            'range': has_entries({
                                'start': has_entries({
                                    'line_num': 54,
                                    'column_num': 58
                                }),
                                'end': has_entries({
                                    'line_num': 54,
                                    'column_num': 58
                                }),
                            }),
                        }),
                    ),
                    'location': has_entries({
                        'line_num': 54,
                        'column_num': 51
                    })
                }))
        }))
def YouCompleteMe_AsyncDiagnosticUpdate_SingleFile_test(ycm,
                                                        post_vim_message,
                                                        *args):
    # This test simulates asynchronous diagnostic updates associated with a single
    # file (e.g. Translation Unit), but where the actual errors refer to other
    # open files and other non-open files. This is not strictly invalid, nor is it
    # completely normal, but it is supported and does work.

    # Contrast with the next test which sends the diagnostics filewise, which is
    # what the language server protocol will do.

    # One diagnostic each in: the current buffer, a hidden (open but not
    # displayed) buffer, and a file not open in Vim at all.
    diagnostics = [{
        'kind': 'ERROR',
        'text': 'error text in current buffer',
        'location': {
            'filepath': '/current',
            'line_num': 1,
            'column_num': 1
        },
        'location_extent': {
            'start': {
                'filepath': '/current',
                'line_num': 1,
                'column_num': 1,
            },
            'end': {
                'filepath': '/current',
                'line_num': 1,
                'column_num': 1,
            }
        },
        'ranges': []
    }, {
        'kind': 'ERROR',
        'text': 'error text in hidden buffer',
        'location': {
            'filepath': '/has_diags',
            'line_num': 4,
            'column_num': 2
        },
        'location_extent': {
            'start': {
                'filepath': '/has_diags',
                'line_num': 4,
                'column_num': 2,
            },
            'end': {
                'filepath': '/has_diags',
                'line_num': 4,
                'column_num': 2,
            }
        },
        'ranges': []
    }, {
        'kind': 'ERROR',
        'text': 'error text in buffer not open in Vim',
        'location': {
            'filepath': '/not_open',
            'line_num': 8,
            'column_num': 4
        },
        'location_extent': {
            'start': {
                'filepath': '/not_open',
                'line_num': 8,
                'column_num': 4,
            },
            'end': {
                'filepath': '/not_open',
                'line_num': 8,
                'column_num': 4,
            }
        },
        'ranges': []
    }]

    current_buffer = VimBuffer('/current',
                               filetype='ycmtest',
                               contents=['current'] * 10,
                               number=1)
    no_diags_buffer = VimBuffer('/no_diags',
                                filetype='ycmtest',
                                contents=['nodiags'] * 10,
                                number=2)
    hidden_buffer = VimBuffer('/has_diags',
                              filetype='ycmtest',
                              contents=['hasdiags'] * 10,
                              number=3)

    buffers = [current_buffer, no_diags_buffer, hidden_buffer]
    windows = [current_buffer, no_diags_buffer]

    # Register each buffer internally with YCM
    for current in buffers:
        with MockVimBuffers(buffers, [current]):
            ycm.OnFileReadyToParse()

    with patch('ycm.vimsupport.SetLocationListForWindow',
               new_callable=ExtendedMock) as set_location_list_for_window:
        with MockVimBuffers(buffers, windows):
            ycm.UpdateWithNewDiagnosticsForFile('/current', diagnostics)

    # We update the diagnostic on the current cursor position
    post_vim_message.assert_has_exact_calls([
        call("error text in current buffer", truncate=True, warning=False),
    ])

    # Ensure we included all the diags though
    set_location_list_for_window.assert_has_exact_calls([
        call(1, [
            {
                'lnum': 1,
                'col': 1,
                'bufnr': 1,
                'valid': 1,
                'type': 'E',
                'text': 'error text in current buffer',
            },
            {
                'lnum': 4,
                'col': 2,
                'bufnr': 3,
                'valid': 1,
                'type': 'E',
                'text': 'error text in hidden buffer',
            },
            {
                'lnum': 8,
                'col': 4,
                'bufnr': -1,  # sic: Our mocked bufnr function actually returns -1,
                              # even though YCM is passing "create if needed".
                              # FIXME? we shouldn't do that, and we should pass
                              # filename instead
                'valid': 1,
                'type': 'E',
                'text': 'error text in buffer not open in Vim'
            }
        ])
    ])

    # Only the current window gets a highlight match.
    assert_that(
        test_utils.VIM_MATCHES_FOR_WINDOW,
        has_entries({
            1: contains(
                VimMatch('YcmErrorSection', '\\%1l\\%1c\\_.\\{-}\\%1l\\%1c'))
        }))
def YouCompleteMe_AsyncDiagnosticUpdate_PerFile_test(ycm,
                                                     post_vim_message,
                                                     *args):
    # This test simulates asynchronous diagnostic updates which are delivered per
    # file, including files which are open and files which are not.

    # Ordered to ensure that the calls to update are deterministic
    diagnostics_per_file = [
        ('/current', [{
            'kind': 'ERROR',
            'text': 'error text in current buffer',
            'location': {
                'filepath': '/current',
                'line_num': 1,
                'column_num': 1
            },
            'location_extent': {
                'start': {
                    'filepath': '/current',
                    'line_num': 1,
                    'column_num': 1,
                },
                'end': {
                    'filepath': '/current',
                    'line_num': 1,
                    'column_num': 1,
                }
            },
            'ranges': [],
        }]),
        ('/separate_window', [{
            'kind': 'ERROR',
            'text': 'error text in a buffer open in a separate window',
            'location': {
                'filepath': '/separate_window',
                'line_num': 3,
                'column_num': 3
            },
            'location_extent': {
                'start': {
                    'filepath': '/separate_window',
                    'line_num': 3,
                    'column_num': 3,
                },
                'end': {
                    'filepath': '/separate_window',
                    'line_num': 3,
                    'column_num': 3,
                }
            },
            'ranges': []
        }]),
        ('/hidden', [{
            'kind': 'ERROR',
            'text': 'error text in hidden buffer',
            'location': {
                'filepath': '/hidden',
                'line_num': 4,
                'column_num': 2
            },
            'location_extent': {
                'start': {
                    'filepath': '/hidden',
                    'line_num': 4,
                    'column_num': 2,
                },
                'end': {
                    'filepath': '/hidden',
                    'line_num': 4,
                    'column_num': 2,
                }
            },
            'ranges': []
        }]),
        ('/not_open', [{
            'kind': 'ERROR',
            'text': 'error text in buffer not open in Vim',
            'location': {
                'filepath': '/not_open',
                'line_num': 8,
                'column_num': 4
            },
            'location_extent': {
                'start': {
                    'filepath': '/not_open',
                    'line_num': 8,
                    'column_num': 4,
                },
                'end': {
                    'filepath': '/not_open',
                    'line_num': 8,
                    'column_num': 4,
                }
            },
            'ranges': []
        }])
    ]

    current_buffer = VimBuffer('/current',
                               filetype='ycmtest',
                               contents=['current'] * 10,
                               number=1)
    no_diags_buffer = VimBuffer('/no_diags',
                                filetype='ycmtest',
                                contents=['no_diags'] * 10,
                                number=2)
    separate_window = VimBuffer('/separate_window',
                                filetype='ycmtest',
                                contents=['separate_window'] * 10,
                                number=3)
    hidden_buffer = VimBuffer('/hidden',
                              filetype='ycmtest',
                              contents=['hidden'] * 10,
                              number=4)

    buffers = [current_buffer, no_diags_buffer, separate_window, hidden_buffer]
    windows = [current_buffer, no_diags_buffer, separate_window]

    # Register each buffer internally with YCM
    for current in buffers:
        with MockVimBuffers(buffers, [current]):
            ycm.OnFileReadyToParse()

    with patch('ycm.vimsupport.SetLocationListForWindow',
               new_callable=ExtendedMock) as set_location_list_for_window:
        with MockVimBuffers(buffers, windows):
            for filename, diagnostics in diagnostics_per_file:
                ycm.UpdateWithNewDiagnosticsForFile(filename, diagnostics)

    # We update the diagnostic on the current cursor position
    post_vim_message.assert_has_exact_calls([
        call("error text in current buffer", truncate=True, warning=False),
    ])

    # Ensure we included all the diags though
    set_location_list_for_window.assert_has_exact_calls([
        call(1, [
            {
                'lnum': 1,
                'col': 1,
                'bufnr': 1,
                'valid': 1,
                'type': 'E',
                'text': 'error text in current buffer',
            },
        ]),
        call(3, [
            {
                'lnum': 3,
                'col': 3,
                'bufnr': 3,
                'valid': 1,
                'type': 'E',
                'text': 'error text in a buffer open in a separate window',
            },
        ])
    ])

    # FIXME: diagnostic matches in windows other than the current one are not
    # updated.
    assert_that(
        test_utils.VIM_MATCHES_FOR_WINDOW,
        has_entries({
            1: contains(
                VimMatch('YcmErrorSection', '\\%1l\\%1c\\_.\\{-}\\%1l\\%1c'))
        }))
def YouCompleteMe_UpdateDiagnosticInterface(ycm, post_vim_message, *args): contents = """int main() { int x, y; x == y }""" # List of diagnostics returned by ycmd for the above code. diagnostics = [ { 'kind': 'ERROR', 'text': "expected ';' after expression", 'location': { 'filepath': 'buffer', 'line_num': 3, 'column_num': 9 }, # Looks strange but this is really what ycmd is returning. 'location_extent': { 'start': { 'filepath': '', 'line_num': 0, 'column_num': 0, }, 'end': { 'filepath': '', 'line_num': 0, 'column_num': 0, } }, 'ranges': [], 'fixit_available': True }, { 'kind': 'WARNING', 'text': 'equality comparison result unused', 'location': { 'filepath': 'buffer', 'line_num': 3, 'column_num': 7, }, 'location_extent': { 'start': { 'filepath': 'buffer', 'line_num': 3, 'column_num': 5, }, 'end': { 'filepath': 'buffer', 'line_num': 3, 'column_num': 7, } }, 'ranges': [{ 'start': { 'filepath': 'buffer', 'line_num': 3, 'column_num': 3, }, 'end': { 'filepath': 'buffer', 'line_num': 3, 'column_num': 9, } }], 'fixit_available': True } ] current_buffer = VimBuffer('buffer', filetype='c', contents=contents.splitlines(), number=5) test_utils.VIM_SIGNS = [] vimsupport.SIGN_ID_FOR_BUFFER.clear() with MockVimBuffers([current_buffer], [current_buffer], (3, 1)): with patch('ycm.client.event_notification.EventNotification.Response', return_value=diagnostics): ycm.OnFileReadyToParse() ycm.HandleFileParseRequest(block=True) # The error on the current line is echoed, not the warning. post_vim_message.assert_called_once_with( "expected ';' after expression (FixIt)", truncate=True, warning=False) # Error match is added after warning matches. assert_that( test_utils.VIM_MATCHES_FOR_WINDOW, has_entries({ 1: contains( VimMatch('YcmWarningSection', '\\%3l\\%5c\\_.\\{-}\\%3l\\%7c'), VimMatch('YcmWarningSection', '\\%3l\\%3c\\_.\\{-}\\%3l\\%9c'), VimMatch('YcmErrorSection', '\\%3l\\%8c')) })) # Only the error sign is placed. 
assert_that( test_utils.VIM_SIGNS, contains(VimSign(SIGN_BUFFER_ID_INITIAL_VALUE, 3, 'YcmError', 5))) # The error is not echoed again when moving the cursor along the line. with MockVimBuffers([current_buffer], [current_buffer], (3, 2)): post_vim_message.reset_mock() ycm.OnCursorMoved() post_vim_message.assert_not_called() # The error is cleared when moving the cursor to another line. with MockVimBuffers([current_buffer], [current_buffer], (2, 2)): post_vim_message.reset_mock() ycm.OnCursorMoved() post_vim_message.assert_called_once_with("", warning=False) # The error is echoed when moving the cursor back. with MockVimBuffers([current_buffer], [current_buffer], (3, 2)): post_vim_message.reset_mock() ycm.OnCursorMoved() post_vim_message.assert_called_once_with( "expected ';' after expression (FixIt)", truncate=True, warning=False) with patch('ycm.client.event_notification.EventNotification.Response', return_value=diagnostics[1:]): ycm.OnFileReadyToParse() ycm.HandleFileParseRequest(block=True) assert_that( test_utils.VIM_MATCHES_FOR_WINDOW, has_entries({ 1: contains( VimMatch('YcmWarningSection', '\\%3l\\%5c\\_.\\{-}\\%3l\\%7c'), VimMatch('YcmWarningSection', '\\%3l\\%3c\\_.\\{-}\\%3l\\%9c')) })) assert_that( test_utils.VIM_SIGNS, contains( VimSign(SIGN_BUFFER_ID_INITIAL_VALUE + 1, 3, 'YcmWarning', 5)))
def test_that_searching_for_result_with_non_ascii(self):
    """Searching with a non-ASCII term matches the expected contact."""
    search_results = self.backend.search('dré')
    assert_that(search_results, contains(has_entries(**self._andree_anne)))