def test_process_parser_analytics_count(self):
    """ Test that we correctly process parser with count metric """
    os.environ['TERM'] = 'dumb'
    app = Logria(None, False, False)
    # Pretend the terminal is 10 rows by 100 columns
    app.height = 10
    app.width = 100
    # Pretend a previous render happened
    app.last_row = app.height - 3  # last row we may draw on
    app.current_end = 80  # last message that was drawn
    # Ten fake numeric messages, parser state reset to the start
    app.messages = [str(x) for x in range(10)]
    app.parser_index = 0
    app.last_index_processed = 0
    app.analytics_enabled = True
    # Configure and attach a regex parser with a single count metric
    app.parser = Parser()
    app.parser.set_pattern(
        pattern=r'(\d)',
        type_='regex',
        name='Test',
        example='4',
        analytics_methods={'Item': 'count'},
    )
    # Build the analytics map by hand: index -> method name
    app.parser._analytics_map = {
        index: key
        for index, key in enumerate(app.parser._analytics_methods)
    }
    # Keep a pointer to the pre-parse message list
    app.previous_messages = app.messages
    # Run the parser and verify the rendered count table
    process_parser(app)
    self.assertEqual(
        app.messages,
        ['Item', ' 0: 1', ' 1: 1', ' 2: 1', ' 3: 1', ' 4: 1'])
def test_process_parser_invalid_index(self):
    """ Test that we correctly process parser with invalid index """
    os.environ['TERM'] = 'dumb'
    app = Logria(None, False, False)
    # Fake window size: 10 x 100
    app.height = 10
    app.width = 100
    # Set fake previous render
    app.last_row = app.height - 3  # simulate the last row we can render to
    app.current_end = 80  # Simulate the last message rendered
    # Set fake messages; each splits into only two parts, so index 3 is invalid
    app.messages = [f'{x}+{x}+{x}' for x in range(10)]
    app.parser_index = 3  # out-of-range part index for the split pattern
    app.last_index_processed = 0
    # Set parser, activate
    app.parser = Parser()
    app.parser.set_pattern(
        # raw string: '\+' is an invalid escape sequence in a normal string
        # literal (SyntaxWarning as of Python 3.12)
        pattern=r'\+',
        type_='split',
        name='Test',
        example='a-a',
        analytics_methods={
            'Item 1': 'count',
            'Item 2': 'count'
        }
    )
    # Store previous message pointer
    app.previous_messages = app.messages
    # Process parser; no message has a part at index 3, so nothing is kept
    process_parser(app)
    self.assertEqual(app.messages, [])
def test_process_parser_analytics_average_no_numbers(self):
    """ Test that we correctly process a parser with average metric but no source numbers """
    os.environ['TERM'] = 'dumb'
    app = Logria(None, False, False)
    # Pretend the terminal is 10 rows by 100 columns
    app.height = 10
    app.width = 100
    # Pretend a previous render happened
    app.last_row = app.height - 3  # last row we may draw on
    app.current_end = 80  # last message that was drawn
    # Fake messages are letters only ('@' through 'O') — no digits to average
    app.messages = [chr(code) for code in range(64, 80)]
    app.parser_index = 0
    app.last_index_processed = 0
    app.analytics_enabled = True
    # Configure and attach a regex parser with a single average metric
    app.parser = Parser()
    app.parser.set_pattern(
        pattern=r'(\d)',
        type_='regex',
        name='Test',
        example='4',
        analytics_methods={'Item': 'average'},
    )
    # Build the analytics map by hand: index -> method name
    app.parser._analytics_map = {
        index: key
        for index, key in enumerate(app.parser._analytics_methods)
    }
    # Since we construct analytics manually, seed the key ourselves;
    # averaging a non-numeric value should yield None
    app.parser.analytics[0] = None
    self.assertIsNone(app.parser.apply_analytics(0, 'A'))
def test_process_parser_no_analytics(self):
    """ Test that we correctly process parser with no analytics """
    os.environ['TERM'] = 'dumb'
    app = Logria(None, False, False)
    # Pretend the terminal is 10 rows by 100 columns
    app.height = 10
    app.width = 100
    # Pretend a previous render happened
    app.last_row = app.height - 3  # last row we may draw on
    app.current_end = 80  # last message that was drawn
    # Ten fake numeric messages; analytics stays disabled for this test
    app.messages = [str(x) for x in range(10)]
    app.parser_index = 0
    app.last_index_processed = 0
    # Configure and attach a regex parser (analytics never activated)
    app.parser = Parser()
    app.parser.set_pattern(
        pattern=r'(\d)',
        type_='regex',
        name='Test',
        example='4',
        analytics_methods={'Item': 'average'},
    )
    # Keep a pointer to the pre-parse message list
    app.previous_messages = app.messages
    # Run the parser; with analytics off, messages pass through unchanged
    process_parser(app)
    self.assertEqual(app.messages, [str(x) for x in range(10)])