Example #1
0
def test_tail_paasta_logs_empty_clusters():
    """With no clusters to tail, tail_paasta_logs must spawn no worker
    processes and print nothing."""
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['deploy', 'monitoring']
    clusters = []
    # contextlib.nested() is deprecated since 2.7 and removed in Python 3;
    # a single multi-context-manager `with` statement replaces it.
    with mock.patch(
        'paasta_tools.cli.cmds.logs.determine_scribereader_envs', autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribe_tail', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ) as print_log_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Queue', autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process', autospec=True,
    ) as process_patch:
        # No scribe environments -> the per-env worker loop never runs.
        determine_scribereader_envs_patch.return_value = []
        fake_queue = mock.MagicMock(spec_set=Queue())
        # An immediately-empty queue lets the main read loop exit cleanly.
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue
        logs.tail_paasta_logs(service, levels, components, clusters)
        assert process_patch.call_count == 0
        assert print_log_patch.call_count == 0
Example #2
0
def test_tail_paasta_logs_ctrl_c_in_queue_get():
    """A KeyboardInterrupt raised while blocked on queue.get() must be
    caught by tail_paasta_logs rather than propagating to the caller."""
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['deploy', 'monitoring']
    clusters = ['fake_cluster1', 'fake_cluster2']
    # contextlib.nested() is deprecated since 2.7 and removed in Python 3;
    # a single multi-context-manager `with` statement replaces it. All six
    # targets stay patched so no real logging/processes happen.
    with mock.patch(
        'paasta_tools.cli.cmds.logs.determine_scribereader_envs', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.scribe_tail', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.Queue', autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process', autospec=True,
    ):
        fake_queue = mock.MagicMock(spec_set=Queue())
        # Simulate the user hitting Ctrl-C while the main loop reads the queue.
        fake_queue.get.side_effect = FakeKeyboardInterrupt
        queue_patch.return_value = fake_queue
        try:
            logs.tail_paasta_logs(service, levels, components, clusters)
        # We have to catch this ourselves otherwise it will fool pytest too!
        except FakeKeyboardInterrupt:
            raise Exception(
                'The code under test failed to catch a (fake) KeyboardInterrupt!'
            )
Example #3
0
def test_tail_paasta_logs_empty_clusters():
    """With no clusters to tail, tail_paasta_logs must spawn no worker
    processes and print nothing."""
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['deploy', 'monitoring']
    clusters = []
    # contextlib.nested() is deprecated since 2.7 and removed in Python 3;
    # a single multi-context-manager `with` statement replaces it.
    with mock.patch(
        'paasta_tools.cli.cmds.logs.determine_scribereader_envs', autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribe_tail', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ) as print_log_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Queue', autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process', autospec=True,
    ) as process_patch:
        # No scribe environments -> the per-env worker loop never runs.
        determine_scribereader_envs_patch.return_value = []
        fake_queue = mock.MagicMock(spec_set=Queue())
        # An immediately-empty queue lets the main read loop exit cleanly.
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue
        logs.tail_paasta_logs(service, levels, components, clusters)
        assert process_patch.call_count == 0
        assert print_log_patch.call_count == 0
Example #4
0
def test_tail_paasta_logs_marathon():
    """Tailing the 'marathon' component with one env should start exactly
    one worker process."""
    service = 'fake_service'
    clusters = ['fake_cluster']
    levels = ['fake_level1', 'fake_level2']
    components = ['marathon']
    # contextlib.nested() is deprecated since 2.7 and removed in Python 3;
    # a single multi-context-manager `with` statement replaces it. All eight
    # targets stay patched so no real scribe/marathon work happens.
    with mock.patch(
        'paasta_tools.cli.cmds.logs.determine_scribereader_envs', autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribe_tail', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.Queue', autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process', autospec=True,
    ) as process_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.parse_marathon_log_line', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.marathon_log_line_passes_filter', autospec=True,
    ):
        determine_scribereader_envs_patch.return_value = ['env1']
        fake_queue = mock.MagicMock(spec_set=Queue())
        # Prevent tail_paasta_logs from reading from queue forever by simulating a Ctrl-C
        fake_queue.get.side_effect = KeyboardInterrupt
        queue_patch.return_value = fake_queue

        logs.tail_paasta_logs(service, levels, components, clusters)
        assert process_patch.call_count == 1
Example #5
0
def test_tail_paasta_logs_ctrl_c_in_is_alive():
    """A KeyboardInterrupt raised during the worker aliveness check must be
    caught by tail_paasta_logs rather than propagating to the caller."""
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['deploy', 'monitoring']
    clusters = ['fake_cluster1', 'fake_cluster2']
    # contextlib.nested() is deprecated since 2.7 and removed in Python 3;
    # a single multi-context-manager `with` statement replaces it.
    with mock.patch(
        'paasta_tools.cli.cmds.logs.determine_scribereader_envs', autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribe_tail', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.Queue', autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process', autospec=True,
    ) as process_patch:
        determine_scribereader_envs_patch.return_value = ['env1', 'env2']
        fake_queue = mock.MagicMock(spec_set=Queue())
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue
        fake_process = mock.MagicMock()
        # Simulate Ctrl-C arriving while the main loop polls worker liveness.
        fake_process.is_alive.side_effect = FakeKeyboardInterrupt
        process_patch.return_value = fake_process
        try:
            logs.tail_paasta_logs(service, levels, components, clusters)
        # We have to catch this ourselves otherwise it will fool pytest too!
        except FakeKeyboardInterrupt:
            raise Exception('The code under test failed to catch a (fake) KeyboardInterrupt!')
Example #6
0
def test_tail_paasta_logs_marathon():
    """Tailing the 'marathon' component with one env should start exactly
    one worker process."""
    service = 'fake_service'
    clusters = ['fake_cluster']
    levels = ['fake_level1', 'fake_level2']
    components = ['marathon']
    # contextlib.nested() is deprecated since 2.7 and removed in Python 3;
    # a single multi-context-manager `with` statement replaces it. All eight
    # targets stay patched so no real scribe/marathon work happens.
    with mock.patch(
        'paasta_tools.cli.cmds.logs.determine_scribereader_envs', autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribe_tail', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.Queue', autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process', autospec=True,
    ) as process_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.parse_marathon_log_line', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.marathon_log_line_passes_filter', autospec=True,
    ):
        determine_scribereader_envs_patch.return_value = ['env1']
        fake_queue = mock.MagicMock(spec_set=Queue())
        # Prevent tail_paasta_logs from reading from queue forever by simulating a Ctrl-C
        fake_queue.get.side_effect = KeyboardInterrupt
        queue_patch.return_value = fake_queue

        logs.tail_paasta_logs(service, levels, components, clusters)
        assert process_patch.call_count == 1
Example #7
0
def test_tail_paasta_logs_aliveness_check():
    """The cleanup path should terminate() only workers that are still
    alive, and poll is_alive() exactly as many times as scripted."""
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['deploy', 'monitoring']
    clusters = ['fake_cluster1', 'fake_cluster2']
    # contextlib.nested() is deprecated since 2.7 and removed in Python 3;
    # a single multi-context-manager `with` statement replaces it.
    with mock.patch(
        'paasta_tools.cli.cmds.logs.determine_scribereader_envs', autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribe_tail', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.Queue', autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process', autospec=True,
    ) as process_patch:
        determine_scribereader_envs_patch.return_value = ['env1', 'env2']
        fake_queue = mock.MagicMock(spec_set=Queue())
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue
        fake_process = mock.MagicMock()
        is_alive_responses = [
            # First time: simulate both threads being alive.
            True,
            True,
            # Second time: simulate first thread is alive but second thread is now dead.
            True,
            False,
            # This gets us into the kill stanza, which calls is_alive() on each
            # thread again. We'll recycle our answers from the previous calls
            # to is_alive() where the first thread is alive but the second
            # thread is dead.
            True,
            False,
        ]
        fake_process.is_alive.side_effect = is_alive_responses
        process_patch.return_value = fake_process
        logs.tail_paasta_logs(service, levels, components, clusters)
        # is_alive() should be called on all the values we painstakingly provided above.
        assert fake_process.is_alive.call_count == len(is_alive_responses)
        # We only terminate the first thread, which is still alive. We don't
        # terminate the second thread, which was already dead.
        assert fake_process.terminate.call_count == 1
Example #8
0
def tail_paasta_logs_let_threads_be_threads(context):
    """This test lets tail_paasta_logs() fire off processes to do work. We
    verify that the work was done, basically irrespective of how it was done.
    """
    service = 'fake_service'
    context.levels = ['fake_level1', 'fake_level2']
    context.components = ['deploy', 'monitoring']
    context.clusters = ['fake_cluster1', 'fake_cluster2']
    # contextlib.nested() is deprecated since 2.7 and removed in Python 3;
    # a single multi-context-manager `with` statement replaces it.
    with mock.patch(
        'paasta_tools.cli.cmds.logs.determine_scribereader_envs', autospec=True,
    ) as context.determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribe_tail', autospec=True,
    ) as scribe_tail_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ) as context.print_log_patch:
        context.determine_scribereader_envs_patch.return_value = [
            'env1', 'env2'
        ]

        def scribe_tail_side_effect(
            scribe_env,
            stream_name,
            service,
            levels,
            components,
            clusters,
            queue,
            filter_fn,
        ):
            # The print here is just for debugging
            # (print() function syntax works on both Python 2 and 3).
            print('fake log line added for %s' % scribe_env)
            queue.put('fake log line added for %s' % scribe_env)
            # This sleep() was the straw that broke the camel's back
            # and forced me to move this test into the integration
            # suite. The test is flaky without the sleep, and the
            # sleep make it a lousy unit test.
            time.sleep(0.05)

        scribe_tail_patch.side_effect = scribe_tail_side_effect

        logs.tail_paasta_logs(service, context.levels, context.components,
                              context.clusters)
Example #9
0
def test_tail_paasta_logs_aliveness_check():
    """The cleanup path should terminate() only workers that are still
    alive, and poll is_alive() exactly as many times as scripted."""
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['deploy', 'monitoring']
    clusters = ['fake_cluster1', 'fake_cluster2']
    # contextlib.nested() is deprecated since 2.7 and removed in Python 3;
    # a single multi-context-manager `with` statement replaces it.
    with mock.patch(
        'paasta_tools.cli.cmds.logs.determine_scribereader_envs', autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribe_tail', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.Queue', autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process', autospec=True,
    ) as process_patch:
        determine_scribereader_envs_patch.return_value = ['env1', 'env2']
        fake_queue = mock.MagicMock(spec_set=Queue())
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue
        fake_process = mock.MagicMock()
        is_alive_responses = [
            # First time: simulate both threads being alive.
            True, True,
            # Second time: simulate first thread is alive but second thread is now dead.
            True, False,
            # This gets us into the kill stanza, which calls is_alive() on each
            # thread again. We'll recycle our answers from the previous calls
            # to is_alive() where the first thread is alive but the second
            # thread is dead.
            True, False,
        ]
        fake_process.is_alive.side_effect = is_alive_responses
        process_patch.return_value = fake_process
        logs.tail_paasta_logs(service, levels, components, clusters)
        # is_alive() should be called on all the values we painstakingly provided above.
        assert fake_process.is_alive.call_count == len(is_alive_responses)
        # We only terminate the first thread, which is still alive. We don't
        # terminate the second thread, which was already dead.
        assert fake_process.terminate.call_count == 1
Example #10
0
def tail_paasta_logs_let_threads_be_threads(context):
    """This test lets tail_paasta_logs() fire off processes to do work. We
    verify that the work was done, basically irrespective of how it was done.
    """
    service = 'fake_service'
    context.levels = ['fake_level1', 'fake_level2']
    context.components = ['deploy', 'monitoring']
    context.clusters = ['fake_cluster1', 'fake_cluster2']
    # contextlib.nested() is deprecated since 2.7 and removed in Python 3;
    # a single multi-context-manager `with` statement replaces it.
    with mock.patch(
        'paasta_tools.cli.cmds.logs.determine_scribereader_envs', autospec=True,
    ) as context.determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribe_tail', autospec=True,
    ) as scribe_tail_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ) as context.print_log_patch:
        context.determine_scribereader_envs_patch.return_value = ['env1', 'env2']

        def scribe_tail_side_effect(
            scribe_env,
            stream_name,
            service,
            levels,
            components,
            clusters,
            queue,
            filter_fn,
        ):
            # The print here is just for debugging
            # (print() function syntax works on both Python 2 and 3).
            print('fake log line added for %s' % scribe_env)
            queue.put('fake log line added for %s' % scribe_env)
            # This sleep() was the straw that broke the camel's back
            # and forced me to move this test into the integration
            # suite. The test is flaky without the sleep, and the
            # sleep make it a lousy unit test.
            time.sleep(0.05)
        scribe_tail_patch.side_effect = scribe_tail_side_effect

        logs.tail_paasta_logs(service, context.levels, context.components, context.clusters)