def test_tail_paasta_logs_aliveness_check():
    """Script is_alive() answers on a mocked Process and verify that
    tail_logs() only terminates processes that are still alive.

    Fix: the final explanatory comment was broken across lines, leaving the
    bare tokens ``We don't`` outside any ``#`` comment (a syntax error); the
    comment is reassembled here.
    """
    service = "fake_service"
    levels = ["fake_level1", "fake_level2"]
    components = ["deploy", "monitoring"]
    clusters = ["fake_cluster1", "fake_cluster2"]
    instances = ["fake_instance1", "fake_instance2"]
    with mock.patch(
        "paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs",
        autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        "paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.log", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.print_log", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.Queue", autospec=True
    ) as queue_patch, mock.patch(
        "paasta_tools.cli.cmds.logs.Process", autospec=True
    ) as process_patch, mock.patch(
        "paasta_tools.cli.cmds.logs.scribereader", autospec=True
    ):
        determine_scribereader_envs_patch.return_value = ["env1", "env2"]
        fake_queue = mock.MagicMock(spec_set=Queue())
        # Empty makes tail_logs() stop waiting for log lines immediately.
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue
        fake_process = mock.MagicMock()
        is_alive_responses = [
            # First time: simulate both threads being alive.
            True,
            True,
            # Second time: simulate first thread is alive but second thread is now dead.
            True,
            False,
            # This gets us into the kill stanza, which calls is_alive() on each
            # thread again. We'll recycle our answers from the previous calls
            # to is_alive() where the first thread is alive but the second
            # thread is dead.
            True,
            False,
        ]
        fake_process.is_alive.side_effect = is_alive_responses
        process_patch.return_value = fake_process
        scribe_log_reader = logs.ScribeLogReader(
            cluster_map={"env1": "env1", "env2": "env2"}
        )
        scribe_log_reader.tail_logs(service, levels, components, clusters, instances)
        # is_alive() should be called on all the values we painstakingly provided above.
        assert fake_process.is_alive.call_count == len(is_alive_responses)
        # We only terminate the first thread, which is still alive. We don't
        # terminate the second thread, which was already dead.
        assert fake_process.terminate.call_count == 1
def test_determine_scribereader_envs():
    """determine_scribereader_envs() should return the union of the implicit
    'devc' env (contributed by the 'build' component) and the env the
    cluster maps to.

    Fix: ``contextlib.nested`` was removed in Python 3 — a single
    ``mock.patch`` context manager is sufficient here (the patched object
    was never used, so its ``as`` binding is dropped too).
    """
    cluster = 'fake_cluster'
    components = ['build', 'monitoring']
    with mock.patch('paasta_tools.cli.cmds.logs.scribereader'):
        cluster_map = {
            cluster: 'fake_scribe_env',
        }
        actual = logs.ScribeLogReader(
            cluster_map=cluster_map,
        ).determine_scribereader_envs(components, cluster)
        assert actual == {'devc', 'fake_scribe_env'}
def test_scribereader_print_last_n_logs():
    """One stream tailer is opened per (component, env) pair, except
    marathon, which opens one per (env, cluster) combination."""
    service = "fake_service"
    levels = ["fake_level1", "fake_level2"]
    clusters = ["fake_cluster1", "fake_cluster2"]
    instances = ["main"]
    components = ["build", "deploy", "monitoring", "marathon", "stdout", "stderr"]
    with mock.patch(
        "paasta_tools.cli.cmds.logs.scribereader", autospec=True
    ) as mock_scribereader, mock.patch(
        "paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs",
        autospec=True,
    ) as determine_scribereader_envs_patch:
        determine_scribereader_envs_patch.return_value = ["env1", "env2"]
        mock_scribereader.get_tail_host_and_port.return_value = "fake_host", "fake_port"
        # Canned JSON log line, repeated to satisfy the requested count.
        fake_tailer = mock.MagicMock()
        fake_tailer.__iter__.return_value = [
            """{"cluster":"fake_cluster1","component":"stderr","instance":"main",
"level":"debug","message":"testing",
"timestamp":"2016-06-08T06:31:52.706609135Z"}"""
        ] * 100
        mock_scribereader.get_stream_tailer.return_value = fake_tailer

        logs.ScribeLogReader(cluster_map={}).print_last_n_logs(
            service,
            100,
            levels,
            components,
            clusters,
            instances,
            pods=None,
            raw_mode=False,
            strip_headers=False,
        )

        # one call per component per environment except marathon which runs 1/env/cluster
        # Defaults:
        #   env1, env2 = 2
        # marathon:
        #   env1: cluster1 cluster2 = 2
        #   env2: cluster1 cluster2 = 2
        # stdout:
        #   env1, env2 = 2
        # stderr:
        #   env1, env2 = 2
        assert mock_scribereader.get_stream_tailer.call_count == 10
def tail_paasta_logs_let_threads_be_threads(context):
    """This test lets tail_paasta_logs() fire off processes to do work.

    We verify that the work was done, basically irrespective of how it
    was done.
    """
    service = 'fake_service'
    context.levels = ['fake_level1', 'fake_level2']
    context.components = ['deploy', 'monitoring']
    context.clusters = ['fake_cluster1', 'fake_cluster2']
    context.instances = ['fake_instance']
    with mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs',
        autospec=True,
    ) as context.determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail',
        autospec=True,
    ) as scribe_tail_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.log',
        autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log',
        autospec=True,
    ) as context.print_log_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribereader',
    ):
        context.determine_scribereader_envs_patch.return_value = ['env1', 'env2']

        def scribe_tail_side_effect(
            self,
            scribe_env,
            stream_name,
            service,
            levels,
            components,
            clusters,
            instances,
            queue,
            filter_fn,
            parse_fn=None,
        ):
            # The print here is just for debugging
            paasta_print('fake log line added for %s' % scribe_env)
            queue.put('fake log line added for %s' % scribe_env)
            # This sleep() was the straw that broke the camel's back
            # and forced me to move this test into the integration
            # suite. The test is flaky without the sleep, and the
            # sleep make it a lousy unit test.
            time.sleep(0.05)

        scribe_tail_patch.side_effect = scribe_tail_side_effect
        context.scribe_log_reader = logs.ScribeLogReader(
            cluster_map={'env1': 'env1', 'env2': 'env2'},
        )
        context.scribe_log_reader.tail_logs(
            service,
            context.levels,
            context.components,
            context.clusters,
            context.instances,
        )
def test_scribereader_run_code_over_scribe_envs():
    """The callback fires once per (component, env), plus once per
    (env, cluster) for marathon and chronos."""
    clusters = ['fake_cluster1', 'fake_cluster2']
    components = [
        'build', 'deploy', 'monitoring', 'marathon', 'chronos', 'stdout', 'stderr',
    ]
    callback = mock.MagicMock()

    with mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs',
        autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribereader', autospec=True,
    ):
        determine_scribereader_envs_patch.return_value = ['env1', 'env2']
        reader = logs.ScribeLogReader(cluster_map={})
        reader.run_code_over_scribe_envs(clusters, components, callback)

        # See comment in test_scribereader_print_last_n_logs for where this
        # figure comes from
        assert callback.call_count == 14
def test_scribereader_print_last_n_logs():
    """One stream tailer per (component, env), except marathon and chronos
    which get one per (env, cluster)."""
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    clusters = ['fake_cluster1', 'fake_cluster2']
    instances = ['main']
    components = [
        'build', 'deploy', 'monitoring', 'marathon', 'chronos', 'stdout', 'stderr',
    ]
    with mock.patch(
        'paasta_tools.cli.cmds.logs.scribereader', autospec=True,
    ) as mock_scribereader, mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs',
        autospec=True,
    ) as determine_scribereader_envs_patch:
        determine_scribereader_envs_patch.return_value = ['env1', 'env2']
        # Canned JSON log line, repeated to satisfy the requested count.
        fake_tailer = mock.MagicMock()
        fake_tailer.__iter__.return_value = [
            """{"cluster":"fake_cluster1","component":"stderr","instance":"main",
"level":"debug","message":"testing",
"timestamp":"2016-06-08T06:31:52.706609135Z"}"""
        ] * 100
        mock_scribereader.get_stream_tailer.return_value = fake_tailer

        logs.ScribeLogReader(cluster_map={}).print_last_n_logs(
            service,
            100,
            levels,
            components,
            clusters,
            instances,
            raw_mode=False,
        )

        # one call per component per environment except marathon and chronos which run 1/env/cluster
        # Defaults:
        #   env1, env2 = 2
        # marathon:
        #   env1: cluster1 cluster2 = 2
        #   env2: cluster1 cluster2 = 2
        # chronos:
        #   env1: cluster1 cluster2 = 2
        #   env2: cluster1 cluster2 = 2
        # stdout:
        #   env1, env2 = 2
        # stderr:
        #   env1, env2 = 2
        assert mock_scribereader.get_stream_tailer.call_count == 14
def test_tail_paasta_logs_aliveness_check():
    """Script is_alive() answers on a mocked Process and verify that
    tail_logs() only terminates processes that are still alive.

    Fix: ``contextlib.nested`` was removed in Python 3; the patches are now
    chained in a single ``with`` statement. Unused ``as`` bindings for the
    scribe_tail/log/print_log/scribereader patches are dropped.
    """
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['deploy', 'monitoring']
    clusters = ['fake_cluster1', 'fake_cluster2']
    with mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs',
        autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.Queue', autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process', autospec=True,
    ) as process_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribereader',
    ):
        determine_scribereader_envs_patch.return_value = ['env1', 'env2']
        fake_queue = mock.MagicMock(spec_set=Queue())
        # Empty makes tail_logs() stop waiting for log lines immediately.
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue
        fake_process = mock.MagicMock()
        is_alive_responses = [
            # First time: simulate both threads being alive.
            True,
            True,
            # Second time: simulate first thread is alive but second thread is now dead.
            True,
            False,
            # This gets us into the kill stanza, which calls is_alive() on each
            # thread again. We'll recycle our answers from the previous calls
            # to is_alive() where the first thread is alive but the second
            # thread is dead.
            True,
            False,
        ]
        fake_process.is_alive.side_effect = is_alive_responses
        process_patch.return_value = fake_process
        scribe_log_reader = logs.ScribeLogReader(
            cluster_map={'env1': 'env1', 'env2': 'env2'},
        )
        scribe_log_reader.tail_logs(service, levels, components, clusters)
        # is_alive() should be called on all the values we painstakingly provided above.
        assert fake_process.is_alive.call_count == len(is_alive_responses)
        # We only terminate the first thread, which is still alive. We don't
        # terminate the second thread, which was already dead.
        assert fake_process.terminate.call_count == 1
def test_determine_scribereader_additional_envs():
    """A component's 'additional_source_envs' entries are included alongside
    the env the cluster maps to."""
    cluster = "fake_cluster"
    components = ["fake_component"]
    with mock.patch(
        "paasta_tools.cli.cmds.logs.scribereader", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.LOG_COMPONENTS", spec_set=dict, autospec=None
    ) as mock_LOG_COMPONENTS:
        cluster_map = {cluster: "fake_scribe_env"}
        # Route dict lookups on the patched LOG_COMPONENTS through this stand-in.
        fake_log_components = {
            "fake_component": {"additional_source_envs": ["fake_scribe_env2"]}
        }
        mock_LOG_COMPONENTS.__getitem__.side_effect = fake_log_components.__getitem__

        reader = logs.ScribeLogReader(cluster_map=cluster_map)
        actual = reader.determine_scribereader_envs(components, cluster)
        assert "fake_scribe_env" in actual
        assert "fake_scribe_env2" in actual
def test_determine_scribereader_additional_envs():
    """A component's 'additional_source_envs' entries are included alongside
    the env the cluster maps to."""
    cluster = 'fake_cluster'
    components = ['fake_component']
    with mock.patch(
        'paasta_tools.cli.cmds.logs.scribereader', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.LOG_COMPONENTS', spec_set=dict, autospec=None,
    ) as mock_LOG_COMPONENTS:
        cluster_map = {
            cluster: 'fake_scribe_env',
        }
        # Route dict lookups on the patched LOG_COMPONENTS through this stand-in.
        fake_log_components = {
            'fake_component': {
                'additional_source_envs': ['fake_scribe_env2'],
            },
        }
        mock_LOG_COMPONENTS.__getitem__.side_effect = fake_log_components.__getitem__

        reader = logs.ScribeLogReader(cluster_map=cluster_map)
        actual = reader.determine_scribereader_envs(components, cluster)
        assert 'fake_scribe_env' in actual and 'fake_scribe_env2' in actual
def test_tail_paasta_logs_marathon():
    """Tailing only the marathon component for one env spawns exactly one
    worker process."""
    service = 'fake_service'
    clusters = ['fake_cluster']
    instances = ['fake_instance']
    levels = ['fake_level1', 'fake_level2']
    components = ['marathon']
    with mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs',
        autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail',
        autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log',
        autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log',
        autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.Queue',
        autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process',
        autospec=True,
    ) as process_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.parse_marathon_log_line',
        autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.marathon_log_line_passes_filter',
        autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.scribereader',
        autospec=True,
    ):
        determine_scribereader_envs_patch.return_value = ['env1']
        fake_queue = mock.MagicMock(spec_set=Queue())
        # Prevent tail_paasta_logs from reading from queue forever by simulating a Ctrl-C
        fake_queue.get.side_effect = KeyboardInterrupt
        queue_patch.return_value = fake_queue

        reader = logs.ScribeLogReader(cluster_map={'env1': 'env1'})
        reader.tail_logs(service, levels, components, clusters, instances)
        assert process_patch.call_count == 1
def test_tail_paasta_logs_ctrl_c_in_is_alive():
    """A (fake) KeyboardInterrupt raised from Process.is_alive() must be
    caught by tail_logs() rather than propagating to the caller.

    Fix: ``contextlib.nested`` was removed in Python 3; the patches are now
    chained in a single ``with`` statement. Unused ``as`` bindings are
    dropped.
    """
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['deploy', 'monitoring']
    clusters = ['fake_cluster1', 'fake_cluster2']
    with mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs',
        autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log', autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.Queue', autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process', autospec=True,
    ) as process_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribereader',
    ):
        determine_scribereader_envs_patch.return_value = ['env1', 'env2']
        fake_queue = mock.MagicMock(spec_set=Queue())
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue
        fake_process = mock.MagicMock()
        # A stand-in exception so a real KeyboardInterrupt can't fool pytest.
        fake_process.is_alive.side_effect = FakeKeyboardInterrupt
        process_patch.return_value = fake_process
        scribe_log_reader = logs.ScribeLogReader(
            cluster_map={'env1': 'env1', 'env2': 'env2'},
        )
        try:
            scribe_log_reader.tail_logs(service, levels, components, clusters)
        # We have to catch this ourselves otherwise it will fool pytest too!
        except FakeKeyboardInterrupt:
            raise Exception(
                'The code under test failed to catch a (fake) KeyboardInterrupt!'
            )
def test_tail_paasta_logs_ctrl_c_in_is_alive():
    """A (fake) KeyboardInterrupt raised from Process.is_alive() must be
    caught by tail_logs() rather than propagating to the caller."""
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['deploy', 'monitoring']
    clusters = ['fake_cluster1', 'fake_cluster2']
    instances = ['fake_instance1', 'fake_instance2']
    with mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs',
        autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail',
        autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log',
        autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log',
        autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.Queue',
        autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process',
        autospec=True,
    ) as process_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribereader',
        autospec=True,
    ):
        determine_scribereader_envs_patch.return_value = ['env1', 'env2']
        fake_queue = mock.MagicMock(spec_set=Queue())
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue
        fake_process = mock.MagicMock()
        # A stand-in exception so a real KeyboardInterrupt can't fool pytest.
        fake_process.is_alive.side_effect = FakeKeyboardInterrupt
        process_patch.return_value = fake_process

        reader = logs.ScribeLogReader(
            cluster_map={'env1': 'env1', 'env2': 'env2'},
        )
        with reraise_keyboardinterrupt():
            reader.tail_logs(service, levels, components, clusters, instances)
def test_scribereader_run_code_over_scribe_envs():
    """The callback fires once per (component, env), plus once per
    (env, cluster) for marathon."""
    clusters = ["fake_cluster1", "fake_cluster2"]
    components = ["build", "deploy", "monitoring", "marathon", "stdout", "stderr"]
    callback = mock.MagicMock()
    with mock.patch(
        "paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs",
        autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        "paasta_tools.cli.cmds.logs.scribereader", autospec=True
    ):
        determine_scribereader_envs_patch.return_value = ["env1", "env2"]
        reader = logs.ScribeLogReader(cluster_map={})
        reader.run_code_over_scribe_envs(clusters, components, callback)
        # See comment in test_scribereader_print_last_n_logs for where this figure comes from
        assert callback.call_count == 10
def test_tail_paasta_logs_ctrl_c_in_is_alive():
    """A (fake) KeyboardInterrupt raised from Process.is_alive() must be
    caught by tail_logs() rather than propagating (pods variant)."""
    service = "fake_service"
    levels = ["fake_level1", "fake_level2"]
    components = ["deploy", "monitoring"]
    clusters = ["fake_cluster1", "fake_cluster2"]
    instances = ["fake_instance1", "fake_instance2"]
    pods = ["fake_pod1", "fake_pod2"]
    with mock.patch(
        "paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs",
        autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        "paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.log", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.print_log", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.Queue", autospec=True
    ) as queue_patch, mock.patch(
        "paasta_tools.cli.cmds.logs.Process", autospec=True
    ) as process_patch, mock.patch(
        "paasta_tools.cli.cmds.logs.scribereader", autospec=True
    ):
        determine_scribereader_envs_patch.return_value = ["env1", "env2"]
        fake_queue = mock.MagicMock(spec_set=Queue())
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue
        fake_process = mock.MagicMock()
        # A stand-in exception so a real KeyboardInterrupt can't fool pytest.
        fake_process.is_alive.side_effect = FakeKeyboardInterrupt
        process_patch.return_value = fake_process

        reader = logs.ScribeLogReader(
            cluster_map={"env1": "env1", "env2": "env2"}
        )
        with reraise_keyboardinterrupt():
            reader.tail_logs(service, levels, components, clusters, instances, pods)
def test_tail_paasta_logs_empty_clusters():
    """With no clusters (and hence no scribe envs), tail_logs() must spawn
    no processes and print nothing."""
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['deploy', 'monitoring']
    clusters = []
    instances = ['fake_instance']
    with mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs',
        autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail',
        autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.log',
        autospec=True,
    ), mock.patch(
        'paasta_tools.cli.cmds.logs.print_log',
        autospec=True,
    ) as print_log_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Queue',
        autospec=True,
    ) as queue_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.Process',
        autospec=True,
    ) as process_patch, mock.patch(
        'paasta_tools.cli.cmds.logs.scribereader',
        autospec=True,
    ):
        determine_scribereader_envs_patch.return_value = []
        fake_queue = mock.MagicMock(spec_set=Queue())
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue

        reader = logs.ScribeLogReader(cluster_map={})
        reader.tail_logs(service, levels, components, clusters, instances)
        assert process_patch.call_count == 0
        assert print_log_patch.call_count == 0
def test_tail_paasta_logs_marathon():
    """Tailing only the marathon component for one env spawns exactly one
    worker process."""
    service = "fake_service"
    clusters = ["fake_cluster"]
    instances = ["fake_instance"]
    levels = ["fake_level1", "fake_level2"]
    components = ["marathon"]
    with mock.patch(
        "paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs",
        autospec=True,
    ) as determine_scribereader_envs_patch, mock.patch(
        "paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.log", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.print_log", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.Queue", autospec=True
    ) as queue_patch, mock.patch(
        "paasta_tools.cli.cmds.logs.Process", autospec=True
    ) as process_patch, mock.patch(
        "paasta_tools.cli.cmds.logs.parse_marathon_log_line", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.marathon_log_line_passes_filter", autospec=True
    ), mock.patch(
        "paasta_tools.cli.cmds.logs.scribereader", autospec=True
    ):
        determine_scribereader_envs_patch.return_value = ["env1"]
        fake_queue = mock.MagicMock(spec_set=Queue())
        # Prevent tail_paasta_logs from reading from queue forever by simulating a Ctrl-C
        fake_queue.get.side_effect = KeyboardInterrupt
        queue_patch.return_value = fake_queue

        reader = logs.ScribeLogReader(cluster_map={"env1": "env1"})
        reader.tail_logs(service, levels, components, clusters, instances)
        assert process_patch.call_count == 1
def test_scribereader_print_logs_by_time():
    """print_logs_by_time() opens one stream reader per (component, env),
    except marathon which gets one per (env, cluster); a second call
    doubles the cumulative count."""
    service = "fake_service"
    levels = ["fake_level1", "fake_level2"]
    clusters = ["fake_cluster1", "fake_cluster2"]
    instances = ["main"]
    components = ["build", "deploy", "monitoring", "marathon", "stdout", "stderr"]
    with mock.patch(
        "paasta_tools.cli.cmds.logs.scribereader", autospec=True
    ) as mock_scribereader, mock.patch(
        "paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs",
        autospec=True,
    ) as determine_scribereader_envs_patch:
        determine_scribereader_envs_patch.return_value = ["env1", "env2"]
        mock_scribereader.get_tail_host_and_port.return_value = "fake_host", "fake_port"
        # Canned JSON log line (as bytes), repeated 100 times.
        fake_stream = mock.MagicMock()
        fake_stream.__iter__.return_value = [
            b"""{"cluster":"fake_cluster1","component":"stderr","instance":"main",
"level":"debug","message":"testing",
"timestamp":"2016-06-08T06:31:52.706609135Z"}"""
        ] * 100
        mock_scribereader.get_stream_tailer.return_value = fake_stream
        mock_scribereader.get_stream_reader.return_value = fake_stream

        start_time, end_time = logs.generate_start_end_time()
        logs.ScribeLogReader(cluster_map={}).print_logs_by_time(
            service,
            start_time,
            end_time,
            levels,
            components,
            clusters,
            instances,
            pods=None,
            raw_mode=False,
            strip_headers=False,
        )
        # Please see comment in test_scribereader_print_last_n_logs for where this number comes from
        assert mock_scribereader.get_stream_reader.call_count == 10

        start_time, end_time = logs.generate_start_end_time("3d", "2d")
        logs.ScribeLogReader(cluster_map={}).print_logs_by_time(
            service,
            start_time,
            end_time,
            levels,
            components,
            clusters,
            instances,
            pods=None,
            raw_mode=False,
            strip_headers=False,
        )
        # Please see comment in test_scribereader_print_last_n_logs for where this number comes from
        assert mock_scribereader.get_stream_reader.call_count == 10 * 2
def test_cluster_to_scribe_env_bad():
    """Looking up a cluster with no scribe env mapping exits with status 1."""
    with mock.patch("paasta_tools.cli.cmds.logs.scribereader", autospec=True):
        reader = logs.ScribeLogReader(cluster_map={})
        with raises(SystemExit) as sys_exit:
            reader.cluster_to_scribe_env("dne")
        assert sys_exit.value.code == 1
def test_cluster_to_scribe_env_good():
    """A cluster present in the cluster_map resolves to its scribe env."""
    with mock.patch("paasta_tools.cli.cmds.logs.scribereader", autospec=True):
        reader = logs.ScribeLogReader(cluster_map={"mesosstage": "env1"})
        assert reader.cluster_to_scribe_env("mesosstage") == "env1"
def test_cluster_to_scribe_env_good():
    """A cluster present in the cluster_map resolves to its scribe env."""
    with mock.patch('paasta_tools.cli.cmds.logs.scribereader', autospec=True):
        reader = logs.ScribeLogReader(cluster_map={'mesosstage': 'env1'})
        result = reader.cluster_to_scribe_env('mesosstage')
        assert result == 'env1'