def test_mesos_cpu_metrics_provider():
    """With no prior cpu data in zookeeper, the cpu metrics provider should
    raise MetricsProviderNoDataError but still persist the freshly computed
    cpu data (240 + 240 = 480.0 seconds) so the next run has a baseline."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={},
        branch_dict={},
    )
    fake_mesos_task = mock.MagicMock(
        stats={
            'cpus_limit': 1.1,
            'cpus_system_time_secs': 240,
            'cpus_user_time_secs': 240,
        },
    )
    fake_mesos_task.__getitem__.return_value = 'fake-service.fake-instance'
    fake_marathon_tasks = [mock.Mock(id='fake-service.fake-instance')]
    with contextlib.nested(
        # NoNodeError simulates a zookeeper with no historical cpu data.
        mock.patch('paasta_tools.utils.KazooClient', autospec=True,
                   return_value=mock.Mock(get=mock.Mock(
                       side_effect=NoNodeError))),
        mock.patch('paasta_tools.utils.load_system_paasta_config', autospec=True,
                   return_value=mock.Mock(get_zk_hosts=mock.Mock())),
    ) as (
        mock_zk_client,
        _,
    ):
        with raises(autoscaling_service_lib.MetricsProviderNoDataError):
            autoscaling_service_lib.mesos_cpu_metrics_provider(
                fake_marathon_service_config, fake_marathon_tasks, (fake_mesos_task,))
        # Even on the no-data path, the new sample must be written back.
        mock_zk_client.return_value.set.assert_has_calls([
            mock.call('/autoscaling/fake-service/fake-instance/cpu_data',
                      '480.0:fake-service.fake-instance'),
        ], any_order=True)
def test_autoscale_marathon_instance_aborts_when_task_deploying():
    """While marathon still reports far more configured instances (500) than
    running tasks (1) — i.e. a deploy is in flight — autoscaling must take no
    scaling action."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={'min_instances': 1, 'max_instances': 10},
        branch_dict={},
    )
    with contextlib.nested(
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.set_instances_for_marathon_service',
                   autospec=True),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.get_service_metrics_provider',
                   autospec=True),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.get_decision_policy',
                   autospec=True, return_value=mock.Mock(return_value=1)),
        # get_instances reports many more instances than the single mocked task.
        mock.patch.object(marathon_tools.MarathonServiceConfig, 'get_instances',
                          autospec=True, return_value=500),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib._log', autospec=True),
    ) as (
        mock_set_instances_for_marathon_service,
        _,
        _,
        _,
        _,
    ):
        autoscaling_service_lib.autoscale_marathon_instance(fake_marathon_service_config,
                                                            [mock.Mock()], [mock.Mock()])
        assert not mock_set_instances_for_marathon_service.called
def test_get_http_utilization_for_all_tasks_no_data():
    """If the json_mapper fails for every task, the provider should raise
    MetricsProviderNoDataError and log the per-task failure."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={},
        branch_dict={},
    )
    fake_marathon_tasks = [mock.Mock(id='fake-service.fake-instance', host='fake_host', ports=[30101])]
    mock_json_mapper = mock.Mock(side_effect=KeyError('Detailed message'))  # KeyError simulates an invalid response
    with contextlib.nested(
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.log.debug', autospec=True),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.get_json_body_from_service',
                   autospec=True),
    ) as (
        mock_log_debug,
        _,
    ):
        with raises(autoscaling_service_lib.MetricsProviderNoDataError):
            autoscaling_service_lib.get_http_utilization_for_all_tasks(
                fake_marathon_service_config,
                fake_marathon_tasks,
                endpoint='fake-endpoint',
                json_mapper=mock_json_mapper,
            )
        # NOTE(review): 'excpetion' is presumably misspelled to match the
        # message emitted by the code under test — if the production message
        # is ever corrected, this assertion must change with it.
        mock_log_debug.assert_called_once_with(
            "Caught excpetion when querying fake-service on fake_host:30101 : 'Detailed message'")
def test_mesos_ram_cpu_metrics_provider_no_data_mesos():
    """With zookeeper history present but an empty list of mesos tasks, the
    cpu provider has nothing to average and raises MetricsProviderNoDataError."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={},
        branch_dict={},
    )
    fake_marathon_tasks = [mock.Mock(id='fake-service.fake-instance')]
    # Fake zookeeper contents, looked up by the final path component.
    zookeeper_get_payload = {
        'cpu_last_time': '0',
        'cpu_data': '',
    }
    with contextlib.nested(
        mock.patch('paasta_tools.utils.KazooClient', autospec=True,
                   return_value=mock.Mock(get=mock.Mock(
                       side_effect=lambda x: (zookeeper_get_payload[x.split('/')[-1]], None)))),
        mock.patch('paasta_tools.utils.load_system_paasta_config', autospec=True,
                   return_value=mock.Mock(get_zk_hosts=mock.Mock())),
    ) as (
        _,
        _,
    ):
        with raises(autoscaling_lib.MetricsProviderNoDataError):
            autoscaling_lib.mesos_cpu_metrics_provider(
                fake_marathon_service_config, fake_marathon_tasks, [])
def test_update_instances_for_marathon_service():
    """set_instances_for_marathon_service should write the requested instance
    count (as a string) to the service's autoscaling zookeeper node."""
    with contextlib.nested(
        mock.patch(
            'paasta_tools.marathon_tools.load_marathon_service_config',
            autospec=True),
        mock.patch('paasta_tools.utils.KazooClient', autospec=True),
        mock.patch('paasta_tools.utils.load_system_paasta_config', autospec=True),
    ) as (
        mock_load_marathon_service_config,
        mock_zk_client,
        _,
    ):
        # No pre-existing node in zookeeper for this service/instance.
        zk_client = mock.Mock(get=mock.Mock(side_effect=NoNodeError))
        mock_zk_client.return_value = zk_client
        mock_load_marathon_service_config.return_value = marathon_tools.MarathonServiceConfig(
            service='service',
            instance='instance',
            cluster='cluster',
            config_dict={
                'min_instances': 5,
                'max_instances': 10,
            },
            branch_dict={},
        )
        autoscaling_lib.set_instances_for_marathon_service('service', 'instance', instance_count=8)
        zk_client.set.assert_called_once_with(
            '/autoscaling/service/instance/instances', '8')
def test_autoscale_marathon_instance_drastic_downscaling():
    """A decision policy asking to drop 50 of 100 instances should be capped:
    the service is only scaled down to int(100 * 0.7) == 70 per pass."""
    current_instances = 100
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={'min_instances': 5, 'max_instances': 100},
        branch_dict={},
    )
    with mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.set_instances_for_marathon_service',
        autospec=True,
    ) as mock_set_instances_for_marathon_service, mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.get_service_metrics_provider',
        autospec=True,
        **{'return_value.return_value': 0}
    ), mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.get_decision_policy',
        autospec=True,
        # The policy asks for a drastic -50 instance change.
        return_value=mock.Mock(return_value=-50),
    ), mock.patch.object(
        marathon_tools.MarathonServiceConfig,
        'get_instances',
        autospec=True,
        return_value=current_instances,
    ), mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib._log',
        autospec=True,
    ):
        autoscaling_service_lib.autoscale_marathon_instance(fake_marathon_service_config,
                                                            [mock.Mock() for i in range(current_instances)],
                                                            [mock.Mock()])
        mock_set_instances_for_marathon_service.assert_called_once_with(
            service='fake-service', instance='fake-instance',
            instance_count=int(current_instances * 0.7))
def test_get_zookeeper_instances_defaults_to_config_out_of_bounds():
    """Instance counts read from zookeeper get clamped to the configured
    [min_instances, max_instances] range."""
    fake_marathon_config = marathon_tools.MarathonServiceConfig(
        service='service',
        instance='instance',
        cluster='cluster',
        config_dict={
            'min_instances': 5,
            'max_instances': 10,
        },
        branch_dict={},
    )
    with contextlib.nested(
        mock.patch('paasta_tools.utils.KazooClient', autospec=True),
        mock.patch('paasta_tools.utils.load_system_paasta_config', autospec=True),
    ) as (
        mock_zk_client,
        _,
    ):
        # A zk value above max_instances is capped at the max.
        mock_zk_client.return_value = mock.Mock(get=mock.Mock(
            return_value=(15, None)))
        assert fake_marathon_config.get_instances() == 10
        # A zk value below min_instances is raised to the min.
        mock_zk_client.return_value = mock.Mock(get=mock.Mock(
            return_value=(0, None)))
        assert fake_marathon_config.get_instances() == 5
def given_a_new_app_to_be_deployed_constraints(context, state, bounce_method, drain_method, constraints):
    """Populate the behave ``context`` with a new MarathonServiceConfig for
    the 'bounce.test1' app using the given bounce/drain methods and
    constraints.

    :param context: behave context; gets ``service``, ``instance``,
        ``new_id`` and ``new_marathon_service_config`` set on it
    :param state: "healthy" or "unhealthy"; selects a passing or failing
        healthcheck command
    :param bounce_method: bounce method name for the app config
    :param drain_method: drain method name for the app config
    :param constraints: string representation of a constraints list,
        evaluated with eval()
    :raises ValueError: if ``state`` is not "healthy" or "unhealthy"
    """
    # NOTE: eval() is only acceptable here because constraints comes from
    # trusted feature files, never from untrusted input.
    constraints = eval(constraints)
    if state == "healthy":
        cmd = "/bin/true"
    elif state == "unhealthy":
        cmd = "/bin/false"
    else:
        # Bug fix: this used to *return* the ValueError (silently discarded
        # by behave) and used a broken %-format (comma instead of %).
        # Raise so an unknown state fails the scenario loudly.
        raise ValueError("can't start test app with unknown state %s" % state)
    context.service = 'bounce'
    context.instance = 'test1'
    context.new_id = 'bounce.test1.newapp.confighash'
    context.new_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service=context.service,
        cluster=context.cluster,
        instance=context.instance,
        config_dict={
            "cmd": "/bin/sleep 300",
            "instances": 2,
            "healthcheck_mode": "cmd",
            "healthcheck_cmd": cmd,
            "bounce_method": str(bounce_method),
            "drain_method": str(drain_method),
            "cpus": 0.1,
            "mem": 100,
            "disk": 10,
            "constraints": constraints,
        },
        branch_dict={
            'docker_image': 'busybox',
            'desired_state': 'start',
            'force_bounce': None,
        },
    )
def test_instances_status_marathon(
    mock_get_actual_deployments,
    mock_validate_service_instance,
    mock_load_marathon_service_config,
    mock_get_matching_appids,
    mock_marathon_job_status,
):
    """instance_status for a marathon instance should include the bounce
    method from the service config and a desired_state of 'start'.

    NOTE(review): the mock_* parameters are presumably injected by
    mock.patch decorators that sit above this chunk — confirm in the full
    file.
    """
    settings.cluster = 'fake_cluster'
    mock_get_actual_deployments.return_value = {
        'fake_cluster.fake_instance': 'GIT_SHA',
        'fake_cluster.fake_instance2': 'GIT_SHA',
        'fake_cluster2.fake_instance': 'GIT_SHA',
        'fake_cluster2.fake_instance2': 'GIT_SHA',
    }
    mock_validate_service_instance.return_value = 'marathon'
    settings.marathon_clients = mock.Mock()
    mock_get_matching_appids.return_value = ['a', 'b']
    mock_service_config = marathon_tools.MarathonServiceConfig(
        service='fake_service',
        cluster='fake_cluster',
        instance='fake_instance',
        config_dict={'bounce_method': 'fake_bounce'},
        branch_dict=None,
    )
    mock_load_marathon_service_config.return_value = mock_service_config
    mock_marathon_job_status.return_value = 'fake_marathon_status'
    request = testing.DummyRequest()
    request.swagger_data = {'service': 'fake_service', 'instance': 'fake_instance'}
    response = instance.instance_status(request)
    assert response['marathon']['bounce_method'] == 'fake_bounce'
    assert response['marathon']['desired_state'] == 'start'
def test_autoscale_marathon_with_http_stuff():
    """With metrics_provider 'http' configured, autoscaling should consult
    the http utilization helper and apply the decision (+1: 1 -> 2)."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={'min_instances': 1,
                     'max_instances': 10,
                     'autoscaling': {
                         'decision_policy': 'pid',
                         'metrics_provider': 'http',
                         'endpoint': '/bogus',
                     },
                     },
        branch_dict={},
    )
    with contextlib.nested(
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.set_instances_for_marathon_service',
                   autospec=True),
        mock.patch.object(marathon_tools.MarathonServiceConfig, 'get_instances',
                          autospec=True, return_value=1),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib._log', autospec=True),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.get_http_utilization_for_all_tasks',
                   autospec=True),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.get_decision_policy',
                   autospec=True, return_value=mock.Mock(return_value=1)),
    ) as (
        mock_set_instances_for_marathon_service,
        _,
        _,
        mock_get_http_utilization_for_all_tasks,
        _,
    ):
        autoscaling_service_lib.autoscale_marathon_instance(fake_marathon_service_config,
                                                            [mock.Mock()], [mock.Mock()])
        mock_set_instances_for_marathon_service.assert_called_once_with(
            service='fake-service', instance='fake-instance', instance_count=2)
        assert mock_get_http_utilization_for_all_tasks.called
def test_autoscale_services_no_data_marathon():
    """autoscale_services should surface MetricsProviderNoDataError when
    there are no marathon or mesos tasks to collect metrics from."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={
            'min_instances': 1,
            'max_instances': 10,
            'desired_state': 'start'
        },
        branch_dict={},
    )
    # Both task sources are empty — the "no data" condition under test.
    mock_mesos_tasks = []
    mock_marathon_tasks = []
    with contextlib.nested(
        mock.patch(
            'paasta_tools.autoscaling_lib.autoscale_marathon_instance',
            autospec=True),
        mock.patch('paasta_tools.autoscaling_lib.get_marathon_client', autospec=True,
                   return_value=mock.Mock(list_tasks=mock.Mock(
                       return_value=mock_marathon_tasks))),
        mock.patch(
            'paasta_tools.autoscaling_lib.get_running_tasks_from_active_frameworks',
            autospec=True, return_value=mock_mesos_tasks),
        mock.patch(
            'paasta_tools.autoscaling_lib.load_system_paasta_config',
            autospec=True, return_value=mock.Mock(get_cluster=mock.Mock())),
        mock.patch('paasta_tools.utils.load_system_paasta_config', autospec=True,
                   return_value=mock.Mock(get_zk_hosts=mock.Mock())),
        mock.patch('paasta_tools.autoscaling_lib.get_services_for_cluster', autospec=True,
                   return_value=[('fake-service', 'fake-instance')]),
        mock.patch(
            'paasta_tools.autoscaling_lib.load_marathon_service_config',
            autospec=True, return_value=fake_marathon_service_config),
        mock.patch('paasta_tools.autoscaling_lib.load_marathon_config', autospec=True),
        mock.patch('paasta_tools.utils.KazooClient', autospec=True),
        mock.patch('paasta_tools.autoscaling_lib.create_autoscaling_lock', autospec=True),
    ) as (
        mock_autoscale_marathon_instance,
        _,
        _,
        _,
        _,
        _,
        _,
        _,
        _,
        _,
    ):
        with raises(autoscaling_lib.MetricsProviderNoDataError):
            autoscaling_lib.autoscale_services()
def mock_service_config():
    """Build a canned MarathonServiceConfig fixture for use in tests."""
    fixture_kwargs = {
        "service": "fake_service",
        "cluster": "fake_cluster",
        "instance": "fake_instance",
        "config_dict": {"bounce_method": "fake_bounce"},
        "branch_dict": None,
    }
    return marathon_tools.MarathonServiceConfig(**fixture_kwargs)
def test_mesos_cpu_metrics_provider():
    """With 600s-old cpu history in zookeeper, the provider should compute a
    utilization of 0.8 (presumably (480 - 0) / 600 scaled by the cpu limit —
    confirm against autoscaling_lib) and persist the new timestamp and cpu
    totals back to zookeeper."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={},
        branch_dict={},
    )
    fake_mesos_task = mock.MagicMock(stats={
        'mem_rss_bytes': 0,
        'mem_limit_bytes': 1000,
        'cpus_limit': 1.1,
        'cpus_system_time_secs': 240,
        'cpus_user_time_secs': 240,
    },
    )
    fake_mesos_task.__getitem__.return_value = 'fake-service.fake-instance'
    fake_marathon_tasks = [mock.Mock(id='fake-service.fake-instance')]
    current_time = datetime.now()
    # Historical data as stored in zookeeper, timestamped 600s in the past.
    zookeeper_get_payload = {
        'cpu_last_time': (current_time - timedelta(seconds=600)).strftime('%s'),
        'cpu_data': '0:fake-service.fake-instance',
    }
    with contextlib.nested(
        mock.patch('paasta_tools.utils.KazooClient', autospec=True,
                   return_value=mock.Mock(get=mock.Mock(
                       side_effect=lambda x: (zookeeper_get_payload[x.split('/')[-1]], None)))),
        mock.patch('paasta_tools.autoscaling_lib.datetime', autospec=True),
        mock.patch('paasta_tools.utils.load_system_paasta_config', autospec=True,
                   return_value=mock.Mock(get_zk_hosts=mock.Mock())),
    ) as (
        mock_zk_client,
        mock_datetime,
        _,
    ):
        mock_datetime.now.return_value = current_time
        assert autoscaling_lib.mesos_cpu_metrics_provider(
            fake_marathon_service_config, fake_marathon_tasks, (fake_mesos_task, )) == 0.8
        mock_zk_client.return_value.set.assert_has_calls([
            mock.call('/autoscaling/fake-service/fake-instance/cpu_last_time',
                      current_time.strftime('%s')),
            mock.call('/autoscaling/fake-service/fake-instance/cpu_data',
                      '480.0:fake-service.fake-instance'),
        ], any_order=True)
def test_http_metrics_provider_no_data():
    """A malformed JSON payload (no 'utilization' key) should make the http
    metrics provider raise MetricsProviderNoDataError."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={},
        branch_dict={},
    )
    fake_marathon_tasks = [mock.Mock(id='fake-service.fake-instance', host='fake_host', ports=[30101])]
    mock_request_result = mock.Mock(json=mock.Mock(return_value='malformed_result'))
    with mock.patch('paasta_tools.autoscaling_lib.requests.get', autospec=True,
                    return_value=mock_request_result):
        with raises(autoscaling_lib.MetricsProviderNoDataError):
            # Bug fix: a stray `== 0.5` used to trail this call; the call is
            # expected to raise before returning, so the comparison was dead
            # code and has been removed.
            autoscaling_lib.http_metrics_provider(fake_marathon_service_config,
                                                  fake_marathon_tasks, mock.Mock())
def test_http_metrics_provider():
    """The http metrics provider should parse the 'utilization' value out of
    the task endpoint's JSON response."""
    service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={},
        branch_dict={},
    )
    tasks = [mock.Mock(id='fake-service.fake-instance', host='fake_host', ports=[30101])]
    fake_response = mock.Mock(json=mock.Mock(return_value={'utilization': '0.5'}))
    requests_patch = mock.patch('paasta_tools.autoscaling_lib.requests.get',
                                autospec=True, return_value=fake_response)
    with requests_patch:
        utilization = autoscaling_lib.http_metrics_provider(
            service_config, tasks, mock.Mock())
    assert utilization == 0.5
def test_get_bouncing_status():
    """When several app ids match, the status should name the configured
    bounce method and report that the service is bouncing."""
    appids_patch = mock.patch(
        'paasta_tools.marathon_serviceinit.marathon_tools.get_matching_appids',
        autospec=True,
    )
    with appids_patch as mock_get_matching_appids:
        # Two matching apps means a bounce is in progress.
        mock_get_matching_appids.return_value = ['a', 'b']
        service_config = marathon_tools.MarathonServiceConfig(
            service='fake_service',
            cluster='fake_cluster',
            instance='fake_instance',
            config_dict={'bounce_method': 'fake_bounce'},
            branch_dict={},
        )
        status_text = marathon_serviceinit.get_bouncing_status(
            'fake_service', 'fake_instance', 'unused', service_config)
    assert 'fake_bounce' in status_text
    assert 'Bouncing' in status_text
def given_a_new_app_to_be_deployed_constraints(
    context,
    state,
    bounce_method,
    drain_method,
    constraints,
    host_port=0,
    net="bridge",
    instances=2,
):
    """Populate the behave ``context`` with a new MarathonServiceConfig for
    the 'bounce.test1' app and select the marathon client for it.

    :param context: behave context; gets ``service``, ``instance``,
        ``new_id``, ``new_marathon_service_config`` and ``current_client``
        set on it
    :param state: "healthy" or "unhealthy"; selects a passing or failing
        healthcheck command
    :param bounce_method: bounce method name for the app config
    :param drain_method: drain method name for the app config
    :param constraints: string representation of a constraints list,
        evaluated with eval()
    :param host_port: host port for the app config (default 0)
    :param net: docker network mode (default "bridge")
    :param instances: instance count for the app config (default 2)
    :raises ValueError: if ``state`` is not "healthy" or "unhealthy"
    """
    # NOTE: eval() is only acceptable here because constraints comes from
    # trusted feature files, never from untrusted input.
    constraints = eval(constraints)
    if state == "healthy":
        cmd = "/bin/true"
    elif state == "unhealthy":
        cmd = "/bin/false"
    else:
        # Bug fix: this used to *return* the ValueError (silently discarded
        # by behave) and used a broken %-format (comma instead of %).
        # Raise so an unknown state fails the scenario loudly.
        raise ValueError("can't start test app with unknown state %s" % state)
    context.service = "bounce"
    context.instance = "test1"
    context.new_id = "bounce.test1.newapp.confighash"
    context.new_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service=context.service,
        cluster=context.cluster,
        instance=context.instance,
        config_dict={
            "cmd": "/bin/sleep 300",
            "instances": instances,
            "healthcheck_mode": "cmd",
            "healthcheck_cmd": cmd,
            "bounce_method": str(bounce_method),
            "drain_method": str(drain_method),
            "cpus": 0.1,
            "mem": 100,
            "disk": 10,
            "constraints": constraints,
            "host_port": host_port,
            "net": net,
        },
        branch_dict={
            "docker_image": "busybox",
            "desired_state": "start",
            "force_bounce": None,
        },
    )
    context.current_client = context.marathon_clients.get_current_client_for_service(
        context.new_marathon_service_config
    )
def test_get_bouncing_status():
    """get_bouncing_status should mention the bounce method and that the
    service is bouncing when multiple app ids match."""
    with contextlib.nested(
        mock.patch(
            'paasta_tools.marathon_serviceinit.marathon_tools.get_matching_appids',
            autospec=True),
    ) as (mock_get_matching_appids, ):
        mock_get_matching_appids.return_value = ['a', 'b']
        # NOTE(review): positional args are presumably
        # (service, instance, config_dict, branch_dict) for this (older)
        # MarathonServiceConfig signature — confirm against marathon_tools.
        mock_config = marathon_tools.MarathonServiceConfig(
            'fake_service',
            'fake_instance',
            {'bounce_method': 'fake_bounce'},
            {},
        )
        actual = marathon_serviceinit.get_bouncing_status(
            'fake_service', 'fake_instance', 'unused', mock_config)
        assert 'fake_bounce' in actual
        assert 'Bouncing' in actual
def test_autoscale_services_happy_path():
    """Happy path: one healthy marathon task backed by a matching mesos task
    should result in exactly one autoscale_marathon_instance call with those
    tasks."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={'min_instances': 1, 'max_instances': 10, 'desired_state': 'start'},
        branch_dict={},
    )
    mock_mesos_tasks = [{'id': 'fake-service.fake-instance.sha123.sha456'}]
    # The marathon task reports as alive so it counts as healthy.
    mock_healthcheck_results = mock.Mock(alive=True)
    mock_marathon_tasks = [mock.Mock(id='fake-service.fake-instance.sha123.sha456',
                                     health_check_results=[mock_healthcheck_results])]
    with mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.autoscale_marathon_instance',
        autospec=True,
    ) as mock_autoscale_marathon_instance, mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.get_marathon_client',
        autospec=True,
        return_value=mock.Mock(list_tasks=mock.Mock(return_value=mock_marathon_tasks)),
    ), mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.get_all_running_tasks',
        autospec=True,
        return_value=mock_mesos_tasks,
    ), mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.load_system_paasta_config',
        autospec=True,
        return_value=mock.Mock(get_cluster=mock.Mock()),
    ), mock.patch(
        'paasta_tools.utils.load_system_paasta_config',
        autospec=True,
        return_value=mock.Mock(get_zk_hosts=mock.Mock()),
    ), mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.get_services_for_cluster',
        autospec=True,
        return_value=[('fake-service', 'fake-instance')],
    ), mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.load_marathon_service_config',
        autospec=True,
        return_value=fake_marathon_service_config,
    ), mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.load_marathon_config',
        autospec=True,
    ), mock.patch(
        'paasta_tools.utils.KazooClient',
        autospec=True,
    ), mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.create_autoscaling_lock',
        autospec=True,
    ), mock.patch(
        'paasta_tools.marathon_tools.MarathonServiceConfig.format_marathon_app_dict',
        autospec=True,
    ) as mock_format_marathon_app_dict:
        # The formatted app id must match the task ids above.
        mock_format_marathon_app_dict.return_value = {'id': 'fake-service.fake-instance.sha123.sha456'}
        autoscaling_service_lib.autoscale_services()
        mock_autoscale_marathon_instance.assert_called_once_with(
            fake_marathon_service_config, mock_marathon_tasks, mock_mesos_tasks)
def test_get_bouncing_status():
    """A multi-app bounce should surface both the configured bounce method
    and the 'Bouncing' state in the status string."""
    appids_patch = mock.patch(
        "paasta_tools.marathon_serviceinit.marathon_tools.get_matching_appids",
        autospec=True,
    )
    with appids_patch as mock_get_matching_appids:
        # Two matching apps means a bounce is in progress.
        mock_get_matching_appids.return_value = ["a", "b"]
        service_config = marathon_tools.MarathonServiceConfig(
            service="fake_service",
            cluster="fake_cluster",
            instance="fake_instance",
            config_dict={"bounce_method": "fake_bounce"},
            branch_dict=None,
        )
        status_text = marathon_serviceinit.get_bouncing_status(
            "fake_service", "fake_instance", "unused", service_config)
    assert "fake_bounce" in status_text
    assert "Bouncing" in status_text
def test_pid_decision_policy():
    """Zero error with zeroed PID state should yield a decision of 0 and
    write the updated PID terms and timestamp back to zookeeper."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={},
        branch_dict={},
    )
    current_time = datetime.now()
    # PID controller state as stored in zookeeper, last updated 600s ago.
    zookeeper_get_payload = {
        'pid_iterm': 0,
        'pid_last_error': 0,
        'pid_last_time': (current_time - timedelta(seconds=600)).strftime('%s'),
    }
    with contextlib.nested(
        mock.patch('paasta_tools.utils.KazooClient', autospec=True,
                   return_value=mock.Mock(get=mock.Mock(
                       side_effect=lambda x: (zookeeper_get_payload[x.split('/')[-1]], None)))),
        mock.patch('paasta_tools.autoscaling_lib.datetime', autospec=True),
        mock.patch('paasta_tools.utils.load_system_paasta_config', autospec=True,
                   return_value=mock.Mock(get_zk_hosts=mock.Mock())),
    ) as (
        mock_zk_client,
        mock_datetime,
        _,
    ):
        mock_datetime.now.return_value = current_time
        assert autoscaling_lib.pid_decision_policy(
            fake_marathon_service_config, 0.0) == 0
        mock_zk_client.return_value.set.assert_has_calls([
            mock.call('/autoscaling/fake-service/fake-instance/pid_iterm', '0.0'),
            mock.call('/autoscaling/fake-service/fake-instance/pid_last_error', '0.0'),
            mock.call('/autoscaling/fake-service/fake-instance/pid_last_time',
                      '%s' % current_time.strftime('%s')),
        ], any_order=True)
def test_get_zookeeper_instances_defaults_to_max_instances_when_no_zk_node():
    """When zookeeper has no node for the service, get_instances should fall
    back to the configured max_instances."""
    service_config = marathon_tools.MarathonServiceConfig(
        service='service',
        instance='instance',
        cluster='cluster',
        config_dict={
            'min_instances': 5,
            'max_instances': 10,
        },
        branch_dict={},
    )
    zk_patch = mock.patch('paasta_tools.utils.KazooClient', autospec=True)
    paasta_config_patch = mock.patch(
        'paasta_tools.utils.load_system_paasta_config', autospec=True)
    with zk_patch as mock_zk_client, paasta_config_patch:
        # NoNodeError simulates the missing autoscaling node.
        mock_zk_client.return_value = mock.Mock(get=mock.Mock(side_effect=NoNodeError))
        assert service_config.get_instances() == 10
def test_autoscale_services_bespoke_doesnt_autoscale():
    """Services configured with the 'bespoke' decision policy manage their
    own scaling, so autoscale_services must not call
    autoscale_marathon_instance for them."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={'min_instances': 1, 'max_instances': 10, 'desired_state': 'start',
                     'autoscaling': {'decision_policy': 'bespoke'}},
        branch_dict={},
    )
    mock_mesos_tasks = [{'id': 'fake-service.fake-instance'}]
    mock_marathon_tasks = [mock.Mock(id='fake-service.fake-instance')]
    with contextlib.nested(
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.autoscale_marathon_instance',
                   autospec=True),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.get_marathon_client',
                   autospec=True,
                   return_value=mock.Mock(list_tasks=mock.Mock(return_value=mock_marathon_tasks))),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.get_running_tasks_from_frameworks',
                   autospec=True, return_value=mock_mesos_tasks),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.load_system_paasta_config',
                   autospec=True, return_value=mock.Mock(get_cluster=mock.Mock())),
        mock.patch('paasta_tools.utils.load_system_paasta_config', autospec=True,
                   return_value=mock.Mock(get_zk_hosts=mock.Mock())),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.get_services_for_cluster',
                   autospec=True, return_value=[('fake-service', 'fake-instance')]),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.load_marathon_service_config',
                   autospec=True, return_value=fake_marathon_service_config),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.load_marathon_config',
                   autospec=True),
        mock.patch('paasta_tools.utils.KazooClient', autospec=True),
        mock.patch('paasta_tools.autoscaling.autoscaling_service_lib.create_autoscaling_lock',
                   autospec=True),
    ) as (
        mock_autoscale_marathon_instance,
        _,
        _,
        _,
        _,
        _,
        _,
        _,
        _,
        _,
    ):
        autoscaling_service_lib.autoscale_services()
        assert not mock_autoscale_marathon_instance.called
def test_get_zookeeper_instances():
    """When zookeeper holds an instance count, get_instances should return
    it and hit zookeeper exactly once."""
    service_config = marathon_tools.MarathonServiceConfig(
        service='service',
        instance='instance',
        cluster='cluster',
        config_dict={
            'instances': 5,
            'max_instances': 10,
        },
        branch_dict={},
    )
    zk_patch = mock.patch('paasta_tools.utils.KazooClient', autospec=True)
    paasta_config_patch = mock.patch(
        'paasta_tools.utils.load_system_paasta_config', autospec=True)
    with zk_patch as mock_zk_client, paasta_config_patch:
        fake_zk_get = mock.Mock(return_value=(7, None))
        mock_zk_client.return_value = mock.Mock(get=fake_zk_get)
        assert service_config.get_instances() == 7
        assert fake_zk_get.call_count == 1
def test_autoscale_marathon_instance_up_to_min_instances():
    """Downscaling decisions must never take a service below min_instances,
    even when fewer healthy tasks than expected are found."""
    current_instances = 5
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={'min_instances': 10, 'max_instances': 100},
        branch_dict={},
    )
    with mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.set_instances_for_marathon_service',
        autospec=True,
    ) as mock_set_instances_for_marathon_service, mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.get_service_metrics_provider',
        autospec=True,
        **{'return_value.return_value': 0}
    ), mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib.get_decision_policy',
        autospec=True,
        # The policy asks to shrink by 3, which would undercut min_instances.
        return_value=mock.Mock(return_value=-3),
    ), mock.patch.object(
        marathon_tools.MarathonServiceConfig,
        'get_instances',
        autospec=True,
        return_value=current_instances,
    ), mock.patch(
        'paasta_tools.autoscaling.autoscaling_service_lib._log',
        autospec=True,
    ):
        autoscaling_service_lib.autoscale_marathon_instance(fake_marathon_service_config,
                                                            [mock.Mock()] * 5, [mock.Mock()] * 5)
        mock_set_instances_for_marathon_service.assert_called_once_with(
            service='fake-service', instance='fake-instance', instance_count=10)
        # even if we don't find the tasks healthy in marathon we shouldn't be below min_instances
        mock_set_instances_for_marathon_service.reset_mock()
        autoscaling_service_lib.autoscale_marathon_instance(
            fake_marathon_service_config,
            [mock.Mock()] * (int(5 * (1 - MAX_TASK_DELTA)) - 1),
            [mock.Mock()] * (int(5 * (1 - MAX_TASK_DELTA)) - 1))
        mock_set_instances_for_marathon_service.assert_called_once_with(
            service='fake-service', instance='fake-instance', instance_count=10)
def send_sensu_bounce_keepalive(service, instance, soa_dir, cluster, config):
    """Send a Sensu keepalive event with a special ``ttl`` to signal that the
    bounce is in a healthy steady state.

    This event is **not** fired while the bounce is in progress. If the
    bounce goes on for too long, the ``ttl`` will expire and Sensu will emit
    a new event saying that this one didn't check in within the expected
    time-to-live.

    :param service: service name being bounced
    :param instance: instance name being bounced
    :param soa_dir: soa-configs directory passed through to send_event
    :param cluster: cluster the bounce is happening in
    :param config: config_dict used to build the MarathonServiceConfig whose
        monitoring settings are used as the base overrides
    """
    ttl = '1h'
    marathon_service_config = marathon_tools.MarathonServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=config,
        branch_dict=None,
    )
    monitoring_overrides = marathon_service_config.get_monitoring()
    # Sensu currently emits events for expired ttl checks every 30s
    monitoring_overrides['check_every'] = '30s'
    monitoring_overrides['alert_after'] = '2m'
    monitoring_overrides['runbook'] = 'http://y/paasta-troubleshooting'
    monitoring_overrides['tip'] = (
        "Check out `paasta logs`. If the bounce hasn't made progress, "
        "it may mean that the new version isn't healthy.")
    # Dogfooding this alert till I'm comfortable it doesn't spam people
    monitoring_overrides['team'] = 'noop'
    monitoring_overrides['notification_email'] = '*****@*****.**'
    monitoring_tools.send_event(
        service=service,
        check_name='paasta_bounce_progress.%s' % compose_job_id(service, instance),
        overrides=monitoring_overrides,
        status=pysensu_yelp.Status.OK,
        output="The bounce is in a steady state",
        soa_dir=soa_dir,
        ttl=ttl,
    )
def test_mesos_ram_metrics_provider():
    """mesos_cpu_ram_metrics_provider should report 0.8 for this task
    (presumably mem_rss_bytes / mem_limit_bytes = 800 / 1000 — confirm in
    autoscaling_lib) when zookeeper holds no cpu history."""
    fake_marathon_service_config = marathon_tools.MarathonServiceConfig(
        service='fake-service',
        instance='fake-instance',
        cluster='fake-cluster',
        config_dict={},
        branch_dict={},
    )
    fake_mesos_task = mock.MagicMock(stats={
        'mem_rss_bytes': 800,
        'mem_limit_bytes': 1000,
        'cpus_limit': 1.1,
    },
    )
    fake_mesos_task.__getitem__.return_value = 'fake-service.fake-instance'
    fake_marathon_tasks = [mock.Mock(id='fake-service.fake-instance')]
    current_time = datetime.now()
    with contextlib.nested(
        # NoNodeError simulates a zookeeper with no historical cpu data.
        mock.patch('paasta_tools.utils.KazooClient', autospec=True,
                   return_value=mock.Mock(get=mock.Mock(
                       side_effect=NoNodeError))),
        mock.patch('paasta_tools.autoscaling_lib.datetime', autospec=True),
        mock.patch('paasta_tools.utils.load_system_paasta_config', autospec=True,
                   return_value=mock.Mock(get_zk_hosts=mock.Mock())),
    ) as (
        mock_zk_client,
        mock_datetime,
        _,
    ):
        mock_datetime.now.return_value = current_time
        assert autoscaling_lib.mesos_cpu_ram_metrics_provider(
            fake_marathon_service_config, fake_marathon_tasks, (fake_mesos_task, )) == 0.8
from paasta_tools.utils import compose_job_id from paasta_tools.utils import DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT from paasta_tools.utils import NoDockerImageError from paasta_tools.utils import PaastaColors from paasta_tools.utils import remove_ansi_escape_sequences from paasta_tools.utils import SystemPaastaConfig fake_marathon_job_config = marathon_tools.MarathonServiceConfig( service="servicename", cluster="clustername", instance="instancename", config_dict={ "instances": 3, "cpus": 1, "mem": 100, "disk": 512, "nerve_ns": "fake_nerve_ns", }, branch_dict={ "docker_image": "test_docker:1.0", "desired_state": "start", "force_bounce": None, }, ) def test_get_bouncing_status(): with mock.patch( "paasta_tools.marathon_serviceinit.marathon_tools.get_matching_appids", autospec=True, ) as mock_get_matching_appids:
class TestSetupMarathonJob: fake_docker_image = 'test_docker:1.0' fake_cluster = 'fake_test_cluster' fake_marathon_service_config = marathon_tools.MarathonServiceConfig( service='servicename', cluster='clustername', instance='instancename', config_dict={ 'instances': 3, 'cpus': 1, 'mem': 100, 'docker_image': fake_docker_image, 'nerve_ns': 'aaaaugh', 'bounce_method': 'brutal' }, branch_dict={}, ) fake_docker_registry = 'remote_registry.com' fake_marathon_config = marathon_tools.MarathonConfig({ 'url': 'http://test_url', 'user': '******', 'password': '******', }, '/fake/fake_file.json') fake_args = mock.MagicMock( service_instance='what_is_love.bby_dont_hurt_me', soa_dir='no_more', verbose=False, ) fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig({ 'mode': 'http' }) def test_main_success(self): fake_client = mock.MagicMock() with contextlib.nested( mock.patch( 'paasta_tools.setup_marathon_job.parse_args', return_value=self.fake_args, autospec=True, ), mock.patch( 'paasta_tools.setup_marathon_job.get_main_marathon_config', return_value=self.fake_marathon_config, autospec=True, ), mock.patch( 'paasta_tools.marathon_tools.get_marathon_client', return_value=fake_client, autospec=True, ), mock.patch( 'paasta_tools.marathon_tools.load_marathon_service_config', return_value=self.fake_marathon_service_config, autospec=True, ), mock.patch( 'paasta_tools.setup_marathon_job.setup_service', return_value=(0, 'it_is_finished'), autospec=True, ), mock.patch('paasta_tools.setup_marathon_job.load_system_paasta_config', autospec=True), mock.patch('paasta_tools.setup_marathon_job.send_event', autospec=True), mock.patch('sys.exit', autospec=True), ) as ( parse_args_patch, get_main_conf_patch, get_client_patch, read_service_conf_patch, setup_service_patch, load_system_paasta_config_patch, sensu_patch, sys_exit_patch, ): load_system_paasta_config_patch.return_value.get_cluster = mock.Mock(return_value=self.fake_cluster) setup_marathon_job.main() 
parse_args_patch.assert_called_once_with() get_main_conf_patch.assert_called_once_with() get_client_patch.assert_called_once_with( self.fake_marathon_config.get_url(), self.fake_marathon_config.get_username(), self.fake_marathon_config.get_password(), ) read_service_conf_patch.assert_called_once_with( decompose_job_id(self.fake_args.service_instance)[0], decompose_job_id(self.fake_args.service_instance)[1], self.fake_cluster, soa_dir=self.fake_args.soa_dir, ) setup_service_patch.assert_called_once_with( decompose_job_id(self.fake_args.service_instance)[0], decompose_job_id(self.fake_args.service_instance)[1], fake_client, self.fake_marathon_config, self.fake_marathon_service_config, 'no_more', ) sys_exit_patch.assert_called_once_with(0) def test_main_failure(self): fake_client = mock.MagicMock() with contextlib.nested( mock.patch( 'paasta_tools.setup_marathon_job.parse_args', return_value=self.fake_args, autospec=True, ), mock.patch( 'paasta_tools.setup_marathon_job.get_main_marathon_config', return_value=self.fake_marathon_config, autospec=True, ), mock.patch( 'paasta_tools.marathon_tools.get_marathon_client', return_value=fake_client, autospec=True, ), mock.patch( 'paasta_tools.marathon_tools.load_marathon_service_config', return_value=self.fake_marathon_service_config, autospec=True, ), mock.patch( 'paasta_tools.setup_marathon_job.setup_service', return_value=(1, 'NEVER'), autospec=True, ), mock.patch('paasta_tools.setup_marathon_job.load_system_paasta_config', autospec=True), mock.patch('paasta_tools.setup_marathon_job.send_event', autospec=True), mock.patch('sys.exit', autospec=True), ) as ( parse_args_patch, get_main_conf_patch, get_client_patch, read_service_conf_patch, setup_service_patch, load_system_paasta_config_patch, sensu_patch, sys_exit_patch, ): load_system_paasta_config_patch.return_value.get_cluster = mock.Mock(return_value=self.fake_cluster) setup_marathon_job.main() parse_args_patch.assert_called_once_with() 
get_main_conf_patch.assert_called_once_with() get_client_patch.assert_called_once_with( self.fake_marathon_config.get_url(), self.fake_marathon_config.get_username(), self.fake_marathon_config.get_password()) read_service_conf_patch.assert_called_once_with( decompose_job_id(self.fake_args.service_instance)[0], decompose_job_id(self.fake_args.service_instance)[1], self.fake_cluster, soa_dir=self.fake_args.soa_dir) setup_service_patch.assert_called_once_with( decompose_job_id(self.fake_args.service_instance)[0], decompose_job_id(self.fake_args.service_instance)[1], fake_client, self.fake_marathon_config, self.fake_marathon_service_config, 'no_more', ) sys_exit_patch.assert_called_once_with(0) def test_main_sends_event_if_no_deployments(self): fake_client = mock.MagicMock() with contextlib.nested( mock.patch( 'paasta_tools.setup_marathon_job.parse_args', return_value=self.fake_args, autospec=True, ), mock.patch( 'paasta_tools.setup_marathon_job.get_main_marathon_config', return_value=self.fake_marathon_config, autospec=True, ), mock.patch( 'paasta_tools.marathon_tools.get_marathon_client', return_value=fake_client, autospec=True, ), mock.patch( 'paasta_tools.marathon_tools.load_marathon_service_config', side_effect=NoDeploymentsAvailable(), autospec=True, ), mock.patch( 'paasta_tools.setup_marathon_job.setup_service', return_value=(1, 'NEVER'), autospec=True, ), mock.patch('paasta_tools.setup_marathon_job.load_system_paasta_config', autospec=True), mock.patch('paasta_tools.setup_marathon_job.send_event', autospec=True), ) as ( parse_args_patch, get_main_conf_patch, get_client_patch, read_service_conf_patch, setup_service_patch, load_system_paasta_config_patch, sensu_patch, ): load_system_paasta_config_patch.return_value.get_cluster = mock.Mock(return_value=self.fake_cluster) with raises(SystemExit) as exc_info: setup_marathon_job.main() parse_args_patch.assert_called_once_with() get_main_conf_patch.assert_called_once_with() get_client_patch.assert_called_once_with( 
self.fake_marathon_config.get_url(), self.fake_marathon_config.get_username(), self.fake_marathon_config.get_password()) read_service_conf_patch.assert_called_once_with( decompose_job_id(self.fake_args.service_instance)[0], decompose_job_id(self.fake_args.service_instance)[1], self.fake_cluster, soa_dir=self.fake_args.soa_dir) expected_string = 'No deployments found for %s in cluster %s' % ( self.fake_args.service_instance, self.fake_cluster) sensu_patch.assert_called_once_with( decompose_job_id(self.fake_args.service_instance)[0], decompose_job_id(self.fake_args.service_instance)[1], self.fake_args.soa_dir, Status.CRITICAL, expected_string ) assert exc_info.value.code == 0 def test_send_event(self): fake_service = 'fake_service' fake_instance = 'fake_instance' fake_status = '42' fake_output = 'The http port is not open' fake_soa_dir = '' expected_check_name = 'setup_marathon_job.%s' % compose_job_id(fake_service, fake_instance) with contextlib.nested( mock.patch("paasta_tools.monitoring_tools.send_event", autospec=True), mock.patch("paasta_tools.marathon_tools.load_marathon_service_config", autospec=True), mock.patch("paasta_tools.setup_marathon_job.load_system_paasta_config", autospec=True), ) as ( send_event_patch, load_marathon_service_config_patch, load_system_paasta_config_patch, ): load_system_paasta_config_patch.return_value.get_cluster = mock.Mock(return_value='fake_cluster') load_marathon_service_config_patch.return_value.get_monitoring.return_value = {} setup_marathon_job.send_event( fake_service, fake_instance, fake_soa_dir, fake_status, fake_output ) send_event_patch.assert_called_once_with( fake_service, expected_check_name, {'alert_after': '10m', 'check_every': '10s'}, fake_status, fake_output, fake_soa_dir ) load_marathon_service_config_patch.assert_called_once_with( fake_service, fake_instance, load_system_paasta_config_patch.return_value.get_cluster.return_value, load_deployments=False, ) def test_send_bounce_keepalive(self): fake_service = 
'fake_service' fake_instance = 'fake_instance' fake_cluster = 'fake_cluster' fake_soa_dir = '' expected_check_name = 'paasta_bounce_progress.%s' % compose_job_id(fake_service, fake_instance) with contextlib.nested( mock.patch("paasta_tools.monitoring_tools.send_event", autospec=True), mock.patch("paasta_tools.marathon_tools.load_marathon_service_config", autospec=True), ) as ( send_event_patch, load_marathon_service_config_patch, ): load_marathon_service_config_patch.return_value.get_monitoring.return_value = {} setup_marathon_job.send_sensu_bounce_keepalive( service=fake_service, instance=fake_instance, cluster=fake_cluster, soa_dir=fake_soa_dir, ) send_event_patch.assert_called_once_with( service=fake_service, check_name=expected_check_name, overrides=mock.ANY, status=0, output=mock.ANY, soa_dir=fake_soa_dir, ttl='1h', ) load_marathon_service_config_patch.assert_called_once_with( service=fake_service, instance=fake_instance, cluster=fake_cluster, load_deployments=False, ) def test_do_bounce_when_create_app_and_new_app_not_running(self): fake_bounce_func_return = { 'create_app': True, 'tasks_to_drain': [mock.Mock(app_id='fake_task_to_kill_1')], } fake_bounce_func = mock.create_autospec( bounce_lib.brutal_bounce, return_value=fake_bounce_func_return, ) fake_config = {'instances': 5} fake_new_app_running = False fake_happy_new_tasks = ['fake_one', 'fake_two', 'fake_three'] fake_old_app_live_tasks = {} fake_old_app_draining_tasks = {} fake_service = 'fake_service' fake_serviceinstance = 'fake_service.fake_instance' self.fake_cluster = 'fake_cluster' fake_instance = 'fake_instance' fake_bounce_method = 'fake_bounce_method' fake_drain_method = mock.Mock(is_safe_to_kill=lambda t: False) fake_marathon_jobid = 'fake.marathon.jobid' fake_client = mock.create_autospec( marathon.MarathonClient ) expected_new_task_count = fake_config["instances"] - len(fake_happy_new_tasks) expected_drain_task_count = len(fake_bounce_func_return['tasks_to_drain']) with contextlib.nested( 
mock.patch('paasta_tools.setup_marathon_job._log', autospec=True), mock.patch('paasta_tools.setup_marathon_job.bounce_lib.create_marathon_app', autospec=True), mock.patch('paasta_tools.setup_marathon_job.bounce_lib.kill_old_ids', autospec=True), ) as (mock_log, mock_create_marathon_app, mock_kill_old_ids): setup_marathon_job.do_bounce( bounce_func=fake_bounce_func, drain_method=fake_drain_method, config=fake_config, new_app_running=fake_new_app_running, happy_new_tasks=fake_happy_new_tasks, old_app_live_tasks=fake_old_app_live_tasks, old_app_draining_tasks=fake_old_app_draining_tasks, service=fake_service, bounce_method=fake_bounce_method, serviceinstance=fake_serviceinstance, cluster=self.fake_cluster, instance=fake_instance, marathon_jobid=fake_marathon_jobid, client=fake_client, soa_dir='fake_soa_dir', ) assert mock_log.call_count == 3 first_logged_line = mock_log.mock_calls[0][2]["line"] assert '%s new tasks' % expected_new_task_count in first_logged_line second_logged_line = mock_log.mock_calls[1][2]["line"] assert 'creating new app with app_id %s' % fake_marathon_jobid in second_logged_line third_logged_line = mock_log.mock_calls[2][2]["line"] assert 'draining %s old tasks' % expected_drain_task_count in third_logged_line assert mock_create_marathon_app.call_count == 1 assert fake_client.kill_task.call_count == 0 assert fake_drain_method.drain.call_count == len(fake_bounce_func_return["tasks_to_drain"]) assert mock_kill_old_ids.call_count == 0 def test_do_bounce_when_create_app_and_new_app_running(self): fake_task_to_drain = mock.Mock(app_id='fake_app_to_kill_1') fake_bounce_func_return = { 'create_app': True, 'tasks_to_drain': [fake_task_to_drain], } fake_bounce_func = mock.create_autospec( bounce_lib.brutal_bounce, return_value=fake_bounce_func_return, ) fake_config = {'instances': 5} fake_new_app_running = True fake_happy_new_tasks = ['fake_one', 'fake_two', 'fake_three'] fake_old_app_live_tasks = {'fake_app_to_kill_1': set([fake_task_to_drain])} 
fake_old_app_draining_tasks = {'fake_app_to_kill_1': set()} fake_service = 'fake_service' fake_serviceinstance = 'fake_service.fake_instance' self.fake_cluster = 'fake_cluster' fake_instance = 'fake_instance' fake_bounce_method = 'fake_bounce_method' fake_drain_method = mock.Mock(is_safe_to_kill=lambda t: False) fake_marathon_jobid = 'fake.marathon.jobid' fake_client = mock.create_autospec( marathon.MarathonClient ) expected_new_task_count = fake_config["instances"] - len(fake_happy_new_tasks) expected_drain_task_count = len(fake_bounce_func_return['tasks_to_drain']) with contextlib.nested( mock.patch('paasta_tools.setup_marathon_job._log', autospec=True), mock.patch('paasta_tools.setup_marathon_job.bounce_lib.create_marathon_app', autospec=True), mock.patch('paasta_tools.setup_marathon_job.bounce_lib.kill_old_ids', autospec=True), ) as (mock_log, mock_create_marathon_app, mock_kill_old_ids): setup_marathon_job.do_bounce( bounce_func=fake_bounce_func, drain_method=fake_drain_method, config=fake_config, new_app_running=fake_new_app_running, happy_new_tasks=fake_happy_new_tasks, old_app_live_tasks=fake_old_app_live_tasks, old_app_draining_tasks=fake_old_app_draining_tasks, service=fake_service, bounce_method=fake_bounce_method, serviceinstance=fake_serviceinstance, cluster=self.fake_cluster, instance=fake_instance, marathon_jobid=fake_marathon_jobid, client=fake_client, soa_dir='fake_soa_dir', ) first_logged_line = mock_log.mock_calls[0][2]["line"] assert '%s new tasks' % expected_new_task_count in first_logged_line second_logged_line = mock_log.mock_calls[1][2]["line"] assert 'draining %s old tasks' % expected_drain_task_count in second_logged_line assert mock_log.call_count == 2 assert mock_create_marathon_app.call_count == 0 assert fake_client.kill_task.call_count == 0 assert mock_kill_old_ids.call_count == 0 assert fake_drain_method.drain.call_count == len(fake_bounce_func_return["tasks_to_drain"]) def test_do_bounce_when_tasks_to_drain(self): fake_task_to_drain 
= mock.Mock(app_id='fake_app_to_kill_1') fake_bounce_func_return = { 'create_app': False, 'tasks_to_drain': [fake_task_to_drain], } fake_bounce_func = mock.create_autospec( bounce_lib.brutal_bounce, return_value=fake_bounce_func_return, ) fake_config = {'instances': 5} fake_new_app_running = True fake_happy_new_tasks = ['fake_one', 'fake_two', 'fake_three'] fake_old_app_live_tasks = {'fake_app_to_kill_1': set([fake_task_to_drain])} fake_old_app_draining_tasks = {'fake_app_to_kill_1': set([])} fake_service = 'fake_service' fake_serviceinstance = 'fake_service.fake_instance' self.fake_cluster = 'fake_cluster' fake_instance = 'fake_instance' fake_bounce_method = 'fake_bounce_method' fake_drain_method = mock.Mock(is_safe_to_kill=lambda t: False) fake_marathon_jobid = 'fake.marathon.jobid' fake_client = mock.create_autospec( marathon.MarathonClient ) expected_new_task_count = fake_config["instances"] - len(fake_happy_new_tasks) expected_drain_task_count = len(fake_bounce_func_return['tasks_to_drain']) with contextlib.nested( mock.patch('paasta_tools.setup_marathon_job._log', autospec=True), mock.patch('paasta_tools.setup_marathon_job.bounce_lib.create_marathon_app', autospec=True), mock.patch('paasta_tools.setup_marathon_job.bounce_lib.kill_old_ids', autospec=True), ) as (mock_log, mock_create_marathon_app, mock_kill_old_ids): setup_marathon_job.do_bounce( bounce_func=fake_bounce_func, drain_method=fake_drain_method, config=fake_config, new_app_running=fake_new_app_running, happy_new_tasks=fake_happy_new_tasks, old_app_live_tasks=fake_old_app_live_tasks, old_app_draining_tasks=fake_old_app_draining_tasks, service=fake_service, bounce_method=fake_bounce_method, serviceinstance=fake_serviceinstance, cluster=self.fake_cluster, instance=fake_instance, marathon_jobid=fake_marathon_jobid, client=fake_client, soa_dir='fake_soa_dir', ) # assert mock_log.call_count == 3 first_logged_line = mock_log.mock_calls[0][2]["line"] assert '%s new tasks' % expected_new_task_count in 
first_logged_line second_logged_line = mock_log.mock_calls[1][2]["line"] assert 'draining %s old tasks with app_id %s' % (expected_drain_task_count, 'fake_app_to_kill_1') \ in second_logged_line assert mock_create_marathon_app.call_count == 0 assert fake_client.kill_task.call_count == 0 assert mock_kill_old_ids.call_count == 0 assert fake_drain_method.drain.call_count == expected_drain_task_count def test_do_bounce_when_apps_to_kill(self): fake_bounce_func_return = { 'create_app': False, 'tasks_to_drain': [], } fake_bounce_func = mock.create_autospec( bounce_lib.brutal_bounce, return_value=fake_bounce_func_return, ) fake_config = {'instances': 5} fake_new_app_running = True fake_happy_new_tasks = ['fake_one', 'fake_two', 'fake_three'] fake_old_app_live_tasks = {'fake_app_to_kill_1': set()} fake_old_app_draining_tasks = {'fake_app_to_kill_1': set()} fake_service = 'fake_service' fake_serviceinstance = 'fake_service.fake_instance' self.fake_cluster = 'fake_cluster' fake_instance = 'fake_instance' fake_bounce_method = 'fake_bounce_method' fake_drain_method = mock.Mock() fake_marathon_jobid = 'fake.marathon.jobid' fake_client = mock.create_autospec( marathon.MarathonClient ) expected_new_task_count = fake_config["instances"] - len(fake_happy_new_tasks) with contextlib.nested( mock.patch('paasta_tools.setup_marathon_job._log', autospec=True), mock.patch('paasta_tools.setup_marathon_job.bounce_lib.create_marathon_app', autospec=True), mock.patch('paasta_tools.setup_marathon_job.bounce_lib.kill_old_ids', autospec=True), ) as (mock_log, mock_create_marathon_app, mock_kill_old_ids): setup_marathon_job.do_bounce( bounce_func=fake_bounce_func, drain_method=fake_drain_method, config=fake_config, new_app_running=fake_new_app_running, happy_new_tasks=fake_happy_new_tasks, old_app_live_tasks=fake_old_app_live_tasks, old_app_draining_tasks=fake_old_app_draining_tasks, service=fake_service, bounce_method=fake_bounce_method, serviceinstance=fake_serviceinstance, 
cluster=self.fake_cluster, instance=fake_instance, marathon_jobid=fake_marathon_jobid, client=fake_client, soa_dir='fake_soa_dir', ) assert mock_log.call_count == 3 first_logged_line = mock_log.mock_calls[0][2]["line"] assert '%s new tasks' % expected_new_task_count in first_logged_line second_logged_line = mock_log.mock_calls[1][2]["line"] assert 'removing old unused apps with app_ids: %s' % 'fake_app_to_kill_1' in second_logged_line assert mock_create_marathon_app.call_count == 0 assert fake_client.kill_task.call_count == len(fake_bounce_func_return["tasks_to_drain"]) assert mock_kill_old_ids.call_count == 1 third_logged_line = mock_log.mock_calls[2][2]["line"] assert '%s bounce on %s finish' % (fake_bounce_method, fake_serviceinstance) in third_logged_line assert 'Now running %s' % fake_marathon_jobid in third_logged_line def test_do_bounce_when_nothing_to_do(self): fake_bounce_func_return = { 'create_app': False, 'tasks_to_drain': [], } fake_bounce_func = mock.create_autospec( bounce_lib.brutal_bounce, return_value=fake_bounce_func_return, ) fake_config = {'instances': 3} fake_new_app_running = True fake_happy_new_tasks = ['fake_one', 'fake_two', 'fake_three'] fake_old_app_live_tasks = {} fake_old_app_draining_tasks = {} fake_service = 'fake_service' fake_serviceinstance = 'fake_service.fake_instance' self.fake_cluster = 'fake_cluster' fake_instance = 'fake_instance' fake_bounce_method = 'fake_bounce_method' fake_drain_method = mock.Mock() fake_marathon_jobid = 'fake.marathon.jobid' fake_client = mock.create_autospec( marathon.MarathonClient ) with contextlib.nested( mock.patch('paasta_tools.setup_marathon_job._log', autospec=True), mock.patch('paasta_tools.setup_marathon_job.bounce_lib.create_marathon_app', autospec=True), mock.patch('paasta_tools.setup_marathon_job.bounce_lib.kill_old_ids', autospec=True), mock.patch('paasta_tools.setup_marathon_job.send_sensu_bounce_keepalive', autospec=True), ) as ( mock_log, mock_create_marathon_app, mock_kill_old_ids, 
mock_send_sensu_bounce_keepalive, ): setup_marathon_job.do_bounce( bounce_func=fake_bounce_func, drain_method=fake_drain_method, config=fake_config, new_app_running=fake_new_app_running, happy_new_tasks=fake_happy_new_tasks, old_app_live_tasks=fake_old_app_live_tasks, old_app_draining_tasks=fake_old_app_draining_tasks, service=fake_service, bounce_method=fake_bounce_method, serviceinstance=fake_serviceinstance, cluster=self.fake_cluster, instance=fake_instance, marathon_jobid=fake_marathon_jobid, client=fake_client, soa_dir='fake_soa_dir', ) assert mock_log.call_count == 0 assert mock_create_marathon_app.call_count == 0 assert fake_drain_method.drain.call_count == 0 assert mock_kill_old_ids.call_count == 0 # When doing nothing, we need to send the keepalive heartbeat to Sensu mock_send_sensu_bounce_keepalive.assert_called_once_with( service=fake_service, instance=fake_instance, cluster=self.fake_cluster, soa_dir='fake_soa_dir', ) def test_setup_service_srv_already_exists(self): fake_name = 'if_trees_could_talk' fake_instance = 'would_they_scream' fake_client = mock.MagicMock(get_app=mock.Mock(return_value=True)) full_id = marathon_tools.format_job_id(fake_name, fake_instance) fake_complete = { 'seven': 'full', 'eight': 'frightened', 'nine': 'eaten', 'id': full_id, } with contextlib.nested( mock.patch( 'paasta_tools.marathon_tools.create_complete_config', return_value=fake_complete, autospec=True, ), mock.patch( 'paasta_tools.marathon_tools.load_marathon_config', return_value=self.fake_marathon_config, autospec=True, ), mock.patch( 'paasta_tools.setup_marathon_job.deploy_service', autospec=True, ), ) as ( create_config_patch, get_config_patch, deploy_service_patch, ): setup_marathon_job.setup_service( service=fake_name, instance=fake_instance, client=fake_client, marathon_config=self.fake_marathon_config, service_marathon_config=self.fake_marathon_service_config, soa_dir=None, ) create_config_patch.assert_called_once_with( fake_name, fake_instance, 
self.fake_marathon_config, ) assert deploy_service_patch.call_count == 1 def test_setup_service_srv_does_not_exist(self): fake_name = 'if_talk_was_cheap' fake_instance = 'psychatrists_would_be_broke' fake_response = mock.Mock( json=mock.Mock(return_value={'message': 'test'})) fake_client = mock.MagicMock(get_app=mock.Mock( side_effect=marathon.exceptions.NotFoundError(fake_response))) full_id = marathon_tools.format_job_id(fake_name, fake_instance, 'oogabooga', 'bananafanafofooga') fake_complete = { 'do': 'you', 'even': 'dota', 'id': full_id, 'docker_image': 'fake_docker_registry/fake_docker_image', } fake_bounce = 'trampoline' fake_drain_method = 'noop' fake_drain_method_params = {} with contextlib.nested( mock.patch( 'paasta_tools.marathon_tools.create_complete_config', return_value=fake_complete, autospec=True, ), mock.patch( 'paasta_tools.setup_marathon_job.deploy_service', return_value=(111, 'Never'), autospec=True, ), mock.patch.object( self.fake_marathon_service_config, 'get_bounce_method', return_value=fake_bounce, autospec=True, ), mock.patch.object( self.fake_marathon_service_config, 'get_drain_method', return_value=fake_drain_method, autospec=True, ), mock.patch.object( self.fake_marathon_service_config, 'get_drain_method_params', return_value=fake_drain_method_params, autospec=True, ), mock.patch( 'paasta_tools.marathon_tools.load_marathon_service_config', return_value=self.fake_marathon_service_config, autospec=True, ), mock.patch( 'paasta_tools.marathon_tools.load_service_namespace_config', return_value=self.fake_service_namespace_config, autospec=True, ), ) as ( create_config_patch, deploy_service_patch, get_bounce_patch, get_drain_method_patch, get_drain_method_params_patch, read_service_conf_patch, read_namespace_conf_patch, ): status, output = setup_marathon_job.setup_service( service=fake_name, instance=fake_instance, client=fake_client, marathon_config=self.fake_marathon_config, service_marathon_config=self.fake_marathon_service_config, 
soa_dir=None, ) assert status == 111 assert output == 'Never' create_config_patch.assert_called_once_with( fake_name, fake_instance, self.fake_marathon_config ) get_bounce_patch.assert_called_once_with() get_drain_method_patch.assert_called_once_with(read_namespace_conf_patch.return_value) deploy_service_patch.assert_called_once_with( service=fake_name, instance=fake_instance, marathon_jobid=full_id, config=fake_complete, client=fake_client, bounce_method=fake_bounce, drain_method_name=fake_drain_method, drain_method_params=fake_drain_method_params, nerve_ns=self.fake_marathon_service_config.get_nerve_namespace(), bounce_health_params=self.fake_marathon_service_config.get_bounce_health_params( read_namespace_conf_patch.return_value), soa_dir=None, ) def test_setup_service_srv_complete_config_raises(self): fake_name = 'test_service' fake_instance = 'test_instance' with mock.patch( 'paasta_tools.setup_marathon_job.marathon_tools.create_complete_config', side_effect=NoDockerImageError, ): status, output = setup_marathon_job.setup_service( service=fake_name, instance=fake_instance, client=None, marathon_config=None, service_marathon_config=None, soa_dir=None, ) assert status == 1 expected = 'Docker image for test_service.test_instance not in' assert expected in output def test_deploy_service_unknown_drain_method(self): fake_bounce = 'exists' fake_drain_method = 'doesntexist' fake_name = 'whoa' fake_instance = 'the_earth_is_tiny' fake_id = marathon_tools.format_job_id(fake_name, fake_instance) fake_apps = [mock.Mock(id=fake_id, tasks=[]), mock.Mock(id=('%s2' % fake_id), tasks=[])] fake_client = mock.MagicMock( list_apps=mock.Mock(return_value=fake_apps)) fake_config = {'id': fake_id, 'instances': 2} errormsg = 'ERROR: drain_method not recognized: doesntexist. 
Must be one of (exists1, exists2)' expected = (1, errormsg) with contextlib.nested( mock.patch('paasta_tools.setup_marathon_job._log', autospec=True), mock.patch('paasta_tools.setup_marathon_job.load_system_paasta_config', autospec=True), mock.patch( 'paasta_tools.drain_lib._drain_methods', new={'exists1': mock.Mock(), 'exists2': mock.Mock()}, ) ) as (mock_log, mock_load_system_paasta_config, mock_drain_methods): mock_load_system_paasta_config.return_value.get_cluster = mock.Mock(return_value='fake_cluster') actual = setup_marathon_job.deploy_service( service=fake_name, instance=fake_instance, marathon_jobid=fake_id, config=fake_config, client=fake_client, bounce_method=fake_bounce, drain_method_name=fake_drain_method, drain_method_params={}, nerve_ns=fake_instance, bounce_health_params={}, soa_dir='fake_soa_dir', ) assert mock_log.call_count == 1 assert expected == actual def test_deploy_service_unknown_bounce(self): fake_bounce = 'WHEEEEEEEEEEEEEEEE' fake_drain_method = 'noop' fake_name = 'whoa' fake_instance = 'the_earth_is_tiny' fake_id = marathon_tools.format_job_id(fake_name, fake_instance) fake_apps = [mock.Mock(id=fake_id, tasks=[]), mock.Mock(id=('%s2' % fake_id), tasks=[])] fake_client = mock.MagicMock( list_apps=mock.Mock(return_value=fake_apps)) fake_config = {'id': fake_id, 'instances': 2} errormsg = 'ERROR: bounce_method not recognized: %s. 
Must be one of (%s)' % \ (fake_bounce, ', '.join(list_bounce_methods())) expected = (1, errormsg) with contextlib.nested( mock.patch('paasta_tools.setup_marathon_job._log', autospec=True), mock.patch('paasta_tools.setup_marathon_job.load_system_paasta_config', autospec=True), ) as (mock_log, mock_load_system_paasta_config): mock_load_system_paasta_config.return_value.get_cluster = mock.Mock(return_value='fake_cluster') actual = setup_marathon_job.deploy_service( service=fake_name, instance=fake_instance, marathon_jobid=fake_id, config=fake_config, client=fake_client, bounce_method=fake_bounce, drain_method_name=fake_drain_method, drain_method_params={}, nerve_ns=fake_instance, bounce_health_params={}, soa_dir='fake_soa_dir', ) assert mock_log.call_count == 1 assert expected == actual fake_client.list_apps.assert_called_once_with(embed_failures=True) assert fake_client.create_app.call_count == 0 def test_deploy_service_known_bounce(self): fake_bounce = 'areallygoodbouncestrategy' fake_drain_method_name = 'noop' fake_name = 'how_many_strings' fake_instance = 'will_i_need_to_think_of' fake_id = marathon_tools.format_job_id(fake_name, fake_instance, 'git11111111', 'config11111111') fake_config = {'id': fake_id, 'instances': 2} old_app_id = marathon_tools.format_job_id(fake_name, fake_instance, 'git22222222', 'config22222222') old_task_to_drain = mock.Mock(id="old_task_to_drain", app_id=old_app_id) old_task_is_draining = mock.Mock(id="old_task_is_draining", app_id=old_app_id) old_task_dont_drain = mock.Mock(id="old_task_dont_drain", app_id=old_app_id) old_app = mock.Mock(id="/%s" % old_app_id, tasks=[old_task_to_drain, old_task_is_draining, old_task_dont_drain]) fake_client = mock.MagicMock( list_apps=mock.Mock(return_value=[old_app]), kill_task=mock.Mock(spec=lambda app_id, id, scale=False: None), ) fake_bounce_func = mock.create_autospec( bounce_lib.brutal_bounce, return_value={ "create_app": True, "tasks_to_drain": [old_task_to_drain], } ) fake_drain_method = 
        # NOTE(review): this chunk begins mid-statement — the enclosing test's
        # `def` line and the assignment target for this mock.Mock(...) are above
        # the visible region.  From context it is a fake drain method whose
        # is_draining() is True only for old_task_is_draining and which reports
        # every task as safe to kill — TODO confirm against the missing head.
        mock.Mock(is_draining=lambda t: t is old_task_is_draining, is_safe_to_kill=lambda t: True)
        with contextlib.nested(
            mock.patch(
                'paasta_tools.bounce_lib.get_bounce_method_func',
                return_value=fake_bounce_func,
                autospec=True,
            ),
            mock.patch(
                'paasta_tools.bounce_lib.bounce_lock_zookeeper',
                autospec=True
            ),
            mock.patch(
                'paasta_tools.bounce_lib.get_happy_tasks',
                autospec=True,
                # Pass tasks through unchanged: every task counts as "happy".
                side_effect=lambda x, _, __, **kwargs: x,
            ),
            mock.patch('paasta_tools.bounce_lib.kill_old_ids', autospec=True),
            mock.patch('paasta_tools.bounce_lib.create_marathon_app', autospec=True),
            mock.patch('paasta_tools.setup_marathon_job._log', autospec=True),
            mock.patch('paasta_tools.setup_marathon_job.load_system_paasta_config', autospec=True),
            mock.patch('paasta_tools.drain_lib.get_drain_method', return_value=fake_drain_method),
        # Positional unpacking below must stay in sync with the patch order above.
        ) as (_, _, _, kill_old_ids_patch, create_marathon_app_patch, mock_log,
              mock_load_system_paasta_config, _):
            mock_load_system_paasta_config.return_value.get_cluster = mock.Mock(return_value='fake_cluster')
            result = setup_marathon_job.deploy_service(
                service=fake_name,
                instance=fake_instance,
                marathon_jobid=fake_id,
                config=fake_config,
                client=fake_client,
                bounce_method=fake_bounce,
                drain_method_name=fake_drain_method_name,
                drain_method_params={},
                nerve_ns=fake_instance,
                bounce_health_params={},
                soa_dir='fake_soa_dir',
            )
            # deploy_service returns a (code, message) pair; 0 means success.
            assert result[0] == 0, "Expected successful result; got (%d, %s)" % result
            fake_client.list_apps.assert_called_once_with(embed_failures=True)
            assert fake_client.create_app.call_count == 0
            fake_bounce_func.assert_called_once_with(
                new_config=fake_config,
                new_app_running=False,
                happy_new_tasks=[],
                old_app_live_tasks={old_app.id: set([old_task_to_drain, old_task_dont_drain])},
            )
            # Both the already-draining task and the one newly selected for
            # draining are drained, then killed with scale=True (so Marathon
            # shrinks the old app rather than restarting the task).
            assert fake_drain_method.drain.call_count == 2
            fake_drain_method.drain.assert_any_call(old_task_is_draining)
            fake_drain_method.drain.assert_any_call(old_task_to_drain)
            assert fake_client.kill_task.call_count == 2
            fake_client.kill_task.assert_any_call(old_app_id, old_task_is_draining.id, scale=True)
            fake_client.kill_task.assert_any_call(old_app_id, old_task_to_drain.id, scale=True)
            create_marathon_app_patch.assert_called_once_with(fake_config['id'], fake_config, fake_client)
            assert kill_old_ids_patch.call_count == 0
            # We should call _log 5 times:
            # 1. bounce starts
            # 2. create new app
            # 3. draining old tasks
            # 4. remove old apps
            # 5. bounce finishes
            assert mock_log.call_count == 5

    def test_deploy_service_already_bouncing(self):
        """deploy_service aborts with (1, message) when the bounce lock is held.

        bounce_lock_zookeeper is patched to raise LockHeldException, so the
        deploy must return the "already being bounced" error instead of
        running the bounce.
        """
        fake_bounce = 'areallygoodbouncestrategy'
        fake_drain_method = 'noop'
        fake_name = 'how_many_strings'
        fake_instance = 'will_i_need_to_think_of'
        fake_id = marathon_tools.format_job_id(fake_name, fake_instance, 'gityourmom', 'configyourdad')
        fake_config = {'id': fake_id, 'instances': 2}
        old_app_id = ('%s2' % fake_id)
        old_task = mock.Mock(id="old_task_id", app_id=old_app_id)
        old_app = mock.Mock(id=old_app_id, tasks=[old_task])
        fake_client = mock.MagicMock(
            list_apps=mock.Mock(return_value=[old_app]),
            # spec pins kill_task's accepted signature on the mock.
            kill_task=mock.Mock(spec=lambda app_id, id, scale=False: None),
        )
        fake_bounce_func = mock.create_autospec(
            bounce_lib.brutal_bounce,
            return_value={
                "create_app": True,
                "tasks_to_drain": [old_task],
            }
        )
        # The expected error message uses the short job id (no git/config sha).
        fake_short_id = marathon_tools.format_job_id(fake_name, fake_instance)
        with contextlib.nested(
            mock.patch(
                'paasta_tools.bounce_lib.get_bounce_method_func',
                return_value=fake_bounce_func,
                autospec=True,
            ),
            mock.patch(
                'paasta_tools.bounce_lib.bounce_lock_zookeeper',
                # Simulate another bounce already holding the ZK lock.
                side_effect=bounce_lib.LockHeldException,
                autospec=True
            ),
            mock.patch(
                'paasta_tools.bounce_lib.get_happy_tasks',
                autospec=True,
                side_effect=lambda x, _, __, **kwargs: x,
            ),
            mock.patch('paasta_tools.setup_marathon_job._log', autospec=True),
            mock.patch('paasta_tools.setup_marathon_job.load_system_paasta_config', autospec=True),
        ) as (_, _, _, _, mock_load_system_paasta_config):
            mock_load_system_paasta_config.return_value.get_cluster = mock.Mock(return_value='fake_cluster')
            result = setup_marathon_job.deploy_service(
                service=fake_name,
                instance=fake_instance,
                marathon_jobid=fake_id,
                config=fake_config,
                client=fake_client,
                bounce_method=fake_bounce,
                drain_method_name=fake_drain_method,
                drain_method_params={},
                nerve_ns=fake_instance,
                bounce_health_params={},
                soa_dir='fake_soa_dir',
            )
            assert result == (1, "Instance %s is already being bounced." % fake_short_id)

    def test_deploy_service_logs_exceptions(self):
        """Exceptions inside deploy_service are logged and then re-raised.

        get_bounce_method_func is patched to raise IOError; the test verifies
        the first _log call mentions the service name and the second contains
        a traceback.
        """
        fake_bounce = 'WHEEEEEEEEEEEEEEEE'
        fake_drain_method = 'noop'
        fake_name = 'whoa'
        fake_instance = 'the_earth_is_tiny'
        fake_id = marathon_tools.format_job_id(fake_name, fake_instance)
        fake_apps = [mock.Mock(id=fake_id, tasks=[]), mock.Mock(id=('%s2' % fake_id), tasks=[])]
        fake_client = mock.MagicMock(
            list_apps=mock.Mock(return_value=fake_apps))
        fake_config = {'id': fake_id, 'instances': 2}
        with contextlib.nested(
            mock.patch('paasta_tools.setup_marathon_job._log', autospec=True),
            mock.patch('paasta_tools.setup_marathon_job.bounce_lib.get_bounce_method_func',
                       side_effect=IOError('foo')),
            mock.patch('paasta_tools.setup_marathon_job.load_system_paasta_config', autospec=True),
        ) as (mock_log, mock_bounce, mock_load_system_paasta_config):
            mock_load_system_paasta_config.return_value.get_cluster = mock.Mock(return_value='fake_cluster')
            with raises(IOError):
                setup_marathon_job.deploy_service(
                    service=fake_name,
                    instance=fake_instance,
                    marathon_jobid=fake_id,
                    config=fake_config,
                    client=fake_client,
                    bounce_method=fake_bounce,
                    drain_method_name=fake_drain_method,
                    drain_method_params={},
                    nerve_ns=fake_instance,
                    bounce_health_params={},
                    soa_dir='fake_soa_dir',
                )
            # mock_calls entries are (name, args, kwargs); [2] is the kwargs dict.
            assert fake_name in mock_log.mock_calls[0][2]["line"]
            assert 'Traceback' in mock_log.mock_calls[1][2]["line"]

    def test_get_marathon_config(self):
        """get_main_marathon_config simply proxies load_marathon_config."""
        fake_conf = {'oh_no': 'im_a_ghost'}
        with mock.patch(
            'paasta_tools.marathon_tools.load_marathon_config',
            return_value=fake_conf,
            autospec=True
        ) as get_conf_patch:
            assert setup_marathon_job.get_main_marathon_config() == fake_conf
            get_conf_patch.assert_called_once_with()

    def test_get_old_live_draining_tasks_empty(self):
        """Apps with no tasks yield empty live/draining sets keyed by app id."""
        fake_name = 'whoa'
        fake_instance = 'the_earth_is_tiny'
        fake_id = marathon_tools.format_job_id(fake_name, fake_instance)
        fake_apps = [
            mock.Mock(id=fake_id, tasks=[]),
            mock.Mock(id=('%s2' % fake_id), tasks=[])
        ]
        expected_live_tasks = {
            fake_apps[0].id: set(),
            fake_apps[1].id: set(),
        }
        expected_draining_tasks = {
            fake_apps[0].id: set(),
            fake_apps[1].id: set(),
        }
        # is_draining is irrelevant here (no tasks), but must be callable.
        fake_drain_method = mock.Mock(is_draining=lambda _: True)
        actual = setup_marathon_job.get_old_live_draining_tasks(fake_apps, fake_drain_method)
        actual_live_tasks, actual_draining_tasks = actual
        assert actual_live_tasks == expected_live_tasks
        assert actual_draining_tasks == expected_draining_tasks

    def test_get_old_live_draining_tasks_not_empty(self):
        """Tasks are partitioned into live vs draining via drain_method.is_draining."""
        fake_name = 'whoa'
        fake_instance = 'the_earth_is_tiny'
        fake_id = marathon_tools.format_job_id(fake_name, fake_instance)

        def fake_task(state):
            # Tag each fake task with a drain state the fake drain method reads.
            return mock.Mock(_drain_state=state)

        fake_apps = [
            mock.Mock(id=fake_id, tasks=[fake_task('up'), fake_task('down')]),
            mock.Mock(id=('%s2' % fake_id), tasks=[fake_task('up'), fake_task('down')])
        ]
        # Per app: the 'up' task should be reported live, the 'down' task draining.
        expected_live_tasks = {
            fake_apps[0].id: set([fake_apps[0].tasks[0]]),
            fake_apps[1].id: set([fake_apps[1].tasks[0]]),
        }
        expected_draining_tasks = {
            fake_apps[0].id: set([fake_apps[0].tasks[1]]),
            fake_apps[1].id: set([fake_apps[1].tasks[1]]),
        }
        fake_drain_method = mock.Mock(is_draining=lambda t: t._drain_state == 'down')
        actual = setup_marathon_job.get_old_live_draining_tasks(fake_apps, fake_drain_method)
        actual_live_tasks, actual_draining_tasks = actual
        assert actual_live_tasks == expected_live_tasks
        assert actual_draining_tasks == expected_draining_tasks
# NOTE(review): a new module appears to begin here (marathon_serviceinit
# tests) — this import block looks partial; earlier imports may exist out of
# view.  TODO confirm against the full file.
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT
from paasta_tools.utils import NoDockerImageError
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import remove_ansi_escape_sequences
from paasta_tools.utils import SystemPaastaConfig

# Shared fixture: a fully-populated MarathonServiceConfig (service, cluster,
# instance, resource limits, and branch/deploy metadata) reused by the tests
# in this module.
fake_marathon_job_config = marathon_tools.MarathonServiceConfig(
    service='servicename',
    cluster='clustername',
    instance='instancename',
    config_dict={
        'instances': 3,
        'cpus': 1,
        'mem': 100,
        'disk': 512,
        'nerve_ns': 'fake_nerve_ns',
    },
    branch_dict={
        'docker_image': 'test_docker:1.0',
        'desired_state': 'start',
        'force_bounce': None,
    },
)


def test_get_bouncing_status():
    # NOTE(review): this test continues past the visible region; only the
    # patch setup is in view.
    with contextlib.nested(
        mock.patch(
            'paasta_tools.marathon_serviceinit.marathon_tools.get_matching_appids',
            autospec=True),
    ) as (mock_get_matching_appids, ):