def load_tron_service_config(service, tron_cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Load all configured jobs for a service, and any additional config values."""
    # Prefer the service's own soa-configs entry; fall back to the shared
    # tron/<cluster>/<service>.yaml location when nothing was found there.
    config = service_configuration_lib.read_extra_service_information(
        service, 'tron-' + tron_cluster, soa_dir)
    if not config:
        fallback_path = os.path.join(
            os.path.abspath(soa_dir), 'tron', tron_cluster, service + '.yaml',
        )
        config = service_configuration_lib._read_yaml_file(fallback_path)
    if not config:
        raise NoConfigurationForServiceError(
            'No Tron configuration found for service %s' % service)

    # Everything other than the job list is passed through untouched.
    extra_config = dict(config)
    extra_config.pop('jobs', None)

    job_configs = []
    for job_dict in config.get('jobs') or []:
        job_configs.append(
            TronJobConfig(
                config_dict=job_dict,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            ),
        )
    return job_configs, extra_config
def load_adhoc_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Build an AdhocJobConfig for one instance of a service.

    Merges the instance's stanza from adhoc-<cluster>.yaml on top of the
    service-wide service.yaml, optionally attaching deployment info.
    """
    defaults = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir,
    )
    adhoc_conf_file = "adhoc-%s" % cluster
    log.info("Reading adhoc configuration file: %s.yaml", adhoc_conf_file)
    instance_configs = service_configuration_lib.read_extra_service_information(
        service_name=service,
        extra_info=adhoc_conf_file,
        soa_dir=soa_dir,
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (
                instance, soa_dir, service, adhoc_conf_file,
            ),
        )
    merged_config = deep_merge_dictionaries(
        overrides=instance_configs[instance], defaults=defaults,
    )

    branch_dict = {}
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Branch defaults to paasta-<cluster>.<instance>; deploy_group
        # defaults to the branch.
        branch = merged_config.get('branch', get_paasta_branch(cluster, instance))
        deploy_group = merged_config.get('deploy_group', branch)
        branch_dict = deployments_json.get_branch_dict_v2(service, branch, deploy_group)

    return AdhocJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=branch_dict,
    )
def load_chronos_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Return a ChronosJobConfig for one job instance of a service.

    Job config from chronos-<cluster>.yaml is merged over the service-wide
    service.yaml defaults.
    """
    base_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir,
    )
    service_chronos_jobs = read_chronos_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in service_chronos_jobs:
        raise NoConfigurationForServiceError(
            'No job named "%s" in config file chronos-%s.yaml' % (instance, cluster))

    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = get_paasta_branch(cluster=cluster, instance=instance)
        branch_dict = deployments_json.get_branch_dict(service, branch)

    merged = deep_merge_dictionaries(
        overrides=service_chronos_jobs[instance], defaults=base_config)
    return ChronosJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def test_service_group_rules_empty_when_service_is_deleted(
    service_group, mock_service_config,
):
    """A deleted service which still has running containers shouldn't cause exceptions."""
    patcher = mock.patch.object(
        firewall,
        'get_instance_config',
        side_effect=NoConfigurationForServiceError(),
    )
    with patcher:
        rules = service_group.get_rules(
            DEFAULT_SOA_DIR, firewall.DEFAULT_SYNAPSE_SERVICE_DIR)
    assert rules == ()
def load_marathon_service_config_no_cache(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Read a service instance's configuration for marathon.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param name: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
                             should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in the config for the service instance
    """
    log.info("Reading service configuration files from dir %s/ in %s" % (service, soa_dir))
    log.info("Reading general configuration file: service.yaml")
    defaults = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir,
    )
    marathon_conf_file = "marathon-%s" % cluster
    log.info("Reading marathon configuration file: %s.yaml", marathon_conf_file)
    instance_configs = service_configuration_lib.read_extra_service_information(
        service, marathon_conf_file, soa_dir=soa_dir,
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (
                instance, soa_dir, service, marathon_conf_file,
            ),
        )
    merged_config = deep_merge_dictionaries(
        overrides=instance_configs[instance], defaults=defaults)

    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = merged_config.get('branch', get_paasta_branch(cluster, instance))
        branch_dict = deployments_json.get_branch_dict(service, branch)

    return MarathonServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def load_tron_yaml(service: str, cluster: str, soa_dir: str) -> Dict[str, Any]:
    """Load the raw Tron yaml config for a service, trying both known locations.

    :raises NoConfigurationForServiceError: if neither location has config.
    """
    tronfig_folder = get_tronfig_folder(soa_dir=soa_dir, cluster=cluster)
    config = service_configuration_lib.read_extra_service_information(
        service_name=service,
        extra_info=f'tron-{cluster}',
        soa_dir=soa_dir,
    )
    if not config:
        # Fall back to the shared tronfig folder layout.
        fallback = os.path.join(tronfig_folder, f"{service}.yaml")
        config = service_configuration_lib._read_yaml_file(fallback)
    if not config:
        raise NoConfigurationForServiceError(
            'No Tron configuration found for service %s' % service)
    return config
def test_guess_instance_fails(mock_list_all_instances_for_service, ):
    """guess_cluster should exit 2 when listing instances raises."""
    mock_list_all_instances_for_service.side_effect = NoConfigurationForServiceError()
    args = mock.MagicMock()
    args.service = 'fake_service'
    args.cluster = None
    with raises(SystemExit) as excinfo:
        utils.guess_cluster(service=args.service, args=args)
    assert excinfo.value.code == 2
def load_chronos_job_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> 'ChronosJobConfig':
    """Assemble a ChronosJobConfig, merging the job stanza over service defaults.

    :raises InvalidJobNameError: for underscore-prefixed instance names.
    :raises NoConfigurationForServiceError: if the instance is not configured.
    """
    defaults = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir,
    )
    # Underscore-prefixed instance names are rejected outright.
    if instance.startswith('_'):
        raise InvalidJobNameError(
            "Unable to load chronos job config for %s.%s as instance name starts with '_'" % (service, instance),
        )
    service_chronos_jobs = read_chronos_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in service_chronos_jobs:
        raise NoConfigurationForServiceError(
            'No job named "%s" in config file chronos-%s.yaml' % (instance, cluster))
    merged = deep_merge_dictionaries(
        overrides=service_chronos_jobs[instance], defaults=defaults)

    branch_dict = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config (branch_dict=None) used only to resolve the
        # branch and deploy group for this instance.
        temp_config = ChronosJobConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=merged,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch_dict = deployments_json.get_branch_dict(
            service, temp_config.get_branch(), temp_config.get_deploy_group())

    return ChronosJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def test_guess_cluster_when_missing_cluster_exception(
    mock_get_default_cluster_for_service,
):
    """guess_cluster should exit 2 when no default cluster can be found."""
    mock_get_default_cluster_for_service.side_effect = NoConfigurationForServiceError()
    args = mock.MagicMock()
    args.service = 'fake_service'
    args.instance = 'fake_instance'
    args.cluster = None
    with raises(SystemExit) as excinfo:
        utils.guess_cluster(service=args.service, args=args)
    assert excinfo.value.code == 2
def load_adhoc_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Build an AdhocJobConfig, merging the instance stanza over service defaults.

    :raises NoConfigurationForServiceError: if the instance is not configured.
    """
    defaults = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir,
    )
    adhoc_conf_file = "adhoc-%s" % cluster
    instance_configs = service_configuration_lib.read_extra_service_information(
        service_name=service,
        extra_info=adhoc_conf_file,
        soa_dir=soa_dir,
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (instance, soa_dir, service, adhoc_conf_file),
        )
    merged = deep_merge_dictionaries(
        overrides=instance_configs[instance], defaults=defaults)

    branch_dict = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config (branch_dict=None) used only to resolve the
        # branch and deploy group for this instance.
        temp_config = AdhocJobConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=merged,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch_dict = deployments_json.get_branch_dict(
            service, temp_config.get_branch(), temp_config.get_deploy_group())

    return AdhocJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def load_chronos_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Look up one chronos job's config and wrap it in a ChronosJobConfig.

    NOTE: unlike the other loaders in this file, this variant does not merge
    in service.yaml defaults — it uses the job stanza as-is.
    """
    service_chronos_jobs = read_chronos_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in service_chronos_jobs:
        raise NoConfigurationForServiceError(
            'No job named "%s" in config file chronos-%s.yaml' % (instance, cluster))

    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch_dict = deployments_json.get_branch_dict(
            service, get_paasta_branch(cluster=cluster, instance=instance))

    return ChronosJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=service_chronos_jobs[instance],
        branch_dict=branch_dict,
    )
def test_main_bad_chronos_job_config_notifies_user(self):
    """When create_complete_config raises NoConfigurationForServiceError,
    main() should exit 0 and send a CRITICAL event explaining the failure.
    """
    # Fix: contextlib.nested was removed in Python 3; ExitStack is the
    # supported way to enter a variable number of context managers while
    # preserving the same enter/exit ordering.
    with contextlib.ExitStack() as stack:
        stack.enter_context(mock.patch(
            'paasta_tools.setup_chronos_job.parse_args',
            return_value=self.fake_args, autospec=True))
        stack.enter_context(mock.patch(
            'paasta_tools.chronos_tools.load_chronos_config', autospec=True))
        stack.enter_context(mock.patch(
            'paasta_tools.chronos_tools.get_chronos_client',
            return_value=self.fake_client, autospec=True))
        stack.enter_context(mock.patch(
            'paasta_tools.chronos_tools.create_complete_config',
            autospec=True,
            side_effect=NoConfigurationForServiceError('test bad configuration')))
        stack.enter_context(mock.patch(
            'paasta_tools.setup_chronos_job.setup_job',
            return_value=(0, 'it_is_finished'), autospec=True))
        load_system_paasta_config_patch = stack.enter_context(mock.patch(
            'paasta_tools.setup_chronos_job.load_system_paasta_config',
            autospec=True))
        send_event_patch = stack.enter_context(mock.patch(
            'paasta_tools.setup_chronos_job.send_event', autospec=True))

        load_system_paasta_config_patch.return_value.get_cluster = mock.MagicMock(
            return_value=self.fake_cluster)
        with raises(SystemExit) as excinfo:
            setup_chronos_job.main()
        # main() treats a missing config as "handled": exit 0, but notify.
        assert excinfo.value.code == 0
        expected_error_msg = (
            "Could not read chronos configuration file for %s in cluster %s\nError was: test bad configuration"
            % (compose_job_id(self.fake_service, self.fake_instance), self.fake_cluster))
        send_event_patch.assert_called_once_with(
            service=self.fake_service,
            instance=self.fake_instance,
            soa_dir=self.fake_args.soa_dir,
            status=Status.CRITICAL,
            output=expected_error_msg)
def load_tron_instance_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> TronActionConfig:
    """Find the TronActionConfig for one 'job.action' instance of a service.

    :raises NoConfigurationForServiceError: if no matching job/action exists.
    """
    jobs, _ = load_tron_service_config(
        service=service,
        cluster=cluster,
        load_deployments=load_deployments,
        soa_dir=soa_dir,
    )
    # Instance names are of the form '<job>.<action>'.
    requested_job, requested_action = instance.split('.')
    for job in jobs:
        if job.get_name() != requested_job:
            continue
        for action in job.get_actions():
            if action.get_action_name() == requested_action:
                return action
    raise NoConfigurationForServiceError(f"No tron configuration found for {service} {instance}")
def test_configure_and_run_missing_cluster_exception(
    mock_get_default_cluster_for_service,
    mock_load_marathon_service_config,
    mock_load_system_paasta_config,
):
    """configure_and_run_docker_container should exit 2 when no default cluster exists."""
    mock_get_default_cluster_for_service.side_effect = NoConfigurationForServiceError()
    mock_load_system_paasta_config.return_value = SystemPaastaConfig(
        {'volumes': []}, '/fake_dir/')
    mock_docker_client = mock.MagicMock(spec_set=docker.Client)
    args = mock.MagicMock()
    args.cmd = 'fake_command'
    args.service = 'fake_service'
    args.healthcheck = False
    args.instance = 'fake_instance'
    args.interactive = False
    args.cluster = None
    with raises(SystemExit) as excinfo:
        configure_and_run_docker_container(
            mock_docker_client, '8' * 40, 'fake_service', args)
    assert excinfo.value.code == 2
def load_kubernetes_service_config_no_cache(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> "KubernetesDeploymentConfig":
    """Read a service instance's configuration for kubernetes.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param name: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
                             should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in the config for the service instance
    """
    defaults = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir,
    )
    kubernetes_conf_file = "kubernetes-%s" % cluster
    instance_configs = service_configuration_lib.read_extra_service_information(
        service, kubernetes_conf_file, soa_dir=soa_dir,
    )
    # Underscore-prefixed instance names are rejected outright.
    if instance.startswith('_'):
        raise InvalidJobNameError(
            f"Unable to load kubernetes job config for {service}.{instance} as instance name starts with '_'",
        )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            f"{instance} not found in config file {soa_dir}/{service}/{kubernetes_conf_file}.yaml.",
        )
    merged = deep_merge_dictionaries(
        overrides=instance_configs[instance], defaults=defaults)

    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config (branch_dict=None) used only to resolve the
        # branch and deploy group for this instance.
        temp_config = KubernetesDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=merged,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch_dict = deployments_json.get_branch_dict(
            service, temp_config.get_branch(), temp_config.get_deploy_group())

    return KubernetesDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
instance="fake_inst", instance_type="flink", settings=settings, include_smartstack=False, include_envoy=None, # default of true in api specs ), ] @mock.patch("paasta_tools.instance.kubernetes.kubernetes_mesh_status", autospec=True) @mock.patch("paasta_tools.api.views.instance.validate_service_instance", autospec=True) @pytest.mark.parametrize( "validate_side_eft,mesh_status_side_eft,expected_msg,expected_code", [ ( NoConfigurationForServiceError(), {"envoy": None}, "No instance named 'fake_service.fake_inst' has been configured", 404, ), (Exception(), {"envoy": None}, "Traceback", 500), ("flink", RuntimeError("runtimeerror"), "runtimeerror", 405), ("flink", Exception(), "Traceback", 500), ], ) def test_instance_mesh_status_error( mock_validate_service_instance, mock_kubernetes_mesh_status, validate_side_eft, mesh_status_side_eft, expected_msg,