def load_paasta_native_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Load the paasta_native job configuration for one service instance.

    :param service: the service name
    :param instance: the instance (job) name inside paasta_native-<cluster>.yaml
    :param cluster: the cluster whose config file should be read
    :param load_deployments: whether to also read deployments.json for branch metadata
    :param soa_dir: the SOA configuration directory to read from
    :returns: a PaastaNativeServiceConfig with its service_namespace_config attached
    :raises UnknownPaastaNativeServiceError: if the instance is not in the config file
    """
    service_paasta_native_jobs = read_paasta_native_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in service_paasta_native_jobs:
        filename = '%s/%s/paasta_native-%s.yaml' % (soa_dir, service, cluster)
        # Read the config file via a context manager: the original
        # `open(filename).read()` leaked the file handle.
        with open(filename) as config_file:
            file_contents = config_file.read()
        raise UnknownPaastaNativeServiceError(
            'No job named "%s" in config file %s: \n%s' % (instance, filename, file_contents)
        )
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = get_paasta_branch(cluster=cluster, instance=instance)
        branch_dict = deployments_json.get_branch_dict(service, branch)
    service_config = PaastaNativeServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=service_paasta_native_jobs[instance],
        branch_dict=branch_dict,
    )
    service_namespace_config = load_service_namespace_config(
        service, service_config.get_nerve_namespace(), soa_dir=soa_dir)
    service_config.service_namespace_config = service_namespace_config
    return service_config
def test_DeploymentsJson_read():
    """load_deployments_json should open <dir>/<service>/deployments.json and return its 'v1' payload."""
    # Python 3 fixes: the `file` builtin (used as `spec=file`) and
    # `contextlib.nested` no longer exist; use a plain MagicMock and the
    # multi-manager `with` statement instead.
    file_mock = mock.MagicMock()
    fake_dir = '/var/dir_of_fake'
    fake_path = '/var/dir_of_fake/fake_service/deployments.json'
    fake_json = {
        'v1': {
            'no_srv:blaster': {
                'docker_image': 'test_rocker:9.9',
                'desired_state': 'start',
                'force_bounce': None,
            },
            'dont_care:about': {
                'docker_image': 'this:guy',
                'desired_state': 'stop',
                'force_bounce': '12345',
            },
        },
    }
    with mock.patch(
        'paasta_tools.utils.open', create=True, return_value=file_mock,
    ) as open_patch, mock.patch(
        'json.load', autospec=True, return_value=fake_json,
    ) as json_patch, mock.patch(
        'paasta_tools.utils.os.path.isfile', autospec=True, return_value=True,
    ):
        actual = utils.load_deployments_json('fake_service', fake_dir)
        open_patch.assert_called_once_with(fake_path)
        # json.load receives the object yielded by the `with open(...)` block.
        json_patch.assert_called_once_with(file_mock.__enter__())
        assert actual == fake_json['v1']
def step_impl_then_desired_state(context, expected_state):
    """Assert that the most recently force-bounced instance has the expected desired_state."""
    deployments = load_deployments_json('fake_deployments_json_service', soa_dir='fake_soa_configs')
    # Python 3 fixes: dict.iteritems() and tuple-unpacking lambdas
    # (`lambda (key, value): ...`) were removed. The `or ''` guards against a
    # None force_bounce, which cannot be ordered against str in Python 3.
    latest = sorted(
        deployments.items(),
        key=lambda kv: kv[1]['force_bounce'] or '',
        reverse=True,
    )[0][1]
    desired_state = latest['desired_state']
    assert desired_state == expected_state, "actual: %s\nexpected: %s" % (
        desired_state, expected_state)
def load_marathon_service_config_no_cache(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Read a service instance's configuration for marathon.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
                             should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A MarathonServiceConfig built from the merged service/instance config
    :raises NoConfigurationForServiceError: if the instance is not in the marathon config file
    """
    log.info("Reading service configuration files from dir %s/ in %s" % (service, soa_dir))
    log.info("Reading general configuration file: service.yaml")
    general_config = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir,
    )
    marathon_conf_file = "marathon-%s" % cluster
    log.info("Reading marathon configuration file: %s.yaml", marathon_conf_file)
    instance_configs = service_configuration_lib.read_extra_service_information(
        service,
        marathon_conf_file,
        soa_dir=soa_dir,
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (instance, soa_dir, service, marathon_conf_file))
    # Instance-level settings override the service-wide defaults.
    general_config = deep_merge_dictionaries(
        overrides=instance_configs[instance],
        defaults=general_config)
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = general_config.get('branch', get_paasta_branch(cluster, instance))
        branch_dict = deployments_json.get_branch_dict(service, branch)
    return MarathonServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def load_chronos_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Build a ChronosJobConfig for one service instance.

    :raises InvalidChronosConfigError: if the instance is missing from chronos-<cluster>.yaml
    """
    jobs_for_service = read_chronos_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in jobs_for_service:
        raise InvalidChronosConfigError('No job named "%s" in config file chronos-%s.yaml' % (instance, cluster))
    if load_deployments:
        deployments = load_deployments_json(service, soa_dir=soa_dir)
        paasta_branch = get_paasta_branch(cluster=cluster, instance=instance)
        branch_dict = deployments.get_branch_dict(service, paasta_branch)
    else:
        branch_dict = {}
    return ChronosJobConfig(service, instance, jobs_for_service[instance], branch_dict)
def _get_branch_dict(self, cluster: str, instance: str, config: Dict[Any, Any]):
    """Return the deployments branch_dict for this instance, or {} when deployments are not loaded."""
    if not self._load_deployments:
        return {}
    # Lazily read and memoize deployments.json on first use.
    if self._deployments_json is None:
        self._deployments_json = load_deployments_json(self._service, soa_dir=self._soa_dir)
    branch = config.get('branch', get_paasta_branch(cluster, instance))
    return self._deployments_json.get_branch_dict(self._service, branch)
def step_impl_then_desired_state(context, expected_state):
    """Check that the most recently force-bounced instance has the expected desired_state."""
    deployments = load_deployments_json("fake_deployments_json_service", soa_dir="fake_soa_configs")
    # Rank entries newest-bounce-first; a None force_bounce sorts like "".
    ranked = sorted(
        deployments.config_dict.items(),
        key=lambda item: item[1]["force_bounce"] or "",
        reverse=True,
    )
    desired_state = ranked[0][1]["desired_state"]
    assert (desired_state == expected_state
            ), f"actual: {desired_state}\nexpected: {expected_state}"
def step_impl_then(context):
    """Verify deployments.json contains exactly the two expected instance entries."""
    deployments = load_deployments_json('fake_deployments_json_service', soa_dir='fake_soa_configs')
    # Both instances share the same docker image tag for this commit.
    image = 'services-fake_deployments_json_service:paasta-%s' % context.expected_commit
    expected_deployments = {
        'fake_deployments_json_service:paasta-test_cluster.test_instance': {
            'force_bounce': context.force_bounce_timestamp,
            'desired_state': 'stop',
            'docker_image': image,
        },
        'fake_deployments_json_service:paasta-test_cluster.test_instance_2': {
            'force_bounce': None,
            'desired_state': 'start',
            'docker_image': image,
        },
    }
    assert expected_deployments == deployments, "actual: %s\nexpected:%s" % (deployments, expected_deployments)
def step_impl_then(context):
    """Verify deployments.json holds exactly the two expected instance entries."""
    deployments = load_deployments_json("fake_deployments_json_service", soa_dir="fake_soa_configs")
    docker_image = "services-fake_deployments_json_service:paasta-%s" % context.expected_commit
    key_prefix = "fake_deployments_json_service:paasta-test_cluster."
    expected_deployments = {
        key_prefix + "test_instance": {
            "force_bounce": context.force_bounce_timestamp,
            "desired_state": "stop",
            "docker_image": docker_image,
        },
        key_prefix + "test_instance_2": {
            "force_bounce": None,
            "desired_state": "start",
            "docker_image": docker_image,
        },
    }
    assert expected_deployments == deployments, "actual: %s\nexpected:%s" % (deployments, expected_deployments)
def get_actual_deployments(service: str, soa_dir: str) -> Mapping[str, str]:
    """Map each deployed namespace of *service* to its currently deployed git sha.

    :param service: service whose deployments.json to read
    :param soa_dir: the SOA configuration directory to read from
    :returns: mapping of namespace (leading "paasta-" stripped) -> git sha
    """
    deployments_json = load_deployments_json(service, soa_dir)
    if not deployments_json:
        paasta_print(
            "Warning: it looks like %s has not been deployed anywhere yet!" % service,
            file=sys.stderr,
        )
    # Create a dictionary of actual $service Jenkins deployments
    actual_deployments = {}
    for key, branch_dict in deployments_json.config_dict.items():
        # BUG FIX: the original unpacked into `service`, clobbering the
        # parameter and making `if service == service:` always true, so entries
        # belonging to other services were never filtered out.
        deployed_service, namespace = key.split(":")
        if deployed_service == service:
            value = branch_dict["docker_image"]
            # The sha is the suffix after the last "-" in the image tag.
            sha = value[value.rfind("-") + 1:]
            actual_deployments[namespace.replace("paasta-", "", 1)] = sha
    return actual_deployments
def load_chronos_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Build a ChronosJobConfig for one service instance.

    :raises NoConfigurationForServiceError: if the instance is missing from chronos-<cluster>.yaml
    """
    chronos_jobs = read_chronos_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in chronos_jobs:
        raise NoConfigurationForServiceError('No job named "%s" in config file chronos-%s.yaml' % (instance, cluster))
    branch_dict = {}
    if load_deployments:
        deployments = load_deployments_json(service, soa_dir=soa_dir)
        branch_dict = deployments.get_branch_dict(
            service, get_paasta_branch(cluster=cluster, instance=instance))
    return ChronosJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=chronos_jobs[instance],
        branch_dict=branch_dict,
    )
def load_marathon_service_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Read a service instance's configuration for marathon.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
                             should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A MarathonServiceConfig built from the merged service/instance config
    :raises NoConfigurationForServiceError: if the instance is not in the marathon config file
    """
    log.info("Reading service configuration files from dir %s/ in %s" % (service, soa_dir))
    log.info("Reading general configuration file: service.yaml")
    general_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    marathon_conf_file = "marathon-%s" % cluster
    log.info("Reading marathon configuration file: %s.yaml", marathon_conf_file)
    instance_configs = service_configuration_lib.read_extra_service_information(
        service, marathon_conf_file, soa_dir=soa_dir
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (instance, soa_dir, service, marathon_conf_file)
        )
    # Instance-level settings override the service-wide defaults.
    general_config = deep_merge_dictionaries(overrides=instance_configs[instance], defaults=general_config)
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = general_config.get('branch', get_paasta_branch(cluster, instance))
        branch_dict = deployments_json.get_branch_dict(service, branch)
    return MarathonServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
    )
def load_chronos_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Build a ChronosJobConfig for one service instance.

    :raises InvalidChronosConfigError: if the instance is missing from chronos-<cluster>.yaml
    """
    jobs_for_service = read_chronos_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in jobs_for_service:
        raise InvalidChronosConfigError(
            'No job named "%s" in config file chronos-%s.yaml' % (instance, cluster))
    if load_deployments:
        deployments = load_deployments_json(service, soa_dir=soa_dir)
        branch_dict = deployments.get_branch_dict(service, get_default_branch(cluster, instance))
    else:
        branch_dict = {}
    return ChronosJobConfig(service, instance, jobs_for_service[instance], branch_dict)
def test_DeploymentsJson_read():
    """load_deployments_json should open <dir>/<service>/deployments.json and return its 'v1' payload."""
    # Python 3 fixes: the `file` builtin (used as `spec=file`) and
    # `contextlib.nested` no longer exist; use a plain MagicMock and the
    # multi-manager `with` statement instead.
    file_mock = mock.MagicMock()
    fake_dir = "/var/dir_of_fake"
    fake_path = "/var/dir_of_fake/fake_service/deployments.json"
    fake_json = {
        "v1": {
            "no_srv:blaster": {"docker_image": "test_rocker:9.9", "desired_state": "start", "force_bounce": None},
            "dont_care:about": {"docker_image": "this:guy", "desired_state": "stop", "force_bounce": "12345"},
        }
    }
    with mock.patch(
        "paasta_tools.utils.open", create=True, return_value=file_mock
    ) as open_patch, mock.patch(
        "json.load", autospec=True, return_value=fake_json
    ) as json_patch, mock.patch(
        "paasta_tools.utils.os.path.isfile", autospec=True, return_value=True
    ):
        actual = utils.load_deployments_json("fake_service", fake_dir)
        open_patch.assert_called_once_with(fake_path)
        # json.load receives the object yielded by the `with open(...)` block.
        json_patch.assert_called_once_with(file_mock.__enter__())
        assert actual == fake_json["v1"]
def load_paasta_native_job_config(
    service,
    instance,
    cluster,
    load_deployments=True,
    soa_dir=DEFAULT_SOA_DIR,
    instance_type='paasta_native',
    config_overrides=None,
) -> NativeServiceConfig:
    """Build a NativeServiceConfig for one service instance.

    Optionally merges deployments.json branch metadata and caller-supplied
    config overrides, then attaches the service's namespace config.
    """
    native_jobs = read_service_config(
        service=service,
        instance=instance,
        instance_type=instance_type,
        cluster=cluster,
        soa_dir=soa_dir,
    )
    branch_dict: BranchDict = {}
    if load_deployments:
        deployments = load_deployments_json(service, soa_dir=soa_dir)
        paasta_branch = get_paasta_branch(cluster=cluster, instance=instance)
        branch_dict = deployments.get_branch_dict(service, paasta_branch)
    # Caller overrides win over the on-disk instance config; the merge leaves
    # the source dict untouched.
    merged_config = {**native_jobs[instance], **(config_overrides or {})}
    service_config = NativeServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
    service_config.service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=service_config.get_nerve_namespace(),
        soa_dir=soa_dir,
    )
    return service_config
def step_impl_then(context):
    """Verify deployments.json matches the expected DeploymentsJsonV1 payload."""
    deployments = load_deployments_json(
        "fake_deployments_json_service", soa_dir="fake_soa_configs"
    )
    # Both instances share the same docker image tag for this commit.
    image = "services-fake_deployments_json_service:paasta-%s" % context.expected_commit
    expected_deployments = DeploymentsJsonV1(
        {
            "fake_deployments_json_service:paasta-test-cluster.test_instance": {
                "force_bounce": context.force_bounce_timestamp,
                "desired_state": "stop",
                "docker_image": image,
            },
            "fake_deployments_json_service:paasta-test-cluster.test_instance_2": {
                "force_bounce": None,
                "desired_state": "start",
                "docker_image": image,
            },
        }
    )
    assert (
        expected_deployments == deployments
    ), f"actual: {deployments}\nexpected:{expected_deployments}"
def step_impl_then_desired_state(context, expected_state):
    """Assert that the most recently force-bounced instance has the expected desired_state."""
    deployments = load_deployments_json('fake_deployments_json_service', soa_dir='fake_soa_configs')
    # Python 3 fixes: dict.iteritems() and tuple-unpacking lambdas
    # (`lambda (key, value): ...`) were removed. The `or ''` guards against a
    # None force_bounce, which cannot be ordered against str in Python 3.
    latest = sorted(
        deployments.items(),
        key=lambda kv: kv[1]['force_bounce'] or '',
        reverse=True,
    )[0][1]
    desired_state = latest['desired_state']
    assert desired_state == expected_state, "actual: %s\nexpected: %s" % (desired_state, expected_state)
def step_impl_then_desired_state(context, expected_state):
    """Assert that the most recently force-bounced instance has the expected desired_state."""
    deployments = load_deployments_json('fake_deployments_json_service', soa_dir='fake_soa_configs')

    def bounce_key(entry):
        # A None force_bounce ranks like the empty string (i.e. oldest).
        return entry[1]['force_bounce'] or ''

    ranked = sorted(deployments.config_dict.items(), key=bounce_key, reverse=True)
    desired_state = ranked[0][1]['desired_state']
    assert desired_state == expected_state, f"actual: {desired_state}\nexpected: {expected_state}"