def load_chronos_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Build the ChronosJobConfig for one instance of a service.

    Reads the service's general configuration plus its chronos jobs for the
    given cluster, merges the instance config over the general config, and
    optionally attaches the deployments branch dict.

    :param service: The service name
    :param instance: The chronos job (instance) name to load
    :param cluster: The cluster whose chronos-<cluster>.yaml is read
    :param load_deployments: Whether to also load deployments.json
    :param soa_dir: The SOA configuration directory to read from
    :returns: A ChronosJobConfig for the requested instance
    :raises NoConfigurationForServiceError: if the instance is not defined
    """
    defaults = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir,
    )
    jobs = read_chronos_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in jobs:
        raise NoConfigurationForServiceError(
            'No job named "%s" in config file chronos-%s.yaml' % (instance, cluster)
        )
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch_dict = deployments_json.get_branch_dict(
            service,
            get_paasta_branch(cluster=cluster, instance=instance),
        )
    # Instance-level settings win over the service-wide defaults.
    merged_config = deep_merge_dictionaries(overrides=jobs[instance], defaults=defaults)
    return ChronosJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def load_paasta_native_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Build the PaastaNativeServiceConfig for one instance of a service.

    :param service: The service name
    :param instance: The paasta_native job (instance) name to load
    :param cluster: The cluster whose paasta_native-<cluster>.yaml is read
    :param load_deployments: Whether to also load deployments.json
    :param soa_dir: The SOA configuration directory to read from
    :returns: A PaastaNativeServiceConfig with its service_namespace_config set
    :raises UnknownPaastaNativeServiceError: if the instance is not defined
    """
    service_paasta_native_jobs = read_paasta_native_jobs_for_service(
        service, cluster, soa_dir=soa_dir)
    if instance not in service_paasta_native_jobs:
        filename = '%s/%s/paasta_native-%s.yaml' % (soa_dir, service, cluster)
        # Read the file's contents for the error message inside a context
        # manager: the original `open(filename).read()` leaked the handle,
        # since the exception raised right after prevents any cleanup.
        with open(filename) as config_file:
            contents = config_file.read()
        raise UnknownPaastaNativeServiceError(
            'No job named "%s" in config file %s: \n%s' % (instance, filename, contents))
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = get_paasta_branch(cluster=cluster, instance=instance)
        branch_dict = deployments_json.get_branch_dict(service, branch)
    service_config = PaastaNativeServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=service_paasta_native_jobs[instance],
        branch_dict=branch_dict,
    )
    # Attach the nerve namespace config after construction, since the
    # namespace name comes from the just-built service_config.
    service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=service_config.get_nerve_namespace(),
        soa_dir=soa_dir)
    service_config.service_namespace_config = service_namespace_config
    return service_config
def load_paasta_native_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Build the PaastaNativeServiceConfig for one instance of a service.

    :param service: The service name
    :param instance: The paasta_native job (instance) name to load
    :param cluster: The cluster whose paasta_native-<cluster>.yaml is read
    :param load_deployments: Whether to also load deployments.json
    :param soa_dir: The SOA configuration directory to read from
    :returns: A PaastaNativeServiceConfig with its service_namespace_config set
    :raises UnknownPaastaNativeServiceError: if the instance is not defined
    """
    service_paasta_native_jobs = read_paasta_native_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in service_paasta_native_jobs:
        filename = '%s/%s/paasta_native-%s.yaml' % (soa_dir, service, cluster)
        # Read the file's contents for the error message via a context
        # manager: the original `open(filename).read()` leaked the handle,
        # since the exception raised right after prevents any cleanup.
        with open(filename) as config_file:
            contents = config_file.read()
        raise UnknownPaastaNativeServiceError(
            'No job named "%s" in config file %s: \n%s' % (instance, filename, contents)
        )
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = get_paasta_branch(cluster=cluster, instance=instance)
        branch_dict = deployments_json.get_branch_dict(service, branch)
    service_config = PaastaNativeServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=service_paasta_native_jobs[instance],
        branch_dict=branch_dict,
    )
    # Attach the nerve namespace config after construction, since the
    # namespace name comes from the just-built service_config.
    service_namespace_config = load_service_namespace_config(
        service, service_config.get_nerve_namespace(), soa_dir=soa_dir)
    service_config.service_namespace_config = service_namespace_config
    return service_config
def mark_for_deployment(git_url, cluster, instance, service, commit):
    """Mark a docker image for deployment by force-pushing the paasta branch.

    :param git_url: Remote git URL to push the deployment ref to
    :param cluster: The cluster being deployed to
    :param instance: The instance being deployed
    :param service: The service name (used for event logging)
    :param commit: The git sha to mark for deployment
    :returns: 0 on success, 1 if the remote ref update failed
    """
    remote_branch = get_paasta_branch(cluster=cluster, instance=instance)
    ref_mutator = remote_git.make_force_push_mutate_refs_func(
        target_branches=[remote_branch],
        sha=commit,
    )
    try:
        remote_git.create_remote_refs(git_url=git_url, ref_mutator=ref_mutator, force=True)
    except Exception as e:
        return_code = 1
        loglines = ["Failed to mark %s in for deployment on %s in the %s cluster!" % (commit, instance, cluster)]
        # Include the failure detail line by line in the event log.
        loglines.extend(str(e).split('\n'))
    else:
        return_code = 0
        loglines = ["Marked %s in for deployment on %s in the %s cluster" % (commit, instance, cluster)]
    for logline in loglines:
        _log(
            service=service,
            line=logline,
            component='deploy',
            level='event',
            cluster=cluster,
            instance=instance,
        )
    return return_code
def get_branches_from_config_file(file_dir, filename):
    """Get all branches defined in a single service configuration file.

    A branch is defined for an instance if it has a 'branch' key, or the
    branch name is paasta-{cluster}.{instance}, where cluster is the cluster
    the marathon or chronos file is defined for (i.e. marathon-hab.yaml is for
    hab), and instance is the instance name.

    :param file_dir: The directory that the filename argument is in
    :param filename: The name of the service configuration file to read from
    :returns: A set of branch names listed in the configuration file
    """
    branches = set()
    config = service_configuration_lib.read_service_information(os.path.join(file_dir, filename))
    for instance in config:
        if "branch" in config[instance]:
            target_branch = config[instance]["branch"]
        else:
            target_branch = None
            try:
                # cluster may contain dashes (and frequently does) so
                # reassemble the cluster after stripping the chronos/marathon prefix
                cluster = "-".join(filename.split("-")[1:]).split(".")[0]
                target_branch = get_paasta_branch(cluster, instance)
            except IndexError:
                pass
        # Skip empty/None branch names.
        if target_branch:
            branches.add(target_branch)
    return branches
def load_adhoc_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Build the AdhocJobConfig for one instance of a service.

    Merges the instance's adhoc-<cluster>.yaml config over the service-wide
    config, and optionally attaches the v2 deployments branch dict.

    :param service: The service name
    :param instance: The adhoc instance name to load
    :param cluster: The cluster whose adhoc-<cluster>.yaml is read
    :param load_deployments: Whether to also load deployments.json (v2)
    :param soa_dir: The SOA configuration directory to read from
    :returns: An AdhocJobConfig for the requested instance
    :raises NoConfigurationForServiceError: if the instance is not defined
    """
    service_defaults = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir
    )
    conf_file = "adhoc-%s" % cluster
    log.info("Reading adhoc configuration file: %s.yaml", conf_file)
    instance_configs = service_configuration_lib.read_extra_service_information(
        service_name=service,
        extra_info=conf_file,
        soa_dir=soa_dir
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (instance, soa_dir, service, conf_file)
        )
    # Instance-level settings win over the service-wide defaults.
    merged_config = deep_merge_dictionaries(
        overrides=instance_configs[instance],
        defaults=service_defaults,
    )
    branch_dict = {}
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # 'branch' and 'deploy_group' may be overridden in the config itself.
        branch = merged_config.get('branch', get_paasta_branch(cluster, instance))
        deploy_group = merged_config.get('deploy_group', branch)
        branch_dict = deployments_json.get_branch_dict_v2(service, branch, deploy_group)
    return AdhocJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=branch_dict,
    )
def get_branches_from_config_file(file_dir, filename):
    """Get all branches defined in a single service configuration file.

    A branch is defined for an instance if it has a 'branch' key, or the
    branch name is paasta-{cluster}.{instance}, where cluster is the cluster
    the marathon or chronos file is defined for (i.e. marathon-hab.yaml is for
    hab), and instance is the instance name.

    :param file_dir: The directory that the filename argument is in
    :param filename: The name of the service configuration file to read from
    :returns: A set of branch names listed in the configuration file
    """
    branches = set()
    config = service_configuration_lib.read_service_information(
        os.path.join(file_dir, filename))
    for instance in config:
        if 'branch' in config[instance]:
            target_branch = config[instance]['branch']
        else:
            target_branch = None
            try:
                # cluster may contain dashes (and frequently does) so
                # reassemble the cluster after stripping the chronos/marathon prefix
                cluster = '-'.join(filename.split('-')[1:]).split('.')[0]
                target_branch = get_paasta_branch(cluster, instance)
            except IndexError:
                pass
        # Skip empty/None branch names.
        if target_branch:
            branches.add(target_branch)
    return branches
def load_marathon_service_config_no_cache(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Read a service instance's configuration for marathon.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param name: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
                             should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in the config for the service instance"""
    log.info("Reading service configuration files from dir %s/ in %s" % (service, soa_dir))
    log.info("Reading general configuration file: service.yaml")
    service_defaults = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir,
    )
    conf_file = "marathon-%s" % cluster
    log.info("Reading marathon configuration file: %s.yaml", conf_file)
    instance_configs = service_configuration_lib.read_extra_service_information(
        service,
        conf_file,
        soa_dir=soa_dir,
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (instance, soa_dir, service, conf_file))
    # Instance-level settings win over the service-wide defaults.
    merged_config = deep_merge_dictionaries(
        overrides=instance_configs[instance],
        defaults=service_defaults)
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        # The config's own 'branch' key wins over the derived paasta branch.
        branch = merged_config.get('branch', get_paasta_branch(cluster, instance))
        branch_dict = deployments_json.get_branch_dict(service, branch)
    return MarathonServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def load_chronos_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Build the ChronosJobConfig for one chronos job of a service.

    :param service: The service name
    :param instance: The chronos job (instance) name to load
    :param cluster: The cluster whose chronos-<cluster>.yaml is read
    :param load_deployments: Whether to also load deployments.json
    :param soa_dir: The SOA configuration directory to read from
    :returns: A ChronosJobConfig for the requested instance
    :raises InvalidChronosConfigError: if the instance is not defined
    """
    jobs = read_chronos_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in jobs:
        raise InvalidChronosConfigError('No job named "%s" in config file chronos-%s.yaml' % (instance, cluster))
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch_dict = deployments_json.get_branch_dict(
            service,
            get_paasta_branch(cluster=cluster, instance=instance),
        )
    return ChronosJobConfig(service, instance, jobs[instance], branch_dict)
def _get_branch_dict(self, cluster: str, instance: str, config: Dict[Any, Any]):
    """Return the deployments branch dict for one instance config.

    Lazily loads (and caches on self) deployments.json the first time it is
    needed; returns an empty dict when deployments loading is disabled.

    :param cluster: The cluster the instance runs in
    :param instance: The instance name
    :param config: The instance's config dict (may override 'branch')
    :returns: The branch dict from deployments.json, or {}
    """
    if not self._load_deployments:
        return {}
    # Cache the parsed deployments.json so repeated calls only read it once.
    if self._deployments_json is None:
        self._deployments_json = load_deployments_json(
            self._service, soa_dir=self._soa_dir)
    # The config's own 'branch' key wins over the derived paasta branch.
    branch = config.get('branch', get_paasta_branch(cluster, instance))
    return self._deployments_json.get_branch_dict(self._service, branch)
def load_chronos_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Build the ChronosJobConfig for one chronos job of a service.

    :param service: The service name
    :param instance: The chronos job (instance) name to load
    :param cluster: The cluster whose chronos-<cluster>.yaml is read
    :param load_deployments: Whether to also load deployments.json
    :param soa_dir: The SOA configuration directory to read from
    :returns: A ChronosJobConfig for the requested instance
    :raises NoConfigurationForServiceError: if the instance is not defined
    """
    jobs = read_chronos_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in jobs:
        raise NoConfigurationForServiceError('No job named "%s" in config file chronos-%s.yaml' % (instance, cluster))
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch_dict = deployments_json.get_branch_dict(
            service,
            get_paasta_branch(cluster=cluster, instance=instance),
        )
    return ChronosJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=jobs[instance],
        branch_dict=branch_dict,
    )
def load_marathon_service_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Read a service instance's configuration for marathon.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param name: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
                             should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in the config for the service instance"""
    log.info("Reading service configuration files from dir %s/ in %s" % (service, soa_dir))
    log.info("Reading general configuration file: service.yaml")
    service_defaults = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir
    )
    conf_file = "marathon-%s" % cluster
    log.info("Reading marathon configuration file: %s.yaml", conf_file)
    instance_configs = service_configuration_lib.read_extra_service_information(
        service,
        conf_file,
        soa_dir=soa_dir
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (instance, soa_dir, service, conf_file)
        )
    # Instance-level settings win over the service-wide defaults.
    merged_config = deep_merge_dictionaries(overrides=instance_configs[instance], defaults=service_defaults)
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        # The config's own 'branch' key wins over the derived paasta branch.
        branch = merged_config.get('branch', get_paasta_branch(cluster, instance))
        branch_dict = deployments_json.get_branch_dict(service, branch)
    return MarathonServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=branch_dict,
    )
def write_soa_dir_deployments(context, service, disabled, instance):
    """Write a minimal v1 deployments.json fixture for a single instance.

    :param context: Test context; reads context.soa_dir, context.cluster,
                    and context.tag_version
    :param service: The service name to write the fixture under
    :param disabled: The literal string 'disabled' to mark the instance
                     stopped; anything else means 'start'
    :param instance: The instance to write a branch entry for
    """
    if disabled == 'disabled':
        desired_state = 'stop'
    else:
        desired_state = 'start'
    service_dir = os.path.join(context.soa_dir, service)
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(service_dir, exist_ok=True)
    with open(os.path.join(service_dir, 'deployments.json'), 'w') as dp:
        dp.write(json.dumps({
            'v1': {
                '%s:paasta-%s' % (service, utils.get_paasta_branch(context.cluster, instance)): {
                    'docker_image': 'test-image-foobar%d' % context.tag_version,
                    'desired_state': desired_state,
                }
            }
        }))
def write_soa_dir_deployments(context, service, disabled, csv_instances, image):
    """Write a v1 deployments.json fixture for a comma-separated instance list.

    :param context: Test context; reads context.soa_dir and context.cluster
    :param service: The service name to write the fixture under
    :param disabled: The literal string 'disabled' to mark the instances
                     stopped; anything else means 'start'
    :param csv_instances: Comma-separated instance names to write entries for
    :param image: The docker image recorded for every instance
    """
    if disabled == 'disabled':
        desired_state = 'stop'
    else:
        desired_state = 'start'
    service_dir = os.path.join(context.soa_dir, service)
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(service_dir, exist_ok=True)
    with open(os.path.join(service_dir, 'deployments.json'), 'w') as dp:
        dp.write(json.dumps({
            'v1': {
                '%s:paasta-%s' % (service, utils.get_paasta_branch(context.cluster, instance)): {
                    'docker_image': image,
                    'desired_state': desired_state,
                }
                for instance in csv_instances.split(',')
            }
        }))
def load_paasta_native_job_config(
    service,
    instance,
    cluster,
    load_deployments=True,
    soa_dir=DEFAULT_SOA_DIR,
    instance_type='paasta_native',
    config_overrides=None,
) -> NativeServiceConfig:
    """Build the NativeServiceConfig for one instance of a service.

    :param service: The service name
    :param instance: The instance name to load
    :param cluster: The cluster whose config is read
    :param load_deployments: Whether to also load deployments.json
    :param soa_dir: The SOA configuration directory to read from
    :param instance_type: The instance type to read config for
    :param config_overrides: Optional dict merged over the instance config
    :returns: A NativeServiceConfig with its service_namespace_config set
    """
    jobs = read_service_config(
        service=service,
        instance=instance,
        instance_type=instance_type,
        cluster=cluster,
        soa_dir=soa_dir,
    )
    branch_dict: BranchDict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch_dict = deployments_json.get_branch_dict(
            service,
            get_paasta_branch(cluster=cluster, instance=instance),
        )
    # Copy before applying overrides so the loaded config isn't mutated.
    config_dict = jobs[instance].copy()
    config_dict.update(config_overrides or {})
    service_config = NativeServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=config_dict,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
    # Attach the nerve namespace config after construction, since the
    # namespace name comes from the just-built service_config.
    service_config.service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=service_config.get_nerve_namespace(),
        soa_dir=soa_dir,
    )
    return service_config
def write_soa_dir_deployments(context, service, disabled, csv_instances, image):
    """Write a v1 + v2 deployments.json fixture for a list of instances.

    :param context: Test context; reads context.soa_dir and context.cluster
    :param service: The service name to write the fixture under
    :param disabled: The literal string "disabled" to mark the instances
                     stopped; anything else means "start"
    :param csv_instances: Comma-separated instance names to write entries for
    :param image: The docker image recorded for every instance
    """
    if disabled == "disabled":
        desired_state = "stop"
    else:
        desired_state = "start"
    service_dir = os.path.join(context.soa_dir, service)
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(service_dir, exist_ok=True)
    instances = csv_instances.split(",")
    with open(os.path.join(service_dir, "deployments.json"), "w") as dp:
        dp.write(
            json.dumps({
                "v1": {
                    "{}:paasta-{}".format(
                        service, utils.get_paasta_branch(context.cluster, instance)): {
                        "docker_image": image,
                        "desired_state": desired_state
                    }
                    for instance in instances
                },
                "v2": {
                    "deployments": {
                        f"{context.cluster}.{instance}": {
                            "docker_image": image,
                            "git_sha": "deadbeef",
                        }
                        for instance in instances
                    },
                    "controls": {
                        f"{service}:{context.cluster}.{instance}": {
                            "desired_state": desired_state,
                            "force_bounce": None,
                        }
                        for instance in instances
                    },
                },
            }))
def write_soa_dir_deployments(context, service, disabled, csv_instances, image):
    """Write a v1 + v2 deployments.json fixture for a list of instances.

    :param context: Test context; reads context.soa_dir and context.cluster
    :param service: The service name to write the fixture under
    :param disabled: The literal string 'disabled' to mark the instances
                     stopped; anything else means 'start'
    :param csv_instances: Comma-separated instance names to write entries for
    :param image: The docker image recorded for every instance
    """
    if disabled == 'disabled':
        desired_state = 'stop'
    else:
        desired_state = 'start'
    service_dir = os.path.join(context.soa_dir, service)
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(service_dir, exist_ok=True)
    instances = csv_instances.split(',')
    with open(os.path.join(service_dir, 'deployments.json'), 'w') as dp:
        dp.write(
            json.dumps({
                'v1': {
                    '%s:paasta-%s' % (service, utils.get_paasta_branch(
                        context.cluster, instance)): {
                        'docker_image': image,
                        'desired_state': desired_state,
                    }
                    for instance in instances
                },
                'v2': {
                    'deployments': {
                        f"{context.cluster}.{instance}": {
                            'docker_image': image,
                            'git_sha': 'deadbeef',
                        }
                        for instance in instances
                    },
                    'controls': {
                        f"{service}:{context.cluster}.{instance}": {
                            'desired_state': desired_state,
                            'force_bounce': None,
                        }
                        for instance in instances
                    },
                },
            }))
def mark_for_deployment(git_url, cluster, instance, service, commit):
    """Mark a docker image for deployment by force-pushing the paasta branch.

    :param git_url: Remote git URL to push the deployment ref to
    :param cluster: The cluster being deployed to
    :param instance: The instance being deployed
    :param service: The service name (used for event logging)
    :param commit: The git sha to mark for deployment
    :returns: 0 on success, 1 if the remote ref update failed
    """
    remote_branch = get_paasta_branch(cluster=cluster, instance=instance)
    ref_mutator = remote_git.make_force_push_mutate_refs_func(
        target_branches=[remote_branch],
        sha=commit,
    )
    try:
        remote_git.create_remote_refs(git_url=git_url, ref_mutator=ref_mutator, force=True)
    except Exception as e:
        return_code = 1
        loglines = [
            "Failed to mark %s in for deployment on %s in the %s cluster!" % (commit, instance, cluster)
        ]
        # Include the failure detail line by line in the event log.
        loglines.extend(str(e).split('\n'))
    else:
        return_code = 0
        loglines = [
            "Marked %s in for deployment on %s in the %s cluster" % (commit, instance, cluster)
        ]
    for logline in loglines:
        _log(
            service=service,
            line=logline,
            component='deploy',
            level='event',
            cluster=cluster,
            instance=instance,
        )
    return return_code