def format_custom_resource(
    instance_config: Mapping[str, Any],
    service: str,
    instance: str,
    cluster: str,
    kind: str,
    version: str,
    group: str,
) -> Mapping[str, Any]:
    sanitised_service = service.replace('_', '--')
    sanitised_instance = instance.replace('_', '--')
    resource: Mapping[str, Any] = {
        'apiVersion': f'{group}/{version}',
        'kind': kind,
        'metadata': {
            'name': f'{sanitised_service}-{sanitised_instance}',
            'labels': {
                'yelp.com/paasta_service': service,
                'yelp.com/paasta_instance': instance,
                'yelp.com/paasta_cluster': cluster,
            },
            'annotations': {
                'yelp.com/desired_state': 'running',
            },
        },
        'spec': instance_config,
    }
    config_hash = get_config_hash(instance_config)
    resource['metadata']['labels']['yelp.com/paasta_config_sha'] = config_hash
    return resource

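# Every variant in this collection derives its bounce/config identity from a
# shared get_config_hash() helper. Below is a minimal sketch of what such a
# helper could look like, consistent with how it is called here (a config plus
# an optional force_bounce token, returning a short printable digest); the
# exact production implementation may differ.
import hashlib
import json


def get_config_hash(config, force_bounce=None):
    """Hash a JSON-serialisable config into a short, stable token.

    sort_keys=True makes the digest independent of dict ordering, and the
    optional force_bounce string is mixed in so operators can force a new
    hash (and therefore a bounce) without changing the config itself. Some
    older variants pass a base64 string rather than a dict; json.dumps
    accepts either.
    """
    hasher = hashlib.md5()
    hasher.update(
        json.dumps(config, sort_keys=True).encode('UTF-8')
        + (force_bounce or '').encode('UTF-8')
    )
    return 'config%s' % hasher.hexdigest()[:8]
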
def create_complete_config(service, job_name, soa_dir=DEFAULT_SOA_DIR):
    """Generates a complete dictionary to be POST'ed to create a job on Chronos"""
    system_paasta_config = load_system_paasta_config()
    chronos_job_config = load_chronos_job_config(
        service, job_name, system_paasta_config.get_cluster(), soa_dir=soa_dir)
    docker_url = get_docker_url(
        system_paasta_config.get_docker_registry(),
        chronos_job_config.get_docker_image())
    docker_volumes = system_paasta_config.get_volumes() + chronos_job_config.get_extra_volumes()

    complete_config = chronos_job_config.format_chronos_job_dict(
        docker_url,
        docker_volumes,
    )
    complete_config['name'] = compose_job_id(service, job_name)
    desired_state = chronos_job_config.get_desired_state()

    # If the job was previously stopped, we should stop the new job as well
    # NOTE this clobbers the 'disabled' param specified in the config file!
    if desired_state == 'start':
        complete_config['disabled'] = False
    elif desired_state == 'stop':
        complete_config['disabled'] = True

    # we use the undocumented description field to store a hash of the chronos config.
    # this makes it trivial to compare configs and know when to bounce.
    complete_config['description'] = get_config_hash(complete_config)

    log.debug("Complete configuration for instance is: %s" % complete_config)
    return complete_config

def create_complete_config(service, instance, marathon_config, soa_dir=DEFAULT_SOA_DIR):
    """Generates a complete dictionary to be POST'ed to create an app on Marathon"""
    system_paasta_config = load_system_paasta_config()
    partial_id = format_job_id(service=service, instance=instance)
    instance_config = load_marathon_service_config(
        service=service,
        instance=instance,
        cluster=load_system_paasta_config().get_cluster(),
        soa_dir=soa_dir,
    )
    docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                instance_config.get_docker_image())
    service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=instance_config.get_nerve_namespace(),
    )
    docker_volumes = system_paasta_config.get_volumes() + instance_config.get_extra_volumes()

    complete_config = instance_config.format_marathon_app_dict(
        app_id=partial_id,
        docker_url=docker_url,
        docker_volumes=docker_volumes,
        service_namespace_config=service_namespace_config,
    )
    code_sha = get_code_sha_from_dockerurl(docker_url)
    config_hash = get_config_hash(
        complete_config,
        force_bounce=instance_config.get_force_bounce(),
    )
    full_id = format_job_id(service, instance, code_sha, config_hash)
    complete_config['id'] = full_id
    return complete_config

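# The Marathon variants lean on format_job_id() to build both the partial app
# id (service.instance) and the full bounce-aware id
# (service.instance.gitsha.confighash). A minimal sketch, assuming the PaaSTA
# convention that Marathon app ids cannot contain underscores (so they are
# swapped for '--') and that the parts are joined with a '.' spacer; the real
# helper may validate more strictly:
def format_job_id(service, instance, git_hash=None, config_hash=None, spacer='.'):
    """Compose a Marathon-safe app id from its component parts."""
    parts = [service, instance, git_hash, config_hash]
    sanitised = [str(p).replace('_', '--') for p in parts if p is not None]
    return spacer.join(sanitised)
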
def format_custom_resource(
    instance_config: Mapping[str, Any],
    service: str,
    instance: str,
    cluster: str,
    kind: str,
    version: str,
    group: str,
    namespace: str,
) -> Mapping[str, Any]:
    sanitised_service = sanitise_kubernetes_name(service)
    sanitised_instance = sanitise_kubernetes_name(instance)
    resource: Mapping[str, Any] = {
        "apiVersion": f"{group}/{version}",
        "kind": kind,
        "metadata": {
            "name": f"{sanitised_service}-{sanitised_instance}",
            "namespace": namespace,
            "labels": {
                "yelp.com/paasta_service": service,
                "yelp.com/paasta_instance": instance,
                "yelp.com/paasta_cluster": cluster,
            },
            "annotations": {
                "yelp.com/desired_state": "running",
            },
        },
        "spec": instance_config,
    }
    config_hash = get_config_hash(instance_config)
    resource["metadata"]["labels"]["yelp.com/paasta_config_sha"] = config_hash
    return resource

def create_complete_config(service, instance, marathon_config, soa_dir=DEFAULT_SOA_DIR):
    """Generates a complete dictionary to be POST'ed to create an app on Marathon"""
    # A set of config attributes that don't get included in the hash of the config.
    # These should be things that PaaSTA/Marathon knows how to change without requiring a bounce.
    CONFIG_HASH_BLACKLIST = set(['instances', 'backoff_seconds'])

    system_paasta_config = load_system_paasta_config()
    partial_id = format_job_id(service=service, instance=instance)
    instance_config = load_marathon_service_config(
        service=service,
        instance=instance,
        cluster=load_system_paasta_config().get_cluster(),
        soa_dir=soa_dir,
    )
    docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                instance_config.get_docker_image())
    service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=instance_config.get_nerve_namespace(),
    )
    docker_volumes = system_paasta_config.get_volumes() + instance_config.get_extra_volumes()

    complete_config = instance_config.format_marathon_app_dict(
        app_id=partial_id,
        docker_url=docker_url,
        docker_volumes=docker_volumes,
        service_namespace_config=service_namespace_config,
    )
    code_sha = get_code_sha_from_dockerurl(docker_url)
    config_hash = get_config_hash(
        {key: value for key, value in complete_config.items()
         if key not in CONFIG_HASH_BLACKLIST},
        force_bounce=instance_config.get_force_bounce(),
    )
    full_id = format_job_id(service, instance, code_sha, config_hash)
    complete_config['id'] = full_id
    return complete_config

def create_complete_config(service, job_name, soa_dir=DEFAULT_SOA_DIR):
    """Generates a complete dictionary to be POST'ed to create a job on Chronos"""
    system_paasta_config = load_system_paasta_config()
    chronos_job_config = load_chronos_job_config(
        service, job_name, system_paasta_config.get_cluster(), soa_dir=soa_dir)
    docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                chronos_job_config.get_docker_image())
    docker_volumes = system_paasta_config.get_volumes() + chronos_job_config.get_extra_volumes()

    complete_config = chronos_job_config.format_chronos_job_dict(
        docker_url,
        docker_volumes,
    )
    complete_config['name'] = compose_job_id(service, job_name)
    desired_state = chronos_job_config.get_desired_state()

    # we use the undocumented description field to store a hash of the chronos config.
    # this makes it trivial to compare configs and know when to bounce.
    complete_config['description'] = get_config_hash(complete_config)

    # If the job was previously stopped, we should stop the new job as well
    # NOTE this clobbers the 'disabled' param specified in the config file!
    if desired_state == 'start':
        complete_config['disabled'] = False
    elif desired_state == 'stop':
        complete_config['disabled'] = True

    log.debug("Complete configuration for instance is: %s" % complete_config)
    return complete_config

def create_complete_config(service, job_name, soa_dir=DEFAULT_SOA_DIR):
    """Generates a complete dictionary to be POST'ed to create a job on Chronos"""
    system_paasta_config = load_system_paasta_config()
    chronos_job_config = load_chronos_job_config(
        service, job_name, system_paasta_config.get_cluster(), soa_dir=soa_dir)
    docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                chronos_job_config.get_docker_image())
    docker_volumes = system_paasta_config.get_volumes() + chronos_job_config.get_extra_volumes()

    complete_config = chronos_job_config.format_chronos_job_dict(
        docker_url,
        docker_volumes,
    )
    code_sha = get_code_sha_from_dockerurl(docker_url)
    config_hash = get_config_hash(complete_config)

    # Chronos clears the history for a job whenever it is updated, so we use a new job name for each revision
    # so that we can keep history of old job revisions rather than just the latest version
    full_id = compose_job_id(service, job_name, code_sha, config_hash)
    complete_config['name'] = full_id
    desired_state = chronos_job_config.get_desired_state()

    # If the job was previously stopped, we should stop the new job as well
    # NOTE this clobbers the 'disabled' param specified in the config file!
    if desired_state == 'start':
        complete_config['disabled'] = False
    elif desired_state == 'stop':
        complete_config['disabled'] = True

    log.debug("Complete configuration for instance is: %s" % complete_config)
    return complete_config

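# The Chronos and native variants build job names with compose_job_id(). A
# minimal sketch, assuming the PaaSTA convention that git_hash and config_hash
# must be supplied together (a name carrying only one of them could not be
# parsed back apart unambiguously); the spacer is parameterised because
# different schedulers use different separators, and the production helper may
# validate more:
def compose_job_id(name, instance, git_hash=None, config_hash=None, spacer='.'):
    """Join the pieces of a job id with the given spacer."""
    if bool(git_hash) != bool(config_hash):
        raise ValueError("git_hash and config_hash must be given together")
    parts = [name, instance]
    if git_hash and config_hash:
        parts += [git_hash, config_hash]
    return spacer.join(parts)
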
def create_complete_config(service, job_name, soa_dir=DEFAULT_SOA_DIR):
    """Generates a complete dictionary to be POST'ed to create a job on Chronos"""
    system_paasta_config = load_system_paasta_config()
    chronos_job_config = load_chronos_job_config(
        service, job_name, system_paasta_config.get_cluster(), soa_dir=soa_dir)
    docker_url = get_docker_url(
        system_paasta_config.get_docker_registry(),
        chronos_job_config.get_docker_image())
    docker_volumes = system_paasta_config.get_volumes() + chronos_job_config.get_extra_volumes()

    complete_config = chronos_job_config.format_chronos_job_dict(
        docker_url,
        docker_volumes,
        system_paasta_config.get_dockercfg_location(),
    )
    complete_config['name'] = compose_job_id(service, job_name)

    # resolve conflicts between the 'desired_state' and soa_configs disabled
    # flag.
    desired_state = chronos_job_config.get_desired_state()
    soa_disabled_state = complete_config['disabled']
    resolved_disabled_state = determine_disabled_state(desired_state,
                                                       soa_disabled_state)
    complete_config['disabled'] = resolved_disabled_state

    # we use the undocumented description field to store a hash of the chronos config.
    # this makes it trivial to compare configs and know when to bounce.
    complete_config['description'] = get_config_hash(complete_config)

    log.debug("Complete configuration for instance is: %s" % complete_config)
    return complete_config

def paasta_to_task_config_kwargs(
    service,
    instance,
    system_paasta_config,
    native_job_config,
    offer_timeout,
    docker_image=None,
):
    kwargs = {
        "cpus": float(native_job_config.get_cpus()),
        "mem": float(native_job_config.get_mem()),
        "disk": float(native_job_config.get_disk(10)),
        "uris": [system_paasta_config.get_dockercfg_location()],
        "environment": native_job_config.get_env_dictionary(),
        "containerizer": "DOCKER",
        "image": docker_image or native_job_config.get_docker_url(),
        "offer_timeout": offer_timeout,
    }

    # docker kwargs
    kwargs["docker_parameters"] = [
        {"key": param["key"], "value": param["value"]}
        for param in native_job_config.format_docker_parameters()
    ]
    docker_volumes = native_job_config.get_volumes(
        system_volumes=system_paasta_config.get_volumes()
    )
    kwargs["volumes"] = [
        {
            "container_path": volume["containerPath"],
            "host_path": volume["hostPath"],
            "mode": volume["mode"].upper(),
        }
        for volume in docker_volumes
    ]

    # cmd kwarg
    cmd = native_job_config.get_cmd()
    if cmd:
        kwargs["cmd"] = cmd

    # gpus kwarg
    gpus = native_job_config.get_gpus()
    if gpus:
        kwargs["gpus"] = int(gpus)
        kwargs["containerizer"] = "MESOS"  # docker containerizer does not support gpus

    # task name kwarg (requires everything else to hash)
    config_hash = get_config_hash(
        kwargs, force_bounce=native_job_config.get_force_bounce()
    )
    kwargs["name"] = str(
        compose_job_id(
            service,
            instance,
            git_hash=get_code_sha_from_dockerurl(kwargs["image"]),
            config_hash=config_hash,
            spacer=MESOS_TASK_SPACER,
        )
    )
    return kwargs

def format_custom_resource(
    instance_config: Mapping[str, Any],
    service: str,
    instance: str,
    cluster: str,
    kind: str,
    version: str,
    group: str,
    namespace: str,
    git_sha: str,
) -> Mapping[str, Any]:
    sanitised_service = sanitise_kubernetes_name(service)
    sanitised_instance = sanitise_kubernetes_name(instance)
    resource: Mapping[str, Any] = {
        "apiVersion": f"{group}/{version}",
        "kind": kind,
        "metadata": {
            "name": f"{sanitised_service}-{sanitised_instance}",
            "namespace": namespace,
            "labels": {
                "yelp.com/paasta_service": service,
                "yelp.com/paasta_instance": instance,
                "yelp.com/paasta_cluster": cluster,
                paasta_prefixed("service"): service,
                paasta_prefixed("instance"): instance,
                paasta_prefixed("cluster"): cluster,
            },
            "annotations": {},
        },
        "spec": instance_config,
    }
    url = get_dashboard_url(kind, service, instance, cluster)
    if url:
        resource["metadata"]["annotations"]["yelp.com/dashboard_url"] = url
        resource["metadata"]["annotations"][paasta_prefixed("dashboard_url")] = url
    config_hash = get_config_hash(resource)
    resource["metadata"]["annotations"]["yelp.com/desired_state"] = "running"
    resource["metadata"]["annotations"][paasta_prefixed("desired_state")] = "running"
    resource["metadata"]["labels"]["yelp.com/paasta_config_sha"] = config_hash
    resource["metadata"]["labels"][paasta_prefixed("config_sha")] = config_hash
    resource["metadata"]["labels"][paasta_prefixed("git_sha")] = git_sha
    return resource

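# A minimal sketch of how the richest format_custom_resource variant above
# might be invoked. Every value here is hypothetical, and the helpers it
# relies on (sanitise_kubernetes_name, paasta_prefixed, get_dashboard_url,
# get_config_hash) are assumed to be importable from the surrounding module:
resource = format_custom_resource(
    instance_config={"image": "docker-registry.example.com/services-foo:sha-abc123"},
    service="example_service",
    instance="main",
    cluster="example-cluster",
    kind="Flink",
    version="v1alpha1",
    group="yelp.com",
    namespace="paasta-flinks",
    git_sha="abc123",
)
# With a sanitiser that swaps '_' for '--', the object name would come out as
# "example--service-main", and the config_sha/git_sha labels let a controller
# decide whether the running resource needs to be replaced.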
def create_complete_config(service, job_name, soa_dir=DEFAULT_SOA_DIR):
    """Generates a complete dictionary to be POST'ed to create a job on Chronos"""
    system_paasta_config = load_system_paasta_config()
    chronos_job_config = load_chronos_job_config(
        service, job_name, system_paasta_config.get_cluster(), soa_dir=soa_dir)
    docker_url = chronos_job_config.get_docker_url()
    docker_volumes = chronos_job_config.get_volumes(
        system_volumes=system_paasta_config.get_volumes())
    constraints = chronos_job_config.get_calculated_constraints(
        system_paasta_config=system_paasta_config)

    complete_config = chronos_job_config.format_chronos_job_dict(
        docker_url=docker_url,
        docker_volumes=docker_volumes,
        docker_cfg_location=system_paasta_config.get_dockercfg_location(),
        constraints=constraints,
    )
    complete_config["name"] = compose_job_id(service, job_name)

    # resolve conflicts between the 'desired_state' and soa_configs disabled
    # flag.
    desired_state = chronos_job_config.get_desired_state()
    soa_disabled_state = complete_config["disabled"]
    resolved_disabled_state = determine_disabled_state(desired_state,
                                                       soa_disabled_state)
    complete_config["disabled"] = resolved_disabled_state

    config_for_hash = get_config_for_bounce_hash(
        complete_config=complete_config,
        service=service,
        soa_dir=soa_dir,
        system_paasta_config=system_paasta_config,
    )
    # we use the undocumented description field to store a hash of the chronos config.
    # this makes it trivial to compare configs and know when to bounce.
    complete_config["description"] = get_config_hash(
        config=config_for_hash,
        force_bounce=chronos_job_config.get_force_bounce())

    log.debug("Complete configuration for instance is: %s" % complete_config)
    return complete_config

def task_name(self, base_task: TaskInfo) -> str:
    code_sha = get_code_sha_from_dockerurl(
        base_task["container"]["docker"]["image"])

    # Blank out the fields that vary per launch (name, task id, agent id) so
    # the hash only reflects the parts of the task we actually configure.
    filled_in_task = copy.deepcopy(base_task)
    filled_in_task["name"] = ""
    filled_in_task["task_id"] = {"value": ""}
    filled_in_task["agent_id"] = {"value": ""}

    config_hash = get_config_hash(filled_in_task,
                                  force_bounce=self.get_force_bounce())

    return compose_job_id(
        self.service,
        self.instance,
        git_hash=code_sha,
        config_hash=config_hash,
        spacer=MESOS_TASK_SPACER,
    )

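# Several variants derive a short git sha from the docker image url via
# get_code_sha_from_dockerurl(). A minimal sketch, assuming the PaaSTA
# convention that the image tag ends in the sha of the commit that built it
# (e.g. ...:paasta-abc123def4); the production implementation may parse more
# strictly:
def get_code_sha_from_dockerurl(docker_url):
    """Extract the trailing sha from a docker image url and prefix it."""
    parts = docker_url.split('-')
    return 'git%s' % parts[-1][:8]
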
def task_name(self, base_task):
    code_sha = get_code_sha_from_dockerurl(base_task.container.docker.image)

    filled_in_task = mesos_pb2.TaskInfo()
    filled_in_task.MergeFrom(base_task)
    filled_in_task.name = ""
    filled_in_task.task_id.value = ""
    filled_in_task.slave_id.value = ""

    config_hash = get_config_hash(
        binascii.b2a_base64(filled_in_task.SerializeToString()),
        force_bounce=self.get_force_bounce(),
    )

    return compose_job_id(
        self.service,
        self.instance,
        git_hash=code_sha,
        config_hash=config_hash,
        spacer=MESOS_TASK_SPACER,
    )

def task_name(self, base_task):
    code_sha = get_code_sha_from_dockerurl(base_task.container.docker.image)

    filled_in_task = copy.deepcopy(base_task)
    filled_in_task.update(
        Dict(
            name='',
            task_id=Dict(value=''),
            slave_id=Dict(value=''),
        ))

    config_hash = get_config_hash(
        filled_in_task,
        force_bounce=self.get_force_bounce(),
    )

    return compose_job_id(
        self.service,
        self.instance,
        git_hash=code_sha,
        config_hash=config_hash,
        spacer=MESOS_TASK_SPACER,
    )

def paasta_to_task_config_kwargs(
    service,
    instance,
    native_job_config,
    offer_timeout,
    system_paasta_config,
    config_overrides=None,
    docker_image=None,
):
    if docker_image is None:
        docker_image = native_job_config.get_docker_url()
    docker_parameters = [{
        'key': param['key'],
        'value': param['value'],
    } for param in native_job_config.format_docker_parameters()]
    # network = native_job_config.get_mesos_network_mode()
    docker_volumes = native_job_config.get_volumes(
        system_volumes=system_paasta_config.get_volumes(),
    )
    volumes = [{
        'container_path': volume['containerPath'],
        'host_path': volume['hostPath'],
        'mode': volume['mode'].upper(),
    } for volume in docker_volumes]
    cmd = native_job_config.get_cmd()
    uris = system_paasta_config.get_dockercfg_location()
    cpus = native_job_config.get_cpus()
    mem = native_job_config.get_mem()
    disk = native_job_config.get_disk(10)
    gpus = native_job_config.get_gpus()

    kwargs = {
        'image': str(docker_image),
        'cpus': cpus,
        'mem': float(mem),
        'disk': float(disk),
        'volumes': volumes,
        # 'ports': None,
        # 'cap_add'
        # 'ulimit'
        'uris': [uris],
        'docker_parameters': docker_parameters,
        'containerizer': 'DOCKER',
        'environment': native_job_config.get_env_dictionary(),
        'offer_timeout': offer_timeout,
    }
    if cmd:
        kwargs['cmd'] = cmd
    if gpus > 0:
        kwargs['gpus'] = int(gpus)
        kwargs['containerizer'] = 'MESOS'

    config_hash = get_config_hash(
        kwargs,
        force_bounce=native_job_config.get_force_bounce(),
    )
    kwargs['name'] = str(
        compose_job_id(
            service,
            instance,
            git_hash=get_code_sha_from_dockerurl(docker_image),
            config_hash=config_hash,
            spacer=MESOS_TASK_SPACER,
        ))
    return kwargs

def format_kubernetes_app(self) -> Union[V1Deployment, V1StatefulSet]:
    """Create the configuration that will be passed to the Kubernetes REST API."""
    try:
        system_paasta_config = load_system_paasta_config()
        docker_url = self.get_docker_url()
        code_sha = get_code_sha_from_dockerurl(docker_url)
        if self.get_persistent_volumes():
            complete_config = V1StatefulSet(
                api_version='apps/v1',
                kind='StatefulSet',
                metadata=self.get_kubernetes_metadata(code_sha),
                spec=V1StatefulSetSpec(
                    service_name="{service}-{instance}".format(
                        service=self.get_sanitised_service_name(),
                        instance=self.get_sanitised_instance_name(),
                    ),
                    volume_claim_templates=self.get_volume_claim_templates(),
                    replicas=self.get_desired_instances(),
                    selector=V1LabelSelector(
                        match_labels={
                            "service": self.get_service(),
                            "instance": self.get_instance(),
                        },
                    ),
                    template=self.get_pod_template_spec(
                        code_sha=code_sha,
                        system_paasta_config=system_paasta_config,
                    ),
                ),
            )
        else:
            complete_config = V1Deployment(
                api_version='apps/v1',
                kind='Deployment',
                metadata=self.get_kubernetes_metadata(code_sha),
                spec=V1DeploymentSpec(
                    replicas=self.get_desired_instances(),
                    selector=V1LabelSelector(
                        match_labels={
                            "service": self.get_service(),
                            "instance": self.get_instance(),
                        },
                    ),
                    template=self.get_pod_template_spec(
                        code_sha=code_sha,
                        system_paasta_config=system_paasta_config,
                    ),
                    strategy=self.get_deployment_strategy_config(),
                ),
            )

        config_hash = get_config_hash(
            self.sanitize_for_config_hash(complete_config),
            force_bounce=self.get_force_bounce(),
        )
        complete_config.metadata.labels['config_sha'] = config_hash
        complete_config.spec.template.metadata.labels['config_sha'] = config_hash
    except Exception as e:
        raise InvalidKubernetesConfig(e, self.get_service(), self.get_instance())
    log.debug("Complete configuration for instance is: %s", complete_config)
    return complete_config

def format_marathon_app_dict(self):
    """Create the configuration that will be passed to the Marathon REST API.

    Currently compiles the following keys into one nice dict:

    - id: the ID of the image in Marathon
    - container: a dict containing the docker url and docker launch options. Needed by deimos.
    - uris: blank.
    - ports: an array containing the port.
    - env: environment variables for the container.
    - mem: the amount of memory required.
    - cpus: the number of cpus required.
    - disk: the amount of disk space required.
    - constraints: the constraints on the Marathon app.
    - instances: the number of instances required.
    - cmd: the command to be executed.
    - args: an alternative to cmd that requires the docker container to have an entrypoint.

    The last 7 keys are retrieved using the get_<key> functions defined above.

    :returns: A dict containing all of the keys listed above"""
    system_paasta_config = load_system_paasta_config()
    docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                self.get_docker_image())
    service_namespace_config = load_service_namespace_config(
        service=self.service,
        namespace=self.get_nerve_namespace(),
    )
    docker_volumes = self.get_volumes(
        system_volumes=system_paasta_config.get_volumes())
    net = get_mesos_network_for_net(self.get_net())

    complete_config = {
        'container': {
            'docker': {
                'image': docker_url,
                'network': net,
                'parameters': self.format_docker_parameters(),
            },
            'type': 'DOCKER',
            'volumes': docker_volumes,
        },
        'uris': [
            system_paasta_config.get_dockercfg_location(),
        ],
        'backoff_seconds': self.get_backoff_seconds(),
        'backoff_factor': self.get_backoff_factor(),
        'max_launch_delay_seconds': self.get_max_launch_delay_seconds(),
        'health_checks': self.get_healthchecks(service_namespace_config),
        'env': self.get_env(),
        'mem': float(self.get_mem()),
        'cpus': float(self.get_cpus()),
        'disk': float(self.get_disk()),
        'constraints': self.get_calculated_constraints(
            system_paasta_config=system_paasta_config,
            service_namespace_config=service_namespace_config),
        'instances': self.get_desired_instances(),
        'cmd': self.get_cmd(),
        'args': self.get_args(),
    }

    if net == 'BRIDGE':
        complete_config['container']['docker']['portMappings'] = [
            {
                'containerPort': self.get_container_port(),
                'hostPort': self.get_host_port(),
                'protocol': 'tcp',
            },
        ]
    else:
        complete_config['port_definitions'] = [
            {
                'port': self.get_host_port(),
                'protocol': 'tcp',
            },
        ]
        # Without this, we may end up with multiple containers requiring the same port on the same box.
        complete_config['require_ports'] = (self.get_host_port() != 0)

    accepted_resource_roles = self.get_accepted_resource_roles()
    if accepted_resource_roles is not None:
        complete_config['accepted_resource_roles'] = accepted_resource_roles

    code_sha = get_code_sha_from_dockerurl(docker_url)
    config_hash = get_config_hash(
        self.sanitize_for_config_hash(complete_config),
        force_bounce=self.get_force_bounce(),
    )
    complete_config['id'] = format_job_id(self.service, self.instance,
                                          code_sha, config_hash)

    log.debug("Complete configuration for instance is: %s", complete_config)
    return complete_config

def format_marathon_app_dict(self):
    """Create the configuration that will be passed to the Marathon REST API.

    Currently compiles the following keys into one nice dict:

    - id: the ID of the image in Marathon
    - container: a dict containing the docker url and docker launch options. Needed by deimos.
    - uris: blank.
    - ports: an array containing the port.
    - env: environment variables for the container.
    - mem: the amount of memory required.
    - cpus: the number of cpus required.
    - disk: the amount of disk space required.
    - constraints: the constraints on the Marathon app.
    - instances: the number of instances required.
    - cmd: the command to be executed.
    - args: an alternative to cmd that requires the docker container to have an entrypoint.

    The last 7 keys are retrieved using the get_<key> functions defined above.

    :returns: A dict containing all of the keys listed above"""
    # A set of config attributes that don't get included in the hash of the config.
    # These should be things that PaaSTA/Marathon knows how to change without requiring a bounce.
    CONFIG_HASH_BLACKLIST = set(['instances', 'backoff_seconds'])

    system_paasta_config = load_system_paasta_config()
    docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                self.get_docker_image())
    service_namespace_config = load_service_namespace_config(
        service=self.service,
        namespace=self.get_nerve_namespace(),
    )
    docker_volumes = system_paasta_config.get_volumes() + self.get_extra_volumes()
    net = get_mesos_network_for_net(self.get_net())

    complete_config = {
        'container': {
            'docker': {
                'image': docker_url,
                'network': net,
                'parameters': [
                    {'key': 'memory-swap', 'value': "%sm" % str(self.get_mem())},
                ],
            },
            'type': 'DOCKER',
            'volumes': docker_volumes,
        },
        'uris': [
            system_paasta_config.get_dockercfg_location(),
        ],
        'backoff_seconds': self.get_backoff_seconds(),
        'backoff_factor': 2,
        'health_checks': self.get_healthchecks(service_namespace_config),
        'env': self.get_env(),
        'mem': float(self.get_mem()),
        'cpus': float(self.get_cpus()),
        'disk': float(self.get_disk()),
        'constraints': self.get_constraints(service_namespace_config),
        'instances': self.get_instances(),
        'cmd': self.get_cmd(),
        'args': self.get_args(),
    }

    # portMappings only make sense (and are only accepted by Marathon) when
    # the container runs with bridge networking.
    if net == 'BRIDGE':
        complete_config['container']['docker']['portMappings'] = [
            {
                'containerPort': CONTAINER_PORT,
                'hostPort': 0,
                'protocol': 'tcp',
            },
        ]

    accepted_resource_roles = self.get_accepted_resource_roles()
    if accepted_resource_roles is not None:
        complete_config['accepted_resource_roles'] = accepted_resource_roles

    code_sha = get_code_sha_from_dockerurl(docker_url)
    config_hash = get_config_hash(
        {key: value for key, value in complete_config.items()
         if key not in CONFIG_HASH_BLACKLIST},
        force_bounce=self.get_force_bounce(),
    )
    complete_config['id'] = format_job_id(self.service, self.instance,
                                          code_sha, config_hash)

    log.debug("Complete configuration for instance is: %s", complete_config)
    return complete_config

def format_marathon_app_dict(self):
    """Create the configuration that will be passed to the Marathon REST API.

    Currently compiles the following keys into one nice dict:

    - id: the ID of the image in Marathon
    - container: a dict containing the docker url and docker launch options. Needed by deimos.
    - uris: blank.
    - ports: an array containing the port.
    - env: environment variables for the container.
    - mem: the amount of memory required.
    - cpus: the number of cpus required.
    - disk: the amount of disk space required.
    - constraints: the constraints on the Marathon app.
    - instances: the number of instances required.
    - cmd: the command to be executed.
    - args: an alternative to cmd that requires the docker container to have an entrypoint.

    The last 7 keys are retrieved using the get_<key> functions defined above.

    :returns: A dict containing all of the keys listed above"""
    system_paasta_config = load_system_paasta_config()
    docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                self.get_docker_image())
    service_namespace_config = load_service_namespace_config(
        service=self.service,
        namespace=self.get_nerve_namespace(),
    )
    docker_volumes = system_paasta_config.get_volumes() + self.get_extra_volumes()
    net = get_mesos_network_for_net(self.get_net())

    complete_config = {
        'container': {
            'docker': {
                'image': docker_url,
                'network': net,
                'parameters': self.format_docker_parameters(),
            },
            'type': 'DOCKER',
            'volumes': docker_volumes,
        },
        'uris': [
            system_paasta_config.get_dockercfg_location(),
        ],
        'backoff_seconds': self.get_backoff_seconds(),
        'backoff_factor': self.get_backoff_factor(),
        'max_launch_delay_seconds': self.get_max_launch_delay_seconds(),
        'health_checks': self.get_healthchecks(service_namespace_config),
        'env': self.get_env(),
        'mem': float(self.get_mem()),
        'cpus': float(self.get_cpus()),
        'disk': float(self.get_disk()),
        'constraints': self.get_calculated_constraints(service_namespace_config),
        'instances': self.get_instances(),
        'cmd': self.get_cmd(),
        'args': self.get_args(),
    }

    if net == 'BRIDGE':
        complete_config['container']['docker']['portMappings'] = [
            {
                'containerPort': CONTAINER_PORT,
                'hostPort': 0,
                'protocol': 'tcp',
            },
        ]

    accepted_resource_roles = self.get_accepted_resource_roles()
    if accepted_resource_roles is not None:
        complete_config['accepted_resource_roles'] = accepted_resource_roles

    code_sha = get_code_sha_from_dockerurl(docker_url)
    config_hash = get_config_hash(
        {key: value for key, value in complete_config.items()
         if key not in CONFIG_HASH_BLACKLIST},
        force_bounce=self.get_force_bounce(),
    )
    complete_config['id'] = format_job_id(self.service, self.instance,
                                          code_sha, config_hash)

    log.debug("Complete configuration for instance is: %s", complete_config)
    return complete_config

def format_kubernetes_app(self) -> V1Deployment:
    """Create the configuration that will be passed to the Kubernetes REST API."""
    system_paasta_config = load_system_paasta_config()
    docker_url = self.get_docker_url()
    # service_namespace_config = load_service_namespace_config(
    #     service=self.service,
    #     namespace=self.get_nerve_namespace(),
    # )
    docker_volumes = self.get_volumes(
        system_volumes=system_paasta_config.get_volumes())
    code_sha = get_code_sha_from_dockerurl(docker_url)
    complete_config = V1Deployment(
        metadata=V1ObjectMeta(
            name="{service}-{instance}".format(
                service=self.get_sanitised_service_name(),
                instance=self.get_sanitised_instance_name(),
            ),
            labels={
                "service": self.get_service(),
                "instance": self.get_instance(),
                "git_sha": code_sha,
            },
        ),
        spec=V1DeploymentSpec(
            replicas=self.get_instances(),
            selector=V1LabelSelector(
                match_labels={
                    "service": self.get_service(),
                    "instance": self.get_instance(),
                },
            ),
            strategy=self.get_deployment_strategy_config(),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(
                    labels={
                        "service": self.get_service(),
                        "instance": self.get_instance(),
                        "git_sha": code_sha,
                    },
                ),
                spec=V1PodSpec(
                    containers=self.get_kubernetes_containers(
                        volumes=docker_volumes,
                        system_paasta_config=system_paasta_config,
                    ),
                    restart_policy="Always",
                    volumes=self.get_pod_volumes(docker_volumes),
                ),
            ),
        ),
    )
    config_hash = get_config_hash(
        self.sanitize_for_config_hash(complete_config),
        force_bounce=self.get_force_bounce(),
    )
    complete_config.metadata.labels['config_sha'] = config_hash
    complete_config.spec.template.metadata.labels['config_sha'] = config_hash

    log.debug("Complete configuration for instance is: %s", complete_config)
    return complete_config

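# A minimal sketch of how a V1Deployment built by format_kubernetes_app() might
# be submitted with the official kubernetes Python client. `instance_config` is
# a hypothetical config object exposing the method above, and the "paasta"
# namespace is an assumption:
from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() when running in-cluster
deployment = instance_config.format_kubernetes_app()
apps_v1 = client.AppsV1Api()
apps_v1.create_namespaced_deployment(namespace="paasta", body=deployment)
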
def paasta_to_task_config_kwargs(service,
                                 instance,
                                 cluster,
                                 system_paasta_config,
                                 instance_type='paasta_native',
                                 soa_dir=DEFAULT_SOA_DIR,
                                 config_overrides=None):
    native_job_config = load_paasta_native_job_config(
        service,
        instance,
        cluster,
        soa_dir=soa_dir,
        instance_type=instance_type,
        config_overrides=config_overrides)
    image = native_job_config.get_docker_url()
    docker_parameters = [{
        'key': param['key'],
        'value': param['value'],
    } for param in native_job_config.format_docker_parameters()]
    # network = native_job_config.get_mesos_network_mode()
    docker_volumes = native_job_config.get_volumes(
        system_volumes=system_paasta_config.get_volumes())
    volumes = [{
        'container_path': volume['containerPath'],
        'host_path': volume['hostPath'],
        'mode': volume['mode'].upper(),
    } for volume in docker_volumes]
    cmd = native_job_config.get_cmd()
    uris = system_paasta_config.get_dockercfg_location()
    cpus = native_job_config.get_cpus()
    mem = native_job_config.get_mem()
    disk = native_job_config.get_disk(10)

    kwargs = {
        'image': str(image),
        'cmd': cmd,
        'cpus': cpus,
        'mem': float(mem),
        'disk': float(disk),
        'volumes': volumes,
        # 'ports': None,
        # 'cap_add'
        # 'ulimit'
        'uris': [uris],
        'docker_parameters': docker_parameters,
    }
    config_hash = get_config_hash(
        kwargs,
        force_bounce=native_job_config.get_force_bounce(),
    )
    kwargs['name'] = str(
        compose_job_id(
            service,
            instance,
            git_hash=get_code_sha_from_dockerurl(image),
            config_hash=config_hash,
            spacer=MESOS_TASK_SPACER,
        ))
    return kwargs