def format_chronos_job_dict(self, docker_url, docker_volumes, docker_cfg_location, constraints):
    """Assemble the job dict that will be submitted to the Chronos API.

    :param docker_url: fully-qualified docker image for the job to run
    :param docker_volumes: volume mounts to attach to the container
    :param docker_cfg_location: URI of the dockercfg fetched alongside the job
    :param constraints: Mesos placement constraints for the job
    :returns: a dict shaped the way Chronos expects
    :raises InvalidChronosConfigError: if self.validate() reports problems
    """
    valid, error_msgs = self.validate()
    if not valid:
        raise InvalidChronosConfigError("\n".join(error_msgs))

    raw_cmd = self.get_cmd()
    command = parse_time_variables(raw_cmd) if raw_cmd else raw_cmd

    container_spec = {
        'image': docker_url,
        'network': get_mesos_network_for_net(self.get_net()),
        'type': 'DOCKER',
        'volumes': docker_volumes,
        'parameters': self.format_docker_parameters(),
    }
    job = {
        'name': self.get_job_name(),
        'container': container_spec,
        'uris': [docker_cfg_location],
        'environmentVariables': self.get_env(),
        'mem': self.get_mem(),
        'cpus': self.get_cpus(),
        'disk': self.get_disk(),
        'constraints': constraints,
        'command': command,
        'arguments': self.get_args(),
        'epsilon': self.get_epsilon(),
        'retries': self.get_retries(),
        'async': False,  # we don't support async jobs
        'disabled': self.get_disabled(),
        'owner': self.get_owner(),
        'scheduleTimeZone': self.get_schedule_time_zone(),
        'shell': self.get_shell(),
    }

    schedule = self.get_schedule()
    if schedule is not None:
        job['schedule'] = schedule
    else:
        # The input to parents is the normal paasta syntax, but for chronos we have to
        # convert it to what chronos expects, which uses its own spacer
        job["parents"] = [
            paasta_to_chronos_job_name(parent)
            for parent in self.get_parents()
        ]
    return job
def format_chronos_job_dict(self, docker_url, docker_volumes, dockercfg_location):
    """Build the dict handed to Chronos when creating/updating this job.

    :param docker_url: fully-qualified docker image for the job to run
    :param docker_volumes: volume mounts to attach to the container
    :param dockercfg_location: URI of the dockercfg fetched alongside the job
    :returns: a dict shaped the way Chronos expects
    :raises InvalidChronosConfigError: if self.validate() reports problems
    :raises InvalidParentError: if a declared parent has no matching Chronos job
    """
    valid, error_msgs = self.validate()
    if not valid:
        raise InvalidChronosConfigError("\n".join(error_msgs))

    raw_cmd = self.get_cmd()
    job = {
        'name': self.get_job_name().encode('utf_8'),
        'container': {
            'image': docker_url,
            'network': get_mesos_network_for_net(self.get_net()),
            'type': 'DOCKER',
            'volumes': docker_volumes,
            'parameters': [
                {"key": "memory-swap", "value": self.get_mem_swap()},
            ],
        },
        'uris': [dockercfg_location],
        'environmentVariables': self.get_env(),
        'mem': self.get_mem(),
        'cpus': self.get_cpus(),
        'disk': self.get_disk(),
        'constraints': self.get_constraints(),
        'command': parse_time_variables(raw_cmd) if raw_cmd else raw_cmd,
        'arguments': self.get_args(),
        'epsilon': self.get_epsilon(),
        'retries': self.get_retries(),
        'async': False,  # we don't support async jobs
        'disabled': self.get_disabled(),
        'owner': self.get_owner(),
        'scheduleTimeZone': self.get_schedule_time_zone(),
        'shell': self.get_shell(),
    }

    schedule = self.get_schedule()
    if schedule is not None:
        job['schedule'] = schedule
    else:
        # Resolve each declared parent to an existing Chronos job; all lookups
        # run before any error is raised, matching the original two-phase flow.
        lookups = []
        for parent in self.get_parents():
            lookups.append((parent, get_job_for_service_instance(*parent.split("."))))
        for requested, found in lookups:
            if found is None:
                raise InvalidParentError("%s has no matching jobs in Chronos" % requested)
        job['parents'] = [found['name'] for _requested, found in lookups]
    return job
def format_chronos_job_dict(self, docker_url, docker_volumes, dockercfg_location):
    """Serialize this instance config into a Chronos job definition.

    :param docker_url: fully-qualified docker image for the job to run
    :param docker_volumes: volume mounts to attach to the container
    :param dockercfg_location: URI of the dockercfg fetched alongside the job
    :returns: a dict shaped the way Chronos expects
    :raises InvalidChronosConfigError: if self.validate() reports problems
    :raises InvalidParentError: if a declared parent has no matching Chronos job
    """
    valid, error_msgs = self.validate()
    if not valid:
        raise InvalidChronosConfigError("\n".join(error_msgs))

    container_spec = {
        'image': docker_url,
        'network': get_mesos_network_for_net(self.get_net()),
        'type': 'DOCKER',
        'volumes': docker_volumes,
        'parameters': self.format_docker_parameters(),
    }
    raw_cmd = self.get_cmd()
    job = {
        'name': self.get_job_name().encode('utf_8'),
        'container': container_spec,
        'uris': [dockercfg_location],
        'environmentVariables': self.get_env(),
        'mem': self.get_mem(),
        'cpus': self.get_cpus(),
        'disk': self.get_disk(),
        'constraints': self.get_calculated_constraints(),
        'command': parse_time_variables(raw_cmd) if raw_cmd else raw_cmd,
        'arguments': self.get_args(),
        'epsilon': self.get_epsilon(),
        'retries': self.get_retries(),
        'async': False,  # we don't support async jobs
        'disabled': self.get_disabled(),
        'owner': self.get_owner(),
        'scheduleTimeZone': self.get_schedule_time_zone(),
        'shell': self.get_shell(),
    }

    schedule = self.get_schedule()
    if schedule is not None:
        job['schedule'] = schedule
    else:
        # Resolve every declared parent first, then fail on the first missing
        # one — same two-phase order as the original implementation.
        lookups = []
        for parent in self.get_parents():
            lookups.append((parent, get_job_for_service_instance(*parent.split("."))))
        for requested, found in lookups:
            if found is None:
                raise InvalidParentError("%s has no matching jobs in Chronos" % requested)
        job['parents'] = [found['name'] for _requested, found in lookups]
    return job
def format_chronos_job_dict(self, docker_url, docker_volumes, docker_cfg_location, constraints):
    """Assemble the job dict that will be submitted to the Chronos API.

    :param docker_url: fully-qualified docker image for the job to run
    :param docker_volumes: volume mounts to attach to the container
    :param docker_cfg_location: URI of the dockercfg fetched alongside the job
    :param constraints: Mesos placement constraints for the job
    :returns: a dict shaped the way Chronos expects
    :raises InvalidChronosConfigError: if self.validate() reports problems
    """
    # Fix: every other variant of this method validates the config before
    # serializing it; without this guard an invalid job would be shipped to
    # Chronos and fail remotely instead of raising here.
    valid, error_msgs = self.validate()
    if not valid:
        raise InvalidChronosConfigError("\n".join(error_msgs))
    net = get_mesos_network_for_net(self.get_net())
    # parse_time_variables is only applied when a command is actually set.
    command = (parse_time_variables(self.get_cmd())
               if self.get_cmd() else self.get_cmd())
    complete_config = {
        "name": self.get_job_name(),
        "container": {
            "image": docker_url,
            "network": net,
            "type": "DOCKER",
            "volumes": docker_volumes,
            "parameters": self.format_docker_parameters(),
        },
        "uris": [docker_cfg_location],
        "environmentVariables": self.get_env(),
        "mem": self.get_mem(),
        "cpus": self.get_cpus(),
        "disk": self.get_disk(),
        "constraints": constraints,
        "command": command,
        "arguments": self.get_args(),
        "epsilon": self.get_epsilon(),
        "retries": self.get_retries(),
        "async": False,  # we don't support async jobs
        "disabled": self.get_disabled(),
        "owner": self.get_owner(),
        "scheduleTimeZone": self.get_schedule_time_zone(),
        "shell": self.get_shell(),
    }
    if self.get_schedule() is not None:
        complete_config["schedule"] = self.get_schedule()
    else:
        # The input to parents is the normal paasta syntax, but for chronos we have to
        # convert it to what chronos expects, which uses its own spacer
        complete_config["parents"] = [
            paasta_to_chronos_job_name(parent)
            for parent in self.get_parents()
        ]
    return complete_config
def format_chronos_job_dict(self, docker_url, docker_volumes, dockercfg_location):
    """Build the job definition dict that is posted to Chronos.

    :param docker_url: fully-qualified docker image for the job to run
    :param docker_volumes: volume mounts to attach to the container
    :param dockercfg_location: URI of the dockercfg fetched alongside the job
    :returns: a dict shaped the way Chronos expects
    :raises InvalidChronosConfigError: if self.validate() reports problems
    """
    valid, error_msgs = self.validate()
    if not valid:
        raise InvalidChronosConfigError("\n".join(error_msgs))

    raw_cmd = self.get_cmd()
    if raw_cmd:
        command = parse_time_variables(raw_cmd)
    else:
        command = raw_cmd

    job = {
        'name': self.get_job_name().encode('utf_8'),
        'container': {
            'image': docker_url,
            'network': get_mesos_network_for_net(self.get_net()),
            'type': 'DOCKER',
            'volumes': docker_volumes,
            'parameters': self.format_docker_parameters(),
        },
        'uris': [dockercfg_location],
        'environmentVariables': self.get_env(),
        'mem': self.get_mem(),
        'cpus': self.get_cpus(),
        'disk': self.get_disk(),
        'constraints': self.get_calculated_constraints(),
        'command': command,
        'arguments': self.get_args(),
        'epsilon': self.get_epsilon(),
        'retries': self.get_retries(),
        'async': False,  # we don't support async jobs
        'disabled': self.get_disabled(),
        'owner': self.get_owner(),
        'scheduleTimeZone': self.get_schedule_time_zone(),
        'shell': self.get_shell(),
    }

    schedule = self.get_schedule()
    if schedule is not None:
        job['schedule'] = schedule
    else:
        # The input to parents is the normal paasta syntax, but for chronos we have to
        # convert it to what chronos expects, which uses its own spacer
        job["parents"] = [paasta_to_chronos_job_name(p) for p in self.get_parents()]
    return job
def format_chronos_job_dict(self, docker_url, docker_volumes, dockercfg_location):
    """Serialize this instance config into the dict Chronos expects.

    :param docker_url: fully-qualified docker image for the job to run
    :param docker_volumes: volume mounts to attach to the container
    :param dockercfg_location: URI of the dockercfg fetched alongside the job
    :returns: a dict of the complete Chronos job configuration
    :raises InvalidChronosConfigError: if self.validate() reports problems
    """
    valid, error_msgs = self.validate()
    if not valid:
        raise InvalidChronosConfigError("\n".join(error_msgs))

    container_spec = {
        "image": docker_url,
        "network": get_mesos_network_for_net(self.get_net()),
        "type": "DOCKER",
        "volumes": docker_volumes,
        "parameters": self.format_docker_parameters(),
    }
    raw_cmd = self.get_cmd()
    job = {
        "name": self.get_job_name(),
        "container": container_spec,
        "uris": [dockercfg_location],
        "environmentVariables": self.get_env(),
        "mem": self.get_mem(),
        "cpus": self.get_cpus(),
        "disk": self.get_disk(),
        "constraints": self.get_calculated_constraints(),
        "command": parse_time_variables(raw_cmd) if raw_cmd else raw_cmd,
        "arguments": self.get_args(),
        "epsilon": self.get_epsilon(),
        "retries": self.get_retries(),
        "async": False,  # we don't support async jobs
        "disabled": self.get_disabled(),
        "owner": self.get_owner(),
        "scheduleTimeZone": self.get_schedule_time_zone(),
        "shell": self.get_shell(),
    }

    schedule = self.get_schedule()
    if schedule is not None:
        job["schedule"] = schedule
    else:
        # The input to parents is the normal paasta syntax, but for chronos we have to
        # convert it to what chronos expects, which uses its own spacer
        job["parents"] = [paasta_to_chronos_job_name(p) for p in self.get_parents()]
    return job
def format_marathon_app_dict(self):
    """Create the configuration that will be passed to the Marathon REST API.

    Combines the docker container spec (image, network, volumes,
    parameters), resource requirements (mem/cpus/disk), placement
    constraints, instance count, command/args, health checks, bounce
    settings and port configuration into a single app dict, then stamps
    it with an 'id' derived from the code sha and a hash of the
    sanitized config — so a config change produces a new Marathon app.

    :returns: a dict of the complete Marathon app configuration
    """
    system_paasta_config = load_system_paasta_config()
    docker_url = get_docker_url(
        system_paasta_config.get_docker_registry(),
        self.get_docker_image(),
    )
    service_namespace_config = load_service_namespace_config(
        service=self.service,
        namespace=self.get_nerve_namespace(),
    )
    docker_volumes = self.get_volumes(
        system_volumes=system_paasta_config.get_volumes(),
    )
    net = get_mesos_network_for_net(self.get_net())

    app = {
        'container': {
            'docker': {
                'image': docker_url,
                'network': net,
                "parameters": self.format_docker_parameters(),
            },
            'type': 'DOCKER',
            'volumes': docker_volumes,
        },
        'uris': [system_paasta_config.get_dockercfg_location()],
        'backoff_seconds': self.get_backoff_seconds(),
        'backoff_factor': self.get_backoff_factor(),
        'max_launch_delay_seconds': self.get_max_launch_delay_seconds(),
        'health_checks': self.get_healthchecks(service_namespace_config),
        'env': self.get_env(),
        'mem': float(self.get_mem()),
        'cpus': float(self.get_cpus()),
        'disk': float(self.get_disk()),
        'constraints': self.get_calculated_constraints(
            system_paasta_config=system_paasta_config,
            service_namespace_config=service_namespace_config,
        ),
        'instances': self.get_desired_instances(),
        'cmd': self.get_cmd(),
        'args': self.get_args(),
    }

    host_port = self.get_host_port()
    if net == 'BRIDGE':
        app['container']['docker']['portMappings'] = [
            {
                'containerPort': self.get_container_port(),
                'hostPort': host_port,
                'protocol': 'tcp',
            },
        ]
    else:
        app['port_definitions'] = [
            {
                'port': host_port,
                'protocol': 'tcp',
            },
        ]
    # Without this, we may end up with multiple containers requiring the same port on the same box.
    app['require_ports'] = host_port != 0

    accepted_resource_roles = self.get_accepted_resource_roles()
    if accepted_resource_roles is not None:
        app['accepted_resource_roles'] = accepted_resource_roles

    # The id incorporates both the code sha and a hash of the (sanitized)
    # config so that changing either forces a bounce.
    code_sha = get_code_sha_from_dockerurl(docker_url)
    config_hash = get_config_hash(
        self.sanitize_for_config_hash(app),
        force_bounce=self.get_force_bounce(),
    )
    app['id'] = format_job_id(self.service, self.instance, code_sha, config_hash)
    log.debug("Complete configuration for instance is: %s", app)
    return app
def format_marathon_app_dict(self):
    """Create the configuration that will be passed to the Marathon REST API.

    Builds the docker container spec, resource requirements
    (mem/cpus/disk), constraints, instance count, command/args, health
    checks and bounce settings into one app dict. BRIDGE networking
    additionally gets a port mapping for CONTAINER_PORT. The final 'id'
    is derived from the code sha plus a hash of the config (minus the
    CONFIG_HASH_BLACKLIST keys), so config changes force a new app.

    :returns: a dict of the complete Marathon app configuration
    """
    system_paasta_config = load_system_paasta_config()
    docker_url = get_docker_url(
        system_paasta_config.get_docker_registry(),
        self.get_docker_image(),
    )
    service_namespace_config = load_service_namespace_config(
        service=self.service,
        namespace=self.get_nerve_namespace(),
    )
    docker_volumes = system_paasta_config.get_volumes() + self.get_extra_volumes()
    net = get_mesos_network_for_net(self.get_net())

    docker_spec = {
        'image': docker_url,
        'network': net,
        "parameters": self.format_docker_parameters(),
    }
    app = {
        'container': {
            'docker': docker_spec,
            'type': 'DOCKER',
            'volumes': docker_volumes,
        },
        'uris': [system_paasta_config.get_dockercfg_location()],
        'backoff_seconds': self.get_backoff_seconds(),
        'backoff_factor': self.get_backoff_factor(),
        'max_launch_delay_seconds': self.get_max_launch_delay_seconds(),
        'health_checks': self.get_healthchecks(service_namespace_config),
        'env': self.get_env(),
        'mem': float(self.get_mem()),
        'cpus': float(self.get_cpus()),
        'disk': float(self.get_disk()),
        'constraints': self.get_calculated_constraints(service_namespace_config),
        'instances': self.get_instances(),
        'cmd': self.get_cmd(),
        'args': self.get_args(),
    }

    if net == 'BRIDGE':
        docker_spec['portMappings'] = [
            {
                'containerPort': CONTAINER_PORT,
                'hostPort': 0,  # let Mesos pick the host port
                'protocol': 'tcp',
            },
        ]

    accepted_resource_roles = self.get_accepted_resource_roles()
    if accepted_resource_roles is not None:
        app['accepted_resource_roles'] = accepted_resource_roles

    # Hash everything except the blacklisted keys so that changing those
    # (e.g. instance count) does not force a bounce.
    code_sha = get_code_sha_from_dockerurl(docker_url)
    hashable = {k: v for k, v in app.items() if k not in CONFIG_HASH_BLACKLIST}
    config_hash = get_config_hash(
        hashable,
        force_bounce=self.get_force_bounce(),
    )
    app['id'] = format_job_id(self.service, self.instance, code_sha, config_hash)
    log.debug("Complete configuration for instance is: %s", app)
    return app
def format_marathon_app_dict(self):
    """Create the configuration that will be passed to the Marathon REST API.

    Builds the docker container spec (image, network, port mapping,
    memory-swap parameter), resource requirements (mem/cpus/disk),
    constraints, instance count, command/args, health checks and bounce
    settings into one app dict. The final 'id' is derived from the code
    sha plus a hash of the config (excluding CONFIG_HASH_BLACKLIST keys),
    so config changes force a new Marathon app.

    :returns: a dict of the complete Marathon app configuration
    """
    # A set of config attributes that don't get included in the hash of the config.
    # These should be things that PaaSTA/Marathon knows how to change without requiring a bounce.
    CONFIG_HASH_BLACKLIST = set(['instances', 'backoff_seconds'])
    system_paasta_config = load_system_paasta_config()
    docker_url = get_docker_url(
        system_paasta_config.get_docker_registry(),
        self.get_docker_image(),
    )
    service_namespace_config = load_service_namespace_config(
        service=self.service,
        namespace=self.get_nerve_namespace(),
    )
    docker_volumes = system_paasta_config.get_volumes() + self.get_extra_volumes()
    net = get_mesos_network_for_net(self.get_net())
    complete_config = {
        'container': {
            'docker': {
                'image': docker_url,
                'network': net,
                'portMappings': [
                    {
                        'containerPort': CONTAINER_PORT,
                        'hostPort': 0,  # let Mesos pick the host port
                        'protocol': 'tcp',
                    },
                ],
                "parameters": [
                    # Cap swap at the same size as the memory allocation.
                    {"key": "memory-swap", "value": "%sm" % str(self.get_mem())},
                ],
            },
            'type': 'DOCKER',
            'volumes': docker_volumes,
        },
        # NOTE(review): other revisions of this method call
        # get_dockercfg_location() here — confirm get_dockerfile_location()
        # is the intended accessor for the uris entry.
        'uris': [system_paasta_config.get_dockerfile_location()],
        'backoff_seconds': self.get_backoff_seconds(),
        'backoff_factor': 2,
        'health_checks': self.get_healthchecks(service_namespace_config),
        'env': self.get_env(),
        'mem': float(self.get_mem()),
        'cpus': float(self.get_cpus()),
        'disk': float(self.get_disk()),
        'constraints': self.get_constraints(service_namespace_config),
        'instances': self.get_instances(),
        'cmd': self.get_cmd(),
        'args': self.get_args(),
    }
    # Fix: the original re-assigned an identical 'portMappings' list here when
    # net == 'BRIDGE'. Since the same list is already set unconditionally in
    # the docker dict above, that branch was dead code and has been removed;
    # the resulting dict is unchanged for every value of net.
    accepted_resource_roles = self.get_accepted_resource_roles()
    if accepted_resource_roles is not None:
        complete_config['accepted_resource_roles'] = accepted_resource_roles
    # Hash everything except blacklisted keys so changing those does not
    # force a bounce.
    code_sha = get_code_sha_from_dockerurl(docker_url)
    config_hash = get_config_hash(
        {
            key: value
            for key, value in complete_config.items()
            if key not in CONFIG_HASH_BLACKLIST
        },
        force_bounce=self.get_force_bounce(),
    )
    complete_config['id'] = format_job_id(self.service, self.instance, code_sha, config_hash)
    log.debug("Complete configuration for instance is: %s", complete_config)
    return complete_config