Пример #1
0
def create_complete_config(service, job_name, soa_dir=DEFAULT_SOA_DIR):
    """Build the full job dictionary that will be POST'ed to Chronos.

    :param service: name of the service the job belongs to
    :param job_name: name of the Chronos job
    :param soa_dir: soa-configs directory to read the job config from
    :returns: a dict ready to be sent to the Chronos API
    """
    paasta_config = load_system_paasta_config()
    job_config = load_chronos_job_config(
        service, job_name, paasta_config.get_cluster(), soa_dir=soa_dir)

    image_url = get_docker_url(
        paasta_config.get_docker_registry(), job_config.get_docker_image())
    volumes = paasta_config.get_volumes() + job_config.get_extra_volumes()

    complete_config = job_config.format_chronos_job_dict(image_url, volumes)
    complete_config['name'] = compose_job_id(service, job_name)

    # The (undocumented) description field stores a hash of the config so we
    # can cheaply compare configs and know when to bounce.
    complete_config['description'] = get_config_hash(complete_config)

    # Honor the desired state from deployments: a previously stopped job
    # stays stopped.
    # NOTE this clobbers the 'disabled' param specified in the config file!
    desired_state = job_config.get_desired_state()
    if desired_state == 'start':
        complete_config['disabled'] = False
    elif desired_state == 'stop':
        complete_config['disabled'] = True

    log.debug("Complete configuration for instance is: %s" % complete_config)
    return complete_config
Пример #2
0
def create_complete_config(service, instance, marathon_config, soa_dir=DEFAULT_SOA_DIR):
    """Build the complete app dictionary that will be POST'ed to Marathon.

    :param service: name of the service
    :param instance: name of the instance
    :param marathon_config: marathon cluster configuration (kept for
        interface compatibility; not read directly here)
    :param soa_dir: soa-configs directory to read configuration from
    :returns: a dict ready to be sent to the Marathon API
    """
    # Keys that can be changed in place without requiring a bounce; they are
    # excluded from the config hash.
    CONFIG_HASH_BLACKLIST = {'instances', 'backoff_seconds'}

    system_paasta_config = load_system_paasta_config()
    partial_id = format_job_id(service=service, instance=instance)
    instance_config = load_marathon_service_config(
        service=service,
        instance=instance,
        cluster=load_system_paasta_config().get_cluster(),
        soa_dir=soa_dir,
    )
    docker_url = get_docker_url(
        system_paasta_config.get_docker_registry(),
        instance_config.get_docker_image(),
    )
    service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=instance_config.get_nerve_namespace(),
    )
    docker_volumes = system_paasta_config.get_volumes() + instance_config.get_extra_volumes()

    complete_config = instance_config.format_marathon_app_dict(
        app_id=partial_id,
        docker_url=docker_url,
        docker_volumes=docker_volumes,
        service_namespace_config=service_namespace_config,
    )

    code_sha = get_code_sha_from_dockerurl(docker_url)
    hashable_config = {
        key: value
        for key, value in complete_config.items()
        if key not in CONFIG_HASH_BLACKLIST
    }
    config_hash = get_config_hash(
        hashable_config,
        force_bounce=instance_config.get_force_bounce(),
    )
    # The app id embeds the code sha and config hash, so a changed config
    # yields a new Marathon app id.
    complete_config['id'] = format_job_id(service, instance, code_sha, config_hash)
    return complete_config
Пример #3
0
def create_complete_config(service, job_name, soa_dir=DEFAULT_SOA_DIR):
    """Build the full job dictionary that will be POST'ed to Chronos.

    :param service: name of the service the job belongs to
    :param job_name: name of the Chronos job
    :param soa_dir: soa-configs directory to read the job config from
    :returns: a dict ready to be sent to the Chronos API
    """
    paasta_config = load_system_paasta_config()
    job_config = load_chronos_job_config(
        service, job_name, paasta_config.get_cluster(), soa_dir=soa_dir)
    image_url = get_docker_url(
        paasta_config.get_docker_registry(), job_config.get_docker_image())
    volumes = paasta_config.get_volumes() + job_config.get_extra_volumes()

    complete_config = job_config.format_chronos_job_dict(
        image_url,
        volumes,
        paasta_config.get_dockercfg_location(),
    )
    complete_config['name'] = compose_job_id(service, job_name)

    # The 'desired_state' from deployments may conflict with the 'disabled'
    # flag from soa-configs; determine_disabled_state() resolves the two.
    complete_config['disabled'] = determine_disabled_state(
        job_config.get_desired_state(),
        complete_config['disabled'],
    )

    # The (undocumented) description field stores a hash of the config so we
    # can cheaply compare configs and know when to bounce.
    complete_config['description'] = get_config_hash(complete_config)

    log.debug("Complete configuration for instance is: %s" % complete_config)
    return complete_config
Пример #4
0
def create_complete_config(service, job_name, soa_dir=DEFAULT_SOA_DIR):
    """Build the full job dictionary that will be POST'ed to Chronos.

    :param service: name of the service the job belongs to
    :param job_name: name of the Chronos job
    :param soa_dir: soa-configs directory to read the job config from
    :returns: a dict ready to be sent to the Chronos API
    """
    paasta_config = load_system_paasta_config()
    job_config = load_chronos_job_config(
        service, job_name, paasta_config.get_cluster(), soa_dir=soa_dir)
    image_url = get_docker_url(
        paasta_config.get_docker_registry(), job_config.get_docker_image())
    volumes = paasta_config.get_volumes() + job_config.get_extra_volumes()

    complete_config = job_config.format_chronos_job_dict(image_url, volumes)

    # Chronos clears a job's history whenever the job is updated, so every
    # revision gets a brand-new name (embedding code sha + config hash);
    # old revisions keep their history under their old names.
    complete_config['name'] = compose_job_id(
        service,
        job_name,
        get_code_sha_from_dockerurl(image_url),
        get_config_hash(complete_config),
    )

    # Honor the desired state from deployments: a previously stopped job
    # stays stopped.
    # NOTE this clobbers the 'disabled' param specified in the config file!
    desired_state = job_config.get_desired_state()
    if desired_state == 'start':
        complete_config['disabled'] = False
    elif desired_state == 'stop':
        complete_config['disabled'] = True

    log.debug("Complete configuration for instance is: %s" % complete_config)
    return complete_config
Пример #5
0
def create_complete_config(service, job_name, soa_dir=DEFAULT_SOA_DIR):
    """Build the full job dictionary that will be POST'ed to Chronos.

    :param service: name of the service the job belongs to
    :param job_name: name of the Chronos job
    :param soa_dir: soa-configs directory to read the job config from
    :returns: a dict ready to be sent to the Chronos API
    """
    paasta_config = load_system_paasta_config()
    job_config = load_chronos_job_config(
        service,
        job_name,
        paasta_config.get_cluster(),
        soa_dir=soa_dir,
    )
    image_url = get_docker_url(
        paasta_config.get_docker_registry(),
        job_config.get_docker_image(),
    )
    volumes = paasta_config.get_volumes() + job_config.get_extra_volumes()

    complete_config = job_config.format_chronos_job_dict(image_url, volumes)

    # Chronos clears a job's history whenever it is updated, so each revision
    # gets a fresh job name (embedding code sha + config hash); history for
    # old revisions is preserved under their old names.
    code_sha = get_code_sha_from_dockerurl(image_url)
    config_hash = get_config_hash(complete_config)
    complete_config["name"] = compose_job_id(service, job_name, code_sha, config_hash)

    # Honor the desired state from deployments: a previously stopped job
    # stays stopped.
    # NOTE this clobbers the 'disabled' param specified in the config file!
    desired_state = job_config.get_desired_state()
    if desired_state in ("start", "stop"):
        complete_config["disabled"] = (desired_state == "stop")

    log.debug("Complete configuration for instance is: %s" % complete_config)
    return complete_config
Пример #6
0
def create_complete_config(service, job_name, soa_dir=DEFAULT_SOA_DIR):
    """Generates a complete dictionary to be POST'ed to create a job on Chronos.

    :param service: name of the service the job belongs to
    :param job_name: name of the Chronos job
    :param soa_dir: soa-configs directory to read the job config from
    :returns: a dict ready to be sent to the Chronos API
    """
    system_paasta_config = load_system_paasta_config()
    chronos_job_config = load_chronos_job_config(
        service, job_name, system_paasta_config.get_cluster(), soa_dir=soa_dir)
    docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                chronos_job_config.get_docker_image())
    docker_volumes = system_paasta_config.get_volumes(
    ) + chronos_job_config.get_extra_volumes()

    complete_config = chronos_job_config.format_chronos_job_dict(
        docker_url,
        docker_volumes,
        # BUG FIX: the accessor is get_dockercfg_location() (as used by the
        # sibling implementation above); get_dockerfile_location() is not a
        # SystemPaastaConfig method and would raise AttributeError here.
        system_paasta_config.get_dockercfg_location(),
    )

    complete_config['name'] = compose_job_id(service, job_name)

    # resolve conflicts between the 'desired_state' and soa_configs disabled
    # flag.
    desired_state = chronos_job_config.get_desired_state()
    soa_disabled_state = complete_config['disabled']

    resolved_disabled_state = determine_disabled_state(desired_state,
                                                       soa_disabled_state)
    complete_config['disabled'] = resolved_disabled_state

    # we use the undocumented description field to store a hash of the chronos config.
    # this makes it trivial to compare configs and know when to bounce.
    complete_config['description'] = get_config_hash(complete_config)

    log.debug("Complete configuration for instance is: %s" % complete_config)
    return complete_config
Пример #7
0
def create_complete_config(service, job_name, soa_dir=DEFAULT_SOA_DIR):
    """Build the full job dictionary that will be POST'ed to Chronos.

    :param service: name of the service the job belongs to
    :param job_name: name of the Chronos job
    :param soa_dir: soa-configs directory to read the job config from
    :returns: a dict ready to be sent to the Chronos API
    """
    paasta_config = load_system_paasta_config()
    job_config = load_chronos_job_config(
        service, job_name, paasta_config.get_cluster(), soa_dir=soa_dir)
    image_url = get_docker_url(
        paasta_config.get_docker_registry(), job_config.get_docker_image())
    volumes = paasta_config.get_volumes() + job_config.get_extra_volumes()

    complete_config = job_config.format_chronos_job_dict(image_url, volumes)
    complete_config['name'] = compose_job_id(service, job_name)

    # Honor the desired state from deployments: a previously stopped job
    # stays stopped.
    # NOTE this clobbers the 'disabled' param specified in the config file!
    state_to_disabled = {'start': False, 'stop': True}
    desired_state = job_config.get_desired_state()
    if desired_state in state_to_disabled:
        complete_config['disabled'] = state_to_disabled[desired_state]

    # The (undocumented) description field stores a hash of the config (here
    # computed after the 'disabled' flag is resolved) so we can cheaply
    # compare configs and know when to bounce.
    complete_config['description'] = get_config_hash(complete_config)

    log.debug("Complete configuration for instance is: %s" % complete_config)
    return complete_config
Пример #8
0
def create_complete_config(service, instance, marathon_config, soa_dir=DEFAULT_SOA_DIR):
    """Generates a complete dictionary to be POST'ed to create an app on Marathon.

    :param service: name of the service
    :param instance: name of the instance
    :param marathon_config: marathon cluster configuration (kept for
        interface compatibility; not read directly here)
    :param soa_dir: soa-configs directory to read configuration from
    :returns: a dict ready to be sent to the Marathon API
    """
    # A set of config attributes that don't get included in the hash of the config.
    # These should be things that PaaSTA/Marathon knows how to change without requiring a bounce.
    # BUG FIX: previously the full config (including 'instances') was hashed,
    # so merely scaling an app produced a new config hash and forced a full
    # bounce; the sibling implementations in this file already exclude these.
    CONFIG_HASH_BLACKLIST = set(['instances', 'backoff_seconds'])

    system_paasta_config = load_system_paasta_config()
    partial_id = format_job_id(service=service, instance=instance)
    instance_config = load_marathon_service_config(
        service=service,
        instance=instance,
        cluster=load_system_paasta_config().get_cluster(),
        soa_dir=soa_dir,
    )
    docker_url = get_docker_url(system_paasta_config.get_docker_registry(), instance_config.get_docker_image())
    service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=instance_config.get_nerve_namespace(),
    )
    docker_volumes = system_paasta_config.get_volumes() + instance_config.get_extra_volumes()

    complete_config = instance_config.format_marathon_app_dict(
        app_id=partial_id,
        docker_url=docker_url,
        docker_volumes=docker_volumes,
        service_namespace_config=service_namespace_config,
    )
    code_sha = get_code_sha_from_dockerurl(docker_url)
    config_hash = get_config_hash(
        {key: value for key, value in complete_config.items() if key not in CONFIG_HASH_BLACKLIST},
        force_bounce=instance_config.get_force_bounce(),
    )
    full_id = format_job_id(service, instance, code_sha, config_hash)
    complete_config['id'] = full_id
    return complete_config
    def base_task(self, system_paasta_config, portMappings=True):
        """Return a TaskInfo protobuf with all the fields corresponding to the configuration filled in.

        Does not include task.slave_id or a task.id; those need to be
        computed separately.

        :param system_paasta_config: system config providing the docker
            registry, system-wide volumes and the dockercfg location
        :param portMappings: when True, also declare a tcp port mapping and
            a 'ports' RANGES resource; the host port and range bounds are
            left as 0 to be filled in by tasks_and_state_for_offer()
        :returns: a mesos_pb2.TaskInfo with container, command and resources set
        """
        task = mesos_pb2.TaskInfo()
        task.container.type = mesos_pb2.ContainerInfo.DOCKER
        task.container.docker.image = get_docker_url(
            system_paasta_config.get_docker_registry(),
            self.get_docker_image())

        # Forward each docker parameter as a key/value pair on the protobuf.
        for param in self.format_docker_parameters():
            p = task.container.docker.parameters.add()
            p.key = param['key']
            p.value = param['value']

        task.container.docker.network = self.get_mesos_network_mode()

        # Mount the instance's volumes (merged with the system-wide ones).
        docker_volumes = self.get_volumes(
            system_volumes=system_paasta_config.get_volumes())
        for volume in docker_volumes:
            v = task.container.volumes.add()
            # volume['mode'] is lowercase in config; the protobuf enum
            # constants (e.g. RO/RW) are uppercase attributes of Volume.
            v.mode = getattr(mesos_pb2.Volume, volume['mode'].upper())
            v.container_path = volume['containerPath']
            v.host_path = volume['hostPath']

        task.command.value = self.get_cmd()
        # Scalar resources (cpus, mem) required by the task.
        cpus = task.resources.add()
        cpus.name = "cpus"
        cpus.type = mesos_pb2.Value.SCALAR
        cpus.scalar.value = self.get_cpus()
        mem = task.resources.add()
        mem.name = "mem"
        mem.type = mesos_pb2.Value.SCALAR
        mem.scalar.value = self.get_mem()

        if portMappings:
            pm = task.container.docker.port_mappings.add()
            pm.container_port = self.get_container_port()
            pm.host_port = 0  # will be filled in by tasks_and_state_for_offer()
            pm.protocol = "tcp"

            port = task.resources.add()
            port.name = "ports"
            port.type = mesos_pb2.Value.RANGES
            port.ranges.range.add()
            port.ranges.range[
                0].begin = 0  # will be filled in by tasks_and_state_for_offer().
            port.ranges.range[
                0].end = 0  # will be filled in by tasks_and_state_for_offer().

        task.name = self.task_name(task)

        # Attach the dockercfg as a fetchable URI (extract=False keeps the
        # file as-is); presumably used for registry authentication — confirm.
        docker_cfg_uri = task.command.uris.add()
        docker_cfg_uri.value = system_paasta_config.get_dockercfg_location()
        docker_cfg_uri.extract = False

        return task
Пример #10
0
def create_complete_config(service,
                           instance,
                           marathon_config,
                           soa_dir=DEFAULT_SOA_DIR):
    """Build the complete app dictionary that will be POST'ed to Marathon.

    :param service: name of the service
    :param instance: name of the instance
    :param marathon_config: marathon cluster configuration (kept for
        interface compatibility; not read directly here)
    :param soa_dir: soa-configs directory to read configuration from
    :returns: a dict ready to be sent to the Marathon API
    """
    # Keys that can be changed in place without requiring a bounce; they are
    # excluded from the config hash.
    CONFIG_HASH_BLACKLIST = {'instances', 'backoff_seconds'}

    system_paasta_config = load_system_paasta_config()
    partial_id = format_job_id(service=service, instance=instance)
    instance_config = load_marathon_service_config(
        service=service,
        instance=instance,
        cluster=load_system_paasta_config().get_cluster(),
        soa_dir=soa_dir,
    )
    docker_url = get_docker_url(
        system_paasta_config.get_docker_registry(),
        instance_config.get_docker_image(),
    )
    service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=instance_config.get_nerve_namespace(),
    )
    docker_volumes = (system_paasta_config.get_volumes() +
                      instance_config.get_extra_volumes())

    complete_config = instance_config.format_marathon_app_dict(
        app_id=partial_id,
        docker_url=docker_url,
        docker_volumes=docker_volumes,
        service_namespace_config=service_namespace_config,
    )

    code_sha = get_code_sha_from_dockerurl(docker_url)
    hashable = {k: v for k, v in complete_config.items()
                if k not in CONFIG_HASH_BLACKLIST}
    config_hash = get_config_hash(
        hashable,
        force_bounce=instance_config.get_force_bounce(),
    )
    # The app id embeds the code sha and config hash, so a changed config
    # yields a new Marathon app id.
    complete_config['id'] = format_job_id(service, instance, code_sha, config_hash)
    return complete_config
Пример #11
0
    def base_task(self, system_paasta_config):
        """Build the TaskInfo protobuf for this configuration.

        task.slave_id and task.id are intentionally left unset; callers
        compute those separately.

        :param system_paasta_config: system config providing the docker registry
        :returns: a mesos_pb2.TaskInfo with container, command and resources set
        """
        task = mesos_pb2.TaskInfo()
        task.container.type = mesos_pb2.ContainerInfo.DOCKER
        task.container.docker.image = get_docker_url(
            system_paasta_config.get_docker_registry(),
            self.get_docker_image())
        task.command.value = self.get_cmd()

        # Declare the scalar resources (cpus, then mem) the task requires.
        for resource_name, amount in (('cpus', self.get_cpus()),
                                      ('mem', self.get_mem())):
            resource = task.resources.add()
            resource.name = resource_name
            resource.type = mesos_pb2.Value.SCALAR
            resource.scalar.value = amount

        task.name = self.task_name(task)
        return task
Пример #12
0
    def base_task(self, system_paasta_config):
        """Build the TaskInfo protobuf for this configuration.

        task.slave_id and task.id are intentionally left unset; callers
        compute those separately.

        :param system_paasta_config: system config providing the docker registry
        :returns: a mesos_pb2.TaskInfo with container, command and resources set
        """
        task = mesos_pb2.TaskInfo()
        task.container.type = mesos_pb2.ContainerInfo.DOCKER
        registry = system_paasta_config.get_docker_registry()
        task.container.docker.image = get_docker_url(registry, self.get_docker_image())
        task.command.value = self.get_cmd()

        # cpus resource.
        cpu_resource = task.resources.add()
        cpu_resource.name = "cpus"
        cpu_resource.type = mesos_pb2.Value.SCALAR
        cpu_resource.scalar.value = self.get_cpus()

        # mem resource.
        mem_resource = task.resources.add()
        mem_resource.name = "mem"
        mem_resource.type = mesos_pb2.Value.SCALAR
        mem_resource.scalar.value = self.get_mem()

        task.name = self.task_name(task)
        return task
Пример #13
0
def create_complete_config(service,
                           instance,
                           marathon_config,
                           soa_dir=DEFAULT_SOA_DIR):
    """Build the complete app dictionary that will be POST'ed to Marathon.

    :param service: name of the service
    :param instance: name of the instance
    :param marathon_config: marathon cluster configuration (kept for
        interface compatibility; not read directly here)
    :param soa_dir: soa-configs directory to read configuration from
    :returns: a dict ready to be sent to the Marathon API
    """
    paasta_config = load_system_paasta_config()
    partial_id = format_job_id(service=service, instance=instance)
    marathon_service_config = load_marathon_service_config(
        service=service,
        instance=instance,
        cluster=load_system_paasta_config().get_cluster(),
        soa_dir=soa_dir,
    )
    image_url = get_docker_url(
        paasta_config.get_docker_registry(),
        marathon_service_config.get_docker_image(),
    )
    namespace_config = load_service_namespace_config(
        service=service,
        namespace=marathon_service_config.get_nerve_namespace(),
    )
    volumes = (paasta_config.get_volumes() +
               marathon_service_config.get_extra_volumes())

    complete_config = marathon_service_config.format_marathon_app_dict(
        app_id=partial_id,
        docker_url=image_url,
        docker_volumes=volumes,
        service_namespace_config=namespace_config,
    )

    # Hash the whole app dict (plus any force-bounce token) into the app id,
    # so a changed config yields a new Marathon app id.
    code_sha = get_code_sha_from_dockerurl(image_url)
    config_hash = get_config_hash(
        complete_config,
        force_bounce=marathon_service_config.get_force_bounce(),
    )
    complete_config['id'] = format_job_id(service, instance, code_sha, config_hash)
    return complete_config
Пример #14
0
    def format_marathon_app_dict(self):
        """Create the configuration that will be passed to the Marathon REST API.

        Everything is derived from this instance's own configuration plus the
        system PaaSTA config loaded here. Compiles the following keys into
        one dict:

        - id: the full Marathon app id (embeds code sha and config hash)
        - container: a dict containing the docker url and docker launch options. Needed by deimos.
        - uris: the dockercfg location.
        - env: environment variables for the container.
        - mem: the amount of memory required.
        - cpus: the number of cpus required.
        - disk: the amount of disk space required.
        - constraints: the constraints on the Marathon app.
        - instances: the number of instances required.
        - cmd: the command to be executed.
        - args: an alternative to cmd that requires the docker container to have an entrypoint.

        Most values are retrieved using the get_<key> accessors defined above.

        :returns: A dict containing all of the keys listed above"""

        system_paasta_config = load_system_paasta_config()
        docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                    self.get_docker_image())
        service_namespace_config = load_service_namespace_config(
            service=self.service,
            namespace=self.get_nerve_namespace(),
        )
        # Instance volumes merged with the system-wide ones.
        docker_volumes = self.get_volumes(
            system_volumes=system_paasta_config.get_volumes())

        net = get_mesos_network_for_net(self.get_net())

        complete_config = {
            'container': {
                'docker': {
                    'image': docker_url,
                    'network': net,
                    "parameters": self.format_docker_parameters(),
                },
                'type': 'DOCKER',
                'volumes': docker_volumes,
            },
            'uris': [
                system_paasta_config.get_dockercfg_location(),
            ],
            'backoff_seconds':
            self.get_backoff_seconds(),
            'backoff_factor':
            self.get_backoff_factor(),
            'max_launch_delay_seconds':
            self.get_max_launch_delay_seconds(),
            'health_checks':
            self.get_healthchecks(service_namespace_config),
            'env':
            self.get_env(),
            'mem':
            float(self.get_mem()),
            'cpus':
            float(self.get_cpus()),
            'disk':
            float(self.get_disk()),
            'constraints':
            self.get_calculated_constraints(
                system_paasta_config=system_paasta_config,
                service_namespace_config=service_namespace_config),
            'instances':
            self.get_desired_instances(),
            'cmd':
            self.get_cmd(),
            'args':
            self.get_args(),
        }

        # BRIDGE networking exposes the container port via a port mapping;
        # otherwise declare a port definition on the host.
        if net == 'BRIDGE':
            complete_config['container']['docker']['portMappings'] = [
                {
                    'containerPort': self.get_container_port(),
                    'hostPort': self.get_host_port(),
                    'protocol': 'tcp',
                },
            ]
        else:
            complete_config['port_definitions'] = [
                {
                    'port': self.get_host_port(),
                    'protocol': 'tcp',
                },
            ]
            # Without this, we may end up with multiple containers requiring the same port on the same box.
            complete_config['require_ports'] = (self.get_host_port() != 0)

        accepted_resource_roles = self.get_accepted_resource_roles()
        if accepted_resource_roles is not None:
            complete_config[
                'accepted_resource_roles'] = accepted_resource_roles

        code_sha = get_code_sha_from_dockerurl(docker_url)

        # The hash is computed over a sanitized copy of the config (plus any
        # force-bounce token) and embedded in the app id, so a changed config
        # produces a new app id.
        config_hash = get_config_hash(
            self.sanitize_for_config_hash(complete_config),
            force_bounce=self.get_force_bounce(),
        )
        complete_config['id'] = format_job_id(self.service, self.instance,
                                              code_sha, config_hash)

        log.debug("Complete configuration for instance is: %s",
                  complete_config)
        return complete_config
Пример #15
0
    def format_marathon_app_dict(self):
        """Create the configuration that will be passed to the Marathon REST API.

        Everything is derived from this instance's own configuration plus the
        system PaaSTA config loaded here. Compiles the following keys into
        one dict:

        - id: the full Marathon app id (embeds code sha and config hash)
        - container: a dict containing the docker url and docker launch options. Needed by deimos.
        - uris: the dockercfg location.
        - env: environment variables for the container.
        - mem: the amount of memory required.
        - cpus: the number of cpus required.
        - disk: the amount of disk space required.
        - constraints: the constraints on the Marathon app.
        - instances: the number of instances required.
        - cmd: the command to be executed.
        - args: an alternative to cmd that requires the docker container to have an entrypoint.

        Most values are retrieved using the get_<key> accessors defined above.

        :returns: A dict containing all of the keys listed above"""

        system_paasta_config = load_system_paasta_config()
        docker_url = get_docker_url(system_paasta_config.get_docker_registry(), self.get_docker_image())
        service_namespace_config = load_service_namespace_config(
            service=self.service,
            namespace=self.get_nerve_namespace(),
        )
        # System-wide volumes plus this instance's extra volumes.
        docker_volumes = system_paasta_config.get_volumes() + self.get_extra_volumes()

        net = get_mesos_network_for_net(self.get_net())

        complete_config = {
            'container': {
                'docker': {
                    'image': docker_url,
                    'network': net,
                    "parameters": self.format_docker_parameters(),
                },
                'type': 'DOCKER',
                'volumes': docker_volumes,
            },
            'uris': [system_paasta_config.get_dockercfg_location(), ],
            'backoff_seconds': self.get_backoff_seconds(),
            'backoff_factor': self.get_backoff_factor(),
            'max_launch_delay_seconds': self.get_max_launch_delay_seconds(),
            'health_checks': self.get_healthchecks(service_namespace_config),
            'env': self.get_env(),
            'mem': float(self.get_mem()),
            'cpus': float(self.get_cpus()),
            'disk': float(self.get_disk()),
            'constraints': self.get_calculated_constraints(service_namespace_config),
            'instances': self.get_instances(),
            'cmd': self.get_cmd(),
            'args': self.get_args(),
        }

        # In BRIDGE mode the container port (module-level CONTAINER_PORT) is
        # mapped to an ephemeral host port (hostPort 0).
        if net == 'BRIDGE':
            complete_config['container']['docker']['portMappings'] = [
                {
                    'containerPort': CONTAINER_PORT,
                    'hostPort': 0,
                    'protocol': 'tcp',
                },
            ]

        accepted_resource_roles = self.get_accepted_resource_roles()
        if accepted_resource_roles is not None:
            complete_config['accepted_resource_roles'] = accepted_resource_roles

        code_sha = get_code_sha_from_dockerurl(docker_url)

        # Hash the config minus the (module-level) CONFIG_HASH_BLACKLIST keys
        # — those can change without requiring a bounce — plus any
        # force-bounce token, and embed the hash in the app id.
        config_hash = get_config_hash(
            {key: value for key, value in complete_config.items() if key not in CONFIG_HASH_BLACKLIST},
            force_bounce=self.get_force_bounce(),
        )
        complete_config['id'] = format_job_id(self.service, self.instance, code_sha, config_hash)

        log.debug("Complete configuration for instance is: %s", complete_config)
        return complete_config
Пример #16
0
def configure_and_run_docker_container(docker_client,
                                       docker_hash,
                                       service,
                                       instance,
                                       cluster,
                                       args,
                                       pull_image=False):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.

    :param docker_client: docker client handed through to run_docker_container
    :param docker_hash: image hash to run; replaced by the full registry URL
        when pull_image is True
    :param service: service whose instance config should be loaded
    :param instance: instance whose config should be loaded
    :param cluster: cluster to load the instance config for
    :param args: parsed CLI args; yelpsoa_config_root, interactive, cmd,
        healthcheck and healthcheck_only are read here
    :param pull_image: when True, pull the deployed image from the registry
        and run that instead of docker_hash
    """
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        # Not fatal for local-run: warn and fall back to an empty volume
        # config so the container can still be started.
        sys.stdout.write(
            PaastaColors.yellow(
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates\n"
                "PaaSTA is not configured locally on this host, and local-run may not behave\n"
                "the same way it would behave on a server configured for PaaSTA.\n"
            ))
        system_paasta_config = SystemPaastaConfig({"volumes": []},
                                                  '/etc/paasta')

    volumes = list()
    instance_config = get_instance_config(
        service=service,
        instance=instance,
        cluster=cluster,
        load_deployments=pull_image,
        soa_dir=args.yelpsoa_config_root,
    )

    if pull_image:
        docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                    instance_config.get_docker_image())
        docker_pull_image(docker_url)

        docker_hash = docker_url

    # if only one volume specified, extra_volumes should be converted to a list
    # BUG FIX: use isinstance() rather than `type(...) == dict` so dict
    # subclasses are normalized as well (PEP 8 / idiomatic type check).
    extra_volumes = instance_config.get_extra_volumes()
    if isinstance(extra_volumes, dict):
        extra_volumes = [extra_volumes]

    # Render each volume as the docker "host:container:mode" bind syntax.
    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' %
                       (volume['hostPath'], volume['containerPath'],
                        volume['mode'].lower()))

    # Pick the command: interactive shell, explicit --cmd, or the
    # cmd/args defined in the instance config (in that priority order).
    if args.interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command = shlex.split(command_from_config)
        else:
            command = instance_config.get_args()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=args.interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
    )
Пример #17
0
def configure_and_run_docker_container(docker_client,
                                       docker_hash,
                                       service,
                                       instance,
                                       cluster,
                                       system_paasta_config,
                                       args,
                                       pull_image=False,
                                       dry_run=False):
    """Resolve the instance's configuration and run it in a local Docker container.

    Prints the output of the run command on stdout.

    :param docker_client: Docker client used to run the container.
    :param docker_hash: image identifier to run; when ``None`` the docker URL
        is resolved from the service's marked-for-deployment image.
    :param service: name of the service to run.
    :param instance: instance name, or ``None`` for an interactive ad-hoc run.
    :param cluster: cluster whose soa-configs should be loaded.
    :param system_paasta_config: config object providing the docker registry
        and the system-wide volumes.
    :param args: parsed CLI args (yelpsoa_config_root, cmd, interactive,
        healthcheck, healthcheck_only, user_port, dry_run_json_dict).
    :param pull_image: when True, pull the image before running it.
    :param dry_run: passed through to ``run_docker_container``.
    :returns: 1 on configuration errors, otherwise the result of
        ``run_docker_container``.
    """
    if instance is None and args.healthcheck_only:
        paasta_print(
            "With --healthcheck-only, --instance MUST be provided!",
            file=sys.stderr,
        )
        return 1
    if instance is None and not sys.stdin.isatty():
        paasta_print(
            "--instance and --cluster must be specified when using paasta local-run without a tty!",
            file=sys.stderr,
        )
        return 1

    soa_dir = args.yelpsoa_config_root
    volumes = list()
    # Deployments only need to be loaded when the image sha must be resolved
    # or the image is going to be pulled.
    load_deployments = docker_hash is None or pull_image
    interactive = args.interactive

    try:
        if instance is None:
            # No instance given: fall back to an ad-hoc interactive config.
            instance_type = 'adhoc'
            instance = 'interactive'
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(service, instance,
                                                      cluster, soa_dir)
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        paasta_print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        paasta_print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s."
                "You can generate this by running:"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s"
                % {
                    'soa_dir': soa_dir,
                    'service': service,
                }),
            sep='\n',
            file=sys.stderr,
        )
        return 1

    if docker_hash is None:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(),
                instance_config.get_docker_image())
        except NoDockerImageError:
            paasta_print(
                PaastaColors.red(
                    "Error: No sha has been marked for deployment for the %s deploy group.\n"
                    "Please ensure this service has either run through a jenkins pipeline "
                    "or paasta mark-for-deployment has been run for %s\n" %
                    (instance_config.get_deploy_group(), service)),
                sep='',
                file=sys.stderr,
            )
            return 1
        docker_hash = docker_url

    if pull_image:
        # BUGFIX: pull the image we are actually going to run. The previous
        # code pulled ``docker_url``, which is unbound (NameError) whenever a
        # docker_hash was supplied by the caller together with pull_image,
        # because docker_url is only assigned in the docker_hash-is-None
        # branch above. At this point docker_hash always holds the image
        # reference to run.
        docker_pull_image(docker_hash)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if isinstance(extra_volumes, dict):
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' %
                       (volume['hostPath'], volume['containerPath'],
                        volume['mode'].lower()))

    if interactive is True and args.cmd is None:
        command = 'bash'
    elif args.cmd:
        command = args.cmd
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            # Apply any framework-specific rewriting to the configured cmd.
            command_modifier = command_function_for_framework(instance_type)
            command = command_modifier(command_from_config)
        else:
            command = instance_config.get_args()

    return run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        user_port=args.user_port,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
        framework=instance_type,
    )
Пример #18
0
def test_get_docker_url_no_error():
    """A valid registry/image pair is joined into ``registry/image``."""
    registry = "im.a-real.vm"
    image = "and-i-can-run:1.0"
    assert utils.get_docker_url(registry, image) == "im.a-real.vm/and-i-can-run:1.0"
Пример #19
0
def test_get_docker_url_with_no_docker_image():
    """Passing a null image must raise NoDockerImageError."""
    registry = 'fake_registry'
    missing_image = None
    with raises(utils.NoDockerImageError):
        utils.get_docker_url(registry, missing_image)
Пример #20
0
def configure_and_run_docker_container(
        docker_client,
        docker_hash,
        service,
        instance,
        cluster,
        system_paasta_config,
        args,
        pull_image=False,
        dry_run=False
):
    """Resolve the instance's configuration and run it in a local Docker container.

    Prints the output of the run command on stdout.

    :param docker_client: Docker client used to run the container.
    :param docker_hash: image identifier to run; when ``None`` the docker URL
        is resolved from the service's marked-for-deployment image.
    :param service: name of the service to run.
    :param instance: instance name, or ``None`` for an interactive ad-hoc run.
    :param cluster: cluster whose soa-configs should be loaded.
    :param system_paasta_config: config object providing the docker registry
        and the system-wide volumes.
    :param args: parsed CLI args (yelpsoa_config_root, cmd, interactive,
        healthcheck, healthcheck_only, dry_run_json_dict).
    :param pull_image: when True, pull the image before running it.
    :param dry_run: passed through to ``run_docker_container``.
    """

    soa_dir = args.yelpsoa_config_root

    volumes = list()

    # Deployments only need to be loaded when the image sha must be resolved
    # or the image is going to be pulled.
    load_deployments = docker_hash is None or pull_image

    interactive = args.interactive

    try:
        if instance is None and args.healthcheck:
            sys.stderr.write("With --healthcheck, --instance must be provided!\n")
            sys.exit(1)
        if instance is None:
            # No instance given: fall back to an ad-hoc interactive config.
            instance_type = 'adhoc'
            instance = 'interactive'
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(service, instance, cluster, soa_dir)
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        sys.stderr.write(str(e) + '\n')
        return
    except NoDeploymentsAvailable:
        sys.stderr.write(PaastaColors.red(
            "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
            "You can generate this by running:\n"
            "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n" % {
                'soa_dir': soa_dir, 'service': service}))
        return

    if docker_hash is None:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(), instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(PaastaColors.red(
                "Error: No sha has been marked for deployment for the %s deploy group.\n"
                "Please ensure this service has either run through a jenkins pipeline "
                "or paasta mark-for-deployment has been run for %s\n" % (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url

    if pull_image:
        # BUGFIX: pull the image we are actually going to run. Pulling
        # ``docker_url`` here (as the code previously did) raised NameError
        # whenever a docker_hash was supplied by the caller together with
        # pull_image, because docker_url is only assigned in the
        # docker_hash-is-None branch above.
        docker_pull_image(docker_hash)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if isinstance(extra_volumes, dict):
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' % (volume['hostPath'], volume['containerPath'], volume['mode'].lower()))

    if interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            # Apply any framework-specific rewriting to the configured cmd.
            command_modifier = command_function_for_framework(instance_type)
            command = shlex.split(command_modifier(command_from_config), posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
    )
Пример #21
0
    def format_marathon_app_dict(self):
        """Create the configuration that will be passed to the Marathon REST API.

        Currently compiles the following keys into one nice dict:

        - id: the ID of the image in Marathon
        - container: a dict containing the docker url and docker launch options. Needed by deimos.
        - uris: the dockerfile location.
        - env: environment variables for the container.
        - mem: the amount of memory required.
        - cpus: the number of cpus required.
        - disk: the amount of disk space required.
        - constraints: the constraints on the Marathon app.
        - instances: the number of instances required.
        - cmd: the command to be executed.
        - args: an alternative to cmd that requires the docker container to have an entrypoint.

        The last 7 keys are retrieved using the get_<key> functions defined above.

        :returns: A dict containing all of the keys listed above
        """

        # A set of config attributes that don't get included in the hash of the config.
        # These should be things that PaaSTA/Marathon knows how to change without requiring a bounce.
        CONFIG_HASH_BLACKLIST = set(['instances', 'backoff_seconds'])

        system_paasta_config = load_system_paasta_config()
        docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                    self.get_docker_image())
        service_namespace_config = load_service_namespace_config(
            service=self.service,
            namespace=self.get_nerve_namespace(),
        )
        docker_volumes = system_paasta_config.get_volumes(
        ) + self.get_extra_volumes()

        net = get_mesos_network_for_net(self.get_net())

        complete_config = {
            'container': {
                'docker': {
                    'image': docker_url,
                    'network': net,
                    "parameters": [
                        {
                            "key": "memory-swap",
                            "value": "%sm" % str(self.get_mem())
                        },
                    ]
                },
                'type': 'DOCKER',
                'volumes': docker_volumes,
            },
            'uris': [
                system_paasta_config.get_dockerfile_location(),
            ],
            'backoff_seconds': self.get_backoff_seconds(),
            'backoff_factor': 2,
            'health_checks': self.get_healthchecks(service_namespace_config),
            'env': self.get_env(),
            'mem': float(self.get_mem()),
            'cpus': float(self.get_cpus()),
            'disk': float(self.get_disk()),
            'constraints': self.get_constraints(service_namespace_config),
            'instances': self.get_instances(),
            'cmd': self.get_cmd(),
            'args': self.get_args(),
        }

        # BUGFIX: previously ``portMappings`` was embedded unconditionally in
        # the initial dict and then re-assigned the identical value for
        # BRIDGE networking. Port mappings are only meaningful with bridge
        # networking, so they are now emitted only in that case.
        if net == 'BRIDGE':
            complete_config['container']['docker']['portMappings'] = [
                {
                    'containerPort': CONTAINER_PORT,
                    'hostPort': 0,
                    'protocol': 'tcp',
                },
            ]

        accepted_resource_roles = self.get_accepted_resource_roles()
        if accepted_resource_roles is not None:
            complete_config[
                'accepted_resource_roles'] = accepted_resource_roles

        code_sha = get_code_sha_from_dockerurl(docker_url)

        # Hash everything except attributes Marathon can change in place, so
        # that equal hashes mean "no bounce needed".
        config_hash = get_config_hash(
            {
                key: value
                for key, value in complete_config.items()
                if key not in CONFIG_HASH_BLACKLIST
            },
            force_bounce=self.get_force_bounce(),
        )
        complete_config['id'] = format_job_id(self.service, self.instance,
                                              code_sha, config_hash)

        log.debug("Complete configuration for instance is: %s",
                  complete_config)
        return complete_config
Пример #22
0
def configure_and_run_docker_container(docker_client,
                                       docker_hash,
                                       service,
                                       instance,
                                       cluster,
                                       args,
                                       pull_image=False,
                                       dry_run=False):
    """Resolve the instance's configuration and run it in a local Docker container.

    Prints the output of the run command on stdout.

    :param docker_client: Docker client used to run the container.
    :param docker_hash: image identifier to run; when ``None`` the docker URL
        is resolved from the service's marked-for-deployment image.
    :param service: name of the service to run.
    :param instance: instance name within the service.
    :param cluster: cluster whose soa-configs should be loaded.
    :param args: parsed CLI args (yelpsoa_config_root, cmd, interactive,
        healthcheck, healthcheck_only, dry_run_json_dict).
    :param pull_image: when True, pull the image before running it.
    :param dry_run: passed through to ``run_docker_container``.
    """
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        # Best effort: continue with an empty volume config when the host is
        # not configured for PaaSTA.
        sys.stdout.write(
            PaastaColors.yellow(
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates\n"
                "PaaSTA is not configured locally on this host, and local-run may not behave\n"
                "the same way it would behave on a server configured for PaaSTA.\n"
            ))
        system_paasta_config = SystemPaastaConfig({"volumes": []},
                                                  '/etc/paasta')

    soa_dir = args.yelpsoa_config_root

    volumes = list()

    instance_type = validate_service_instance(service, instance, cluster,
                                              soa_dir)

    # Deployments only need to be loaded when the image sha must be resolved
    # or the image is going to be pulled.
    load_deployments = docker_hash is None or pull_image

    try:
        instance_config = get_instance_config(
            service=service,
            instance=instance,
            cluster=cluster,
            load_deployments=load_deployments,
            soa_dir=soa_dir,
        )
    except NoDeploymentsAvailable:
        sys.stderr.write(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
                "You can generate this by running:\n"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n"
                % {
                    'soa_dir': soa_dir,
                    'service': service
                }))
        return

    if docker_hash is None:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(),
                instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(
                PaastaColors.red(
                    "Error: No sha has been marked for deployment for the %s deploy group.\n"
                    "Please ensure this service has either run through a jenkins pipeline "
                    "or paasta mark-for-deployment has been run for %s" %
                    (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url

    if pull_image:
        # BUGFIX: pull the image we are actually going to run. Pulling
        # ``docker_url`` here (as the code previously did) raised NameError
        # whenever a docker_hash was supplied by the caller together with
        # pull_image, because docker_url is only assigned in the
        # docker_hash-is-None branch above.
        docker_pull_image(docker_hash)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if isinstance(extra_volumes, dict):
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' %
                       (volume['hostPath'], volume['containerPath'],
                        volume['mode'].lower()))

    if args.interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            # Apply any framework-specific rewriting to the configured cmd.
            command_modifier = command_function_for_framework(instance_type)
            command = shlex.split(command_modifier(command_from_config),
                                  posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=args.interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
    )
Пример #23
0
def configure_and_run_docker_container(docker_client, docker_hash, service, instance, cluster, args, pull_image=False):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        warning = PaastaColors.yellow(
            "Warning: Couldn't load config files from '/etc/paasta'. This indicates\n"
            "PaaSTA is not configured locally on this host, and local-run may not behave\n"
            "the same way it would behave on a server configured for PaaSTA.\n"
        )
        sys.stdout.write(warning)
        system_paasta_config = SystemPaastaConfig({"volumes": []}, '/etc/paasta')

    instance_config = get_instance_config(
        service=service,
        instance=instance,
        cluster=cluster,
        load_deployments=pull_image,
        soa_dir=args.yelpsoa_config_root,
    )

    if pull_image:
        docker_url = get_docker_url(
            system_paasta_config.get_docker_registry(), instance_config.get_docker_image())
        docker_pull_image(docker_url)

        docker_hash = docker_url

    # A single extra volume may arrive as a bare dict; normalise it to a list.
    extra_volumes = instance_config.get_extra_volumes()
    if type(extra_volumes) == dict:
        extra_volumes = [extra_volumes]

    volumes = [
        '%s:%s:%s' % (vol['hostPath'], vol['containerPath'], vol['mode'].lower())
        for vol in system_paasta_config.get_volumes() + extra_volumes
    ]

    # Pick the command: an interactive shell, the explicit --cmd, or the
    # cmd/args from the instance's configuration.
    if args.interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd)
    else:
        configured_cmd = instance_config.get_cmd()
        if configured_cmd:
            command = shlex.split(configured_cmd)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=args.interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
    )
Пример #24
0
def configure_and_run_docker_container(docker_client,
                                       docker_hash,
                                       service,
                                       instance,
                                       cluster,
                                       system_paasta_config,
                                       args,
                                       pull_image=False,
                                       dry_run=False):
    """Resolve the instance's configuration and run it in a local Docker container.

    Prints the output of the run command on stdout.

    :param docker_client: Docker client used to run the container.
    :param docker_hash: image identifier to run; when ``None`` the docker URL
        is resolved from the service's marked-for-deployment image.
    :param service: name of the service to run.
    :param instance: instance name, or ``None`` for an interactive ad-hoc run.
    :param cluster: cluster whose soa-configs should be loaded.
    :param system_paasta_config: config object providing the docker registry
        and the system-wide volumes.
    :param args: parsed CLI args (yelpsoa_config_root, cmd, interactive,
        healthcheck, healthcheck_only, dry_run_json_dict).
    :param pull_image: when True, pull the image before running it.
    :param dry_run: passed through to ``run_docker_container``.
    """

    soa_dir = args.yelpsoa_config_root

    volumes = list()

    # Deployments only need to be loaded when the image sha must be resolved
    # or the image is going to be pulled.
    load_deployments = docker_hash is None or pull_image

    interactive = args.interactive

    try:
        if instance is None and args.healthcheck:
            sys.stderr.write(
                "With --healthcheck, --instance must be provided!\n")
            sys.exit(1)
        if instance is None:
            # No instance given: fall back to an ad-hoc interactive config.
            instance_type = 'adhoc'
            instance = 'interactive'
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(service, instance,
                                                      cluster, soa_dir)
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        sys.stderr.write(str(e) + '\n')
        return
    except NoDeploymentsAvailable:
        sys.stderr.write(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
                "You can generate this by running:\n"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n"
                % {
                    'soa_dir': soa_dir,
                    'service': service
                }))
        return

    if docker_hash is None:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(),
                instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(
                PaastaColors.red(
                    "Error: No sha has been marked for deployment for the %s deploy group.\n"
                    "Please ensure this service has either run through a jenkins pipeline "
                    "or paasta mark-for-deployment has been run for %s\n" %
                    (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url

    if pull_image:
        # BUGFIX: pull the image we are actually going to run. Pulling
        # ``docker_url`` here (as the code previously did) raised NameError
        # whenever a docker_hash was supplied by the caller together with
        # pull_image, because docker_url is only assigned in the
        # docker_hash-is-None branch above.
        docker_pull_image(docker_hash)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if isinstance(extra_volumes, dict):
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' %
                       (volume['hostPath'], volume['containerPath'],
                        volume['mode'].lower()))

    if interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            # Apply any framework-specific rewriting to the configured cmd.
            command_modifier = command_function_for_framework(instance_type)
            command = shlex.split(command_modifier(command_from_config),
                                  posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
    )
Пример #25
0
def configure_and_run_docker_container(
        docker_client,
        docker_hash,
        service,
        instance,
        cluster,
        args,
        pull_image=False,
        dry_run=False
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        warning = PaastaColors.yellow(
            "Warning: Couldn't load config files from '/etc/paasta'. This indicates\n"
            "PaaSTA is not configured locally on this host, and local-run may not behave\n"
            "the same way it would behave on a server configured for PaaSTA.\n"
        )
        sys.stdout.write(warning)
        system_paasta_config = SystemPaastaConfig({"volumes": []}, '/etc/paasta')

    soa_dir = args.yelpsoa_config_root

    instance_type = validate_service_instance(service, instance, cluster, soa_dir)

    try:
        instance_config = get_instance_config(
            service=service,
            instance=instance,
            cluster=cluster,
            load_deployments=pull_image,
            soa_dir=soa_dir,
        )
    except NoDeploymentsAvailable:
        sys.stderr.write(PaastaColors.red(
            "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
            "You can generate this by running:\n"
            "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n" % {
                'soa_dir': soa_dir, 'service': service}))
        return

    if pull_image:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(), instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(PaastaColors.red(
                "Error: No sha has been marked for deployment for the %s deploy group.\n"
                "Please ensure this service has either run through a jenkins pipeline "
                "or paasta mark-for-deployment has been run for %s" % (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url
        docker_pull_image(docker_url)

    # A single extra volume may arrive as a bare dict; normalise it to a list.
    extra_volumes = instance_config.get_extra_volumes()
    if type(extra_volumes) == dict:
        extra_volumes = [extra_volumes]

    volumes = [
        '%s:%s:%s' % (vol['hostPath'], vol['containerPath'], vol['mode'].lower())
        for vol in system_paasta_config.get_volumes() + extra_volumes
    ]

    # Pick the command: an interactive shell, the explicit --cmd, or the
    # (possibly framework-rewritten) cmd/args from the instance's config.
    if args.interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        configured_cmd = instance_config.get_cmd()
        if configured_cmd:
            modifier = command_function_for_framework(instance_type)
            command = shlex.split(modifier(configured_cmd), posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=args.interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
    )
Пример #26
0
    def base_task(self, system_paasta_config, portMappings=True):
        """Return a TaskInfo Dict with all the fields corresponding to the
        configuration filled in.

        Does not include task.slave_id or a task.id; those need to be
        computed separately.

        :param system_paasta_config: config object providing the docker
            registry, system-wide volumes and the dockercfg location.
        :param portMappings: when True, add a tcp port mapping and a 'ports'
            RANGES resource whose concrete values are filled in later by
            tasks_and_state_for_offer().
        :returns: a Dict describing the task (container, command, resources).
        """
        # Combine system volumes with this config's own volumes.
        docker_volumes = self.get_volumes(
            system_volumes=system_paasta_config.get_volumes())
        task = Dict({
            'container': {
                'type':
                'DOCKER',
                'docker': {
                    'image':
                    get_docker_url(system_paasta_config.get_docker_registry(),
                                   self.get_docker_image()),
                    # Extra `docker run` parameters (key/value pairs).
                    'parameters': [
                        Dict(key=param['key'], value=param['value'])
                        for param in self.format_docker_parameters()
                    ],
                    'network':
                    self.get_mesos_network_mode()
                },
                # Volume keys are translated from the camelCase config form.
                'volumes': [{
                    'container_path': volume['containerPath'],
                    'host_path': volume['hostPath'],
                    'mode': volume['mode'].upper(),
                } for volume in docker_volumes],
            },
            'command': {
                'value':
                self.get_cmd(),
                # The dockercfg is fetched alongside the task but not extracted.
                'uris': [{
                    'value': system_paasta_config.get_dockercfg_location(),
                    'extract': False
                }]
            },
            'resources': [{
                'name': 'cpus',
                'type': 'SCALAR',
                'scalar': {
                    'value': self.get_cpus()
                },
            }, {
                'name': 'mem',
                'type': 'SCALAR',
                'scalar': {
                    'value': self.get_mem()
                }
            }],
        })

        if portMappings:
            task.container.docker.port_mappings = [
                Dict(
                    container_port=self.get_container_port(),
                    # filled by tasks_and_state_for_offer()
                    host_port=0,
                    protocol='tcp')
            ]

            task.resources.append(
                Dict(
                    name='ports',
                    type='RANGES',
                    ranges=Dict(
                        # filled by tasks_and_state_for_offer
                        range=[Dict(begin=0, end=0)])))

        task.name = self.task_name(task)

        # Return a copy so callers can mutate it freely.
        return Dict(task)