Beispiel #1
0
def get_marathon_services_running_here_for_nerve(cluster, soa_dir):
    """Build (registration, nerve config) pairs for every marathon service
    instance running on this host.

    :param cluster: the PaaSTA cluster name; when falsy it is looked up
        from the system PaaSTA config.
    :param soa_dir: the SOA configuration directory to read from.
    :returns: a list of (registration, nerve_dict) tuples; [] when no
        cluster / PaaSTA config is available.
    """
    if not cluster:
        try:
            cluster = load_system_paasta_config().get_cluster()
        except PaastaNotConfiguredError:
            # No cluster configured (or no PaaSTA config file at all)
            # means no marathon services can be running here.
            return []
    nerve_list = []
    for name, instance, port in marathon_services_running_here():
        try:
            for registration in read_all_registrations_for_service_instance(
                    name, instance, cluster, soa_dir):
                reg_service, reg_namespace, _, __ = decompose_job_id(registration)
                nerve_dict = load_service_namespace_config(
                    service=reg_service,
                    namespace=reg_namespace,
                    soa_dir=soa_dir,
                )
                # Only namespaces registered in smartstack get a nerve entry.
                if nerve_dict.is_in_smartstack():
                    nerve_dict['port'] = port
                    nerve_list.append((registration, nerve_dict))
        except KeyError:
            continue  # SOA configs got deleted for this app, it'll get cleaned up
    return nerve_list
Beispiel #2
0
def get_healthcheck_for_instance(service,
                                 instance,
                                 service_manifest,
                                 random_port,
                                 soa_dir=DEFAULT_SOA_DIR):
    """Resolve the healthcheck for a service instance.

    :param service: the service name
    :param instance: the instance name
    :param service_manifest: the instance config object providing the
        healthcheck mode / uri / cmd accessors
    :param random_port: the host port substituted into http/tcp checks
    :param soa_dir: the SOA configuration directory to read from
    :returns: a (mode, healthcheck_command) tuple, or (None, None) when
        the mode is not one of http/tcp/cmd
    """
    smartstack_config = load_service_namespace_config(service, instance,
                                                      soa_dir)
    mode = service_manifest.get_healthcheck_mode(smartstack_config)
    hostname = socket.getfqdn()

    if mode == "http":
        path = service_manifest.get_healthcheck_uri(smartstack_config)
        return (mode, '%s://%s:%d%s' % (mode, hostname, random_port, path))
    if mode == "tcp":
        return (mode, '%s://%s:%d' % (mode, hostname, random_port))
    if mode == 'cmd':
        return (mode, service_manifest.get_healthcheck_cmd())
    return (None, None)
Beispiel #3
0
def _namespaced_get_classic_service_information_for_nerve(
        name, namespace, soa_dir):
    """Build the (nerve_name, nerve_dict) pair for one classic service
    namespace.

    :param name: the service name
    :param namespace: the smartstack namespace of the service
    :param soa_dir: the SOA configuration directory to read from
    :returns: a (nerve_name, nerve_dict) tuple
    """
    nerve_dict = load_service_namespace_config(name, namespace, soa_dir)
    port_file = os.path.join(soa_dir, name, 'port')
    # Prefer a port defined on the namespace configuration itself; only
    # fall back to the service-wide port file when the namespace does not
    # define one. (The original unconditionally overwrote any
    # namespace-level port with the port-file value.)
    nerve_dict['port'] = (nerve_dict.get('port', None)
                          or service_configuration_lib.read_port(port_file))
    nerve_name = compose_job_id(name, namespace)
    return (nerve_name, nerve_dict)
Beispiel #4
0
def get_marathon_services_running_here_for_nerve(cluster, soa_dir):
    """Return (registration, nerve_dict) tuples for every marathon service
    instance running on this host.

    :param cluster: the PaaSTA cluster name; looked up from the system
        PaaSTA config when falsy.
    :param soa_dir: the SOA configuration directory to read from.
    :returns: a list of (registration, nerve_dict) tuples; [] when no
        PaaSTA config is available.
    """
    if not cluster:
        try:
            cluster = load_system_paasta_config().get_cluster()
        # In the cases where there is *no* cluster or in the case
        # where there isn't a Paasta configuration file at *all*, then
        # there must be no marathon services running here, so we catch
        # these custom exceptions and return [].
        except (PaastaNotConfiguredError):
            return []
    # When a cluster is defined in mesos, let's iterate through marathon services
    marathon_services = marathon_services_running_here()
    nerve_list = []
    for name, instance, port in marathon_services:
        try:
            registrations = read_all_registrations_for_service_instance(
                name, instance, cluster, soa_dir
            )
            for registration in registrations:
                reg_service, reg_namespace, _, __ = decompose_job_id(registration)
                nerve_dict = load_service_namespace_config(
                    service=reg_service, namespace=reg_namespace, soa_dir=soa_dir,
                )
                # Only namespaces registered in smartstack get a nerve entry.
                if not nerve_dict.is_in_smartstack():
                    continue
                nerve_dict['port'] = port
                nerve_list.append((registration, nerve_dict))
        except KeyError:
            continue  # SOA configs got deleted for this app, it'll get cleaned up
    return nerve_list
Beispiel #5
0
def get_proxy_port_for_instance(name,
                                instance,
                                cluster=None,
                                soa_dir=DEFAULT_SOA_DIR):
    """Look up the smartstack proxy_port for a service instance.

    The instance's registration is resolved first; the proxy_port is then
    read from the smartstack configuration for that registration's
    namespace.

    :param name: The service name
    :param instance: The instance of the service
    :param cluster: The cluster to read the configuration for
    :param soa_dir: The SOA config directory to read from
    :returns: The proxy_port for the service instance, or None if not defined
    """
    registration = read_registration_for_service_instance(
        name, instance, cluster, soa_dir)
    reg_service, reg_namespace, _, __ = decompose_job_id(registration)
    smartstack_config = load_service_namespace_config(
        service=reg_service,
        namespace=reg_namespace,
        soa_dir=soa_dir,
    )
    return smartstack_config.get('proxy_port')
Beispiel #6
0
 def get_pod_template_spec(
     self,
     code_sha: str,
     system_paasta_config: SystemPaastaConfig,
 ) -> V1PodTemplateSpec:
     """Build the V1PodTemplateSpec for this instance's pods.

     :param code_sha: git sha recorded in the pod's ``git_sha`` label.
     :param system_paasta_config: system-wide PaaSTA configuration, used
         for the system volumes mounted into the pod.
     :returns: a V1PodTemplateSpec with service/instance/git_sha labels,
         the instance's containers, and its volumes.
     """
     service_namespace_config = load_service_namespace_config(
         service=self.service,
         namespace=self.get_nerve_namespace(),
     )
     # Instance volumes are combined with the system-wide volumes.
     docker_volumes = self.get_volumes(
         system_volumes=system_paasta_config.get_volumes())
     return V1PodTemplateSpec(
         metadata=V1ObjectMeta(labels={
             "service": self.get_service(),
             "instance": self.get_instance(),
             "git_sha": code_sha,
         }, ),
         spec=V1PodSpec(
             containers=self.get_kubernetes_containers(
                 docker_volumes=docker_volumes,
                 aws_ebs_volumes=self.get_aws_ebs_volumes(),
                 system_paasta_config=system_paasta_config,
                 service_namespace_config=service_namespace_config,
             ),
             restart_policy="Always",
             volumes=self.get_pod_volumes(
                 docker_volumes=docker_volumes,
                 aws_ebs_volumes=self.get_aws_ebs_volumes(),
             ),
         ),
     )
Beispiel #7
0
def load_paasta_native_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Load the PaastaNativeServiceConfig for one service instance.

    :param service: the service name
    :param instance: the instance (job) name
    :param cluster: the cluster whose paasta_native config file is read
    :param load_deployments: when True, also resolve the deployed branch
        metadata from deployments.json
    :param soa_dir: the SOA configuration directory to read from
    :returns: a PaastaNativeServiceConfig with its
        service_namespace_config attribute populated
    :raises UnknownPaastaNativeServiceError: if no job named ``instance``
        exists in the cluster's paasta_native config file
    """
    service_paasta_native_jobs = read_paasta_native_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in service_paasta_native_jobs:
        filename = '%s/%s/paasta_native-%s.yaml' % (soa_dir, service, cluster)
        # Read via a context manager so the file handle is closed
        # deterministically (the bare open().read() leaked it).
        with open(filename) as config_file:
            config_contents = config_file.read()
        raise UnknownPaastaNativeServiceError(
            'No job named "%s" in config file %s: \n%s' % (instance, filename, config_contents)
        )
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = get_paasta_branch(cluster=cluster, instance=instance)
        branch_dict = deployments_json.get_branch_dict(service, branch)

    service_config = PaastaNativeServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=service_paasta_native_jobs[instance],
        branch_dict=branch_dict,
    )

    service_namespace_config = load_service_namespace_config(service, service_config.get_nerve_namespace(),
                                                             soa_dir=soa_dir)
    service_config.service_namespace_config = service_namespace_config

    return service_config
def load_paasta_native_job_config(service,
                                  instance,
                                  cluster,
                                  load_deployments=True,
                                  soa_dir=DEFAULT_SOA_DIR):
    """Load the PaastaNativeServiceConfig for one service instance.

    :param service: the service name
    :param instance: the instance (job) name
    :param cluster: the cluster whose paasta_native config file is read
    :param load_deployments: when True, also resolve the deployed branch
        metadata from deployments.json
    :param soa_dir: the SOA configuration directory to read from
    :returns: a PaastaNativeServiceConfig with its
        service_namespace_config attribute populated
    :raises UnknownPaastaNativeServiceError: if no job named ``instance``
        exists in the cluster's paasta_native config file
    """
    service_paasta_native_jobs = read_paasta_native_jobs_for_service(
        service, cluster, soa_dir=soa_dir)
    if instance not in service_paasta_native_jobs:
        filename = '%s/%s/paasta_native-%s.yaml' % (soa_dir, service, cluster)
        # Read via a context manager so the file handle is closed
        # deterministically (the bare open().read() leaked it).
        with open(filename) as config_file:
            config_contents = config_file.read()
        raise UnknownPaastaNativeServiceError(
            'No job named "%s" in config file %s: \n%s' %
            (instance, filename, config_contents))
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = get_paasta_branch(cluster=cluster, instance=instance)
        branch_dict = deployments_json.get_branch_dict(service, branch)

    service_config = PaastaNativeServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=service_paasta_native_jobs[instance],
        branch_dict=branch_dict,
    )

    service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=service_config.get_nerve_namespace(),
        soa_dir=soa_dir)
    service_config.service_namespace_config = service_namespace_config

    return service_config
Beispiel #9
0
def _namespaced_get_classic_service_information_for_nerve(
        name, namespace, soa_dir):
    """Assemble the (nerve_name, nerve_dict) registration pair for one
    classic service namespace.

    A port defined on the namespace configuration itself takes
    precedence; otherwise the service-wide ``port`` file is read.
    """
    nerve_dict = load_service_namespace_config(name, namespace, soa_dir)
    namespace_port = nerve_dict.get('port', None)
    if not namespace_port:
        port_file = os.path.join(soa_dir, name, 'port')
        namespace_port = service_configuration_lib.read_port(port_file)
    nerve_dict['port'] = namespace_port
    return (compose_job_id(name, namespace), nerve_dict)
def load_paasta_native_job_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
    instance_type: str = "paasta_native",
    config_overrides: Optional[NativeServiceConfigDict] = None,
) -> NativeServiceConfig:
    """Load the NativeServiceConfig for one service instance.

    :param load_deployments: when True, also resolve the deployed branch
        metadata from the v2 deployments json.
    :param config_overrides: optional dict whose entries take precedence
        over the on-disk instance config.
    :returns: a NativeServiceConfig whose service_namespace_config
        attribute has been populated.
    """
    config_dict = cast(
        NativeServiceConfigDict,
        load_service_instance_config(
            service=service,
            instance=instance,
            instance_type=instance_type,
            cluster=cluster,
            soa_dir=soa_dir,
        ),
    )
    config_dict.update(config_overrides or {})

    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # A bootstrap config is built first: the branch and deploy group
        # are derived from the (possibly overridden) instance config.
        bootstrap_config = NativeServiceConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=config_dict,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch_dict = deployments_json.get_branch_dict(
            service,
            bootstrap_config.get_branch(),
            bootstrap_config.get_deploy_group(),
        )

    service_config = NativeServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=config_dict,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
    service_config.service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=service_config.get_nerve_namespace(),
        soa_dir=soa_dir,
    )
    return service_config
Beispiel #11
0
def load_paasta_native_job_config(
    service,
    instance,
    cluster,
    load_deployments=True,
    soa_dir=DEFAULT_SOA_DIR,
    instance_type='paasta_native',
    config_overrides=None,
) -> NativeServiceConfig:
    """Load a NativeServiceConfig for one service instance.

    Any ``config_overrides`` are merged over the on-disk instance config;
    branch metadata is resolved from the v2 deployments json when
    ``load_deployments`` is True.
    """
    all_jobs = read_service_config(
        service=service,
        instance=instance,
        instance_type=instance_type,
        cluster=cluster,
        soa_dir=soa_dir,
    )
    merged_config_dict = all_jobs[instance].copy()
    merged_config_dict.update(config_overrides or {})

    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # A throwaway config derives the branch and deploy group from the
        # merged config dict.
        throwaway_config = NativeServiceConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=merged_config_dict,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch_dict = deployments_json.get_branch_dict(
            service,
            throwaway_config.get_branch(),
            throwaway_config.get_deploy_group(),
        )

    service_config = NativeServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config_dict,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
    service_config.service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=service_config.get_nerve_namespace(),
        soa_dir=soa_dir,
    )
    return service_config
Beispiel #12
0
def get_kubernetes_services_running_here_for_nerve(
    cluster: Optional[str],
    soa_dir: str,
) -> Sequence[Tuple[str, ServiceNamespaceConfig]]:
    """Return (registration, nerve_dict) tuples for every kubernetes
    service running on this host.

    :param cluster: the PaaSTA cluster; looked up from the system config
        when falsy.
    :param soa_dir: the SOA configuration directory to read from.
    :returns: a list of (registration, nerve_dict) tuples; [] when there
        is no PaaSTA config or k8s pod registration is disabled.
    """
    try:
        system_paasta_config = load_system_paasta_config()
        if not cluster:
            cluster = system_paasta_config.get_cluster()
        # In the cases where there is *no* cluster or in the case
        # where there isn't a Paasta configuration file at *all*, then
        # there must be no kubernetes services running here, so we catch
        # these custom exceptions and return [].
        if not system_paasta_config.get_register_k8s_pods():
            return []
    except PaastaNotConfiguredError:
        log.warning(
            "No PaaSTA config so skipping registering k8s pods in nerve")
        return []
    kubernetes_services = get_kubernetes_services_running_here()
    nerve_list = []
    for kubernetes_service in kubernetes_services:
        try:
            for registration in kubernetes_service.registrations:
                reg_service, reg_namespace, _, __ = decompose_job_id(
                    registration)
                # A bad smartstack config for one registration must not
                # prevent the remaining registrations from being handled.
                try:
                    nerve_dict = load_service_namespace_config(
                        service=reg_service,
                        namespace=reg_namespace,
                        soa_dir=soa_dir,
                    )
                except Exception as e:
                    log.warning(str(e))
                    log.warning(
                        f"Could not get smartstack config for {reg_service}.{reg_namespace}, skipping"
                    )
                    # but the show must go on!
                    continue
                if not nerve_dict.is_in_smartstack():
                    continue
                nerve_dict['port'] = kubernetes_service.port
                nerve_dict['service_ip'] = kubernetes_service.pod_ip
                # hacheck_ip is only set when the hacheck sidecar is enabled.
                if system_paasta_config.get_kubernetes_use_hacheck_sidecar():
                    nerve_dict['hacheck_ip'] = kubernetes_service.pod_ip
                nerve_list.append((registration, nerve_dict))
        except (KeyError):
            continue  # SOA configs got deleted for this app, it'll get cleaned up

    return nerve_list
Beispiel #13
0
def get_kubernetes_services_running_here_for_nerve(
    cluster: str,
    soa_dir: str,
) -> Sequence[Tuple[str, ServiceNamespaceConfig]]:
    """Return (registration, nerve_dict) tuples for every kubernetes
    service running on this host.

    :param cluster: the PaaSTA cluster; looked up from the system config
        when falsy.
    :param soa_dir: the SOA configuration directory to read from.
    :returns: a list of (registration, nerve_dict) tuples; [] when there
        is no PaaSTA config or k8s pod registration is disabled.
    """
    try:
        system_paasta_config = load_system_paasta_config()
        if not cluster:
            cluster = system_paasta_config.get_cluster()
        # In the cases where there is *no* cluster or in the case
        # where there isn't a Paasta configuration file at *all*, then
        # there must be no kubernetes services running here, so we catch
        # these custom exceptions and return [].
        if not system_paasta_config.get_register_k8s_pods():
            return []
    except PaastaNotConfiguredError:
        log.warning(
            "No PaaSTA config so skipping registering k8s pods in nerve")
        return []
    kubernetes_services = get_kubernetes_services_running_here()
    nerve_list = []
    for kubernetes_service in kubernetes_services:
        try:
            # Registrations come from the instance config rather than the
            # running pod metadata.
            kubernetes_service_config = load_kubernetes_service_config(
                service=kubernetes_service.name,
                instance=kubernetes_service.instance,
                cluster=cluster,
                load_deployments=False,
                soa_dir=soa_dir,
            )
            for registration in kubernetes_service_config.get_registrations():
                reg_service, reg_namespace, _, __ = decompose_job_id(
                    registration)
                nerve_dict = load_service_namespace_config(
                    service=reg_service,
                    namespace=reg_namespace,
                    soa_dir=soa_dir,
                )
                if not nerve_dict.is_in_smartstack():
                    continue
                nerve_dict['port'] = kubernetes_service.port
                nerve_dict['service_ip'] = kubernetes_service.pod_ip
                nerve_dict['hacheck_ip'] = kubernetes_service.pod_ip
                nerve_list.append((registration, nerve_dict))
        except (KeyError, NoConfigurationForServiceError):
            continue  # SOA configs got deleted for this app, it'll get cleaned up

    return nerve_list
Beispiel #14
0
def get_proxy_port_for_instance(name, instance, cluster=None, soa_dir=DEFAULT_SOA_DIR):
    """Return the smartstack proxy_port for a service instance, or None.

    The instance's registration is resolved first; its namespace's
    smartstack configuration is then consulted for the proxy_port.

    :param name: The service name
    :param instance: The instance of the service
    :param cluster: The cluster to read the configuration for
    :param soa_dir: The SOA config directory to read from
    :returns: The proxy_port for the service instance, or None if not defined
    """
    registration = read_registration_for_service_instance(name, instance, cluster, soa_dir)
    reg_service, reg_namespace, _, __ = decompose_job_id(registration)
    smartstack_config = load_service_namespace_config(reg_service, reg_namespace, soa_dir)
    return smartstack_config.get('proxy_port')
Beispiel #15
0
def get_paasta_native_services_running_here_for_nerve(
    cluster: Optional[str],
    soa_dir: str,
    hostname: Optional[str] = None,
) -> Sequence[Tuple[str, ServiceNamespaceConfig]]:
    """Return (registration, nerve_dict) tuples for every paasta_native
    service instance running on this host.

    :param cluster: the PaaSTA cluster; looked up from the system config
        when falsy.
    :param soa_dir: the SOA configuration directory to read from.
    :param hostname: optional hostname override for the running-services
        lookup.
    :returns: a list of (registration, nerve_dict) tuples; [] when no
        PaaSTA config is available or native registration is disabled.
    """
    if not cluster:
        try:
            system_paasta_config = load_system_paasta_config()
            cluster = system_paasta_config.get_cluster()
        # In the cases where there is *no* cluster or in the case
        # where there isn't a Paasta configuration file at *all*, then
        # there must be no native services running here, so we catch
        # these custom exceptions and return [].
        except (PaastaNotConfiguredError):
            return []
        # NOTE(review): this register_native_services check only runs when
        # no cluster was passed in; callers that supply a cluster skip
        # it — confirm this is intentional.
        if not system_paasta_config.get_register_native_services():
            return []
    # When a cluster is defined in mesos, let's iterate through paasta_native services
    paasta_native_services = paasta_native_services_running_here(
        hostname=hostname)
    nerve_list = []
    for name, instance, port in paasta_native_services:
        try:
            job_config = load_paasta_native_job_config(
                service=name,
                instance=instance,
                cluster=cluster,
                load_deployments=False,
                soa_dir=soa_dir,
            )
            for registration in job_config.get_registrations():
                reg_service, reg_namespace, _, __ = decompose_job_id(
                    registration)
                nerve_dict = load_service_namespace_config(
                    service=reg_service,
                    namespace=reg_namespace,
                    soa_dir=soa_dir,
                )
                # Only namespaces registered in smartstack get a nerve entry.
                if not nerve_dict.is_in_smartstack():
                    continue
                nerve_dict['port'] = port
                nerve_list.append((registration, nerve_dict))
        except KeyError:
            continue  # SOA configs got deleted for this app, it'll get cleaned up
    return nerve_list
def load_paasta_native_job_config(
    service,
    instance,
    cluster,
    load_deployments=True,
    soa_dir=DEFAULT_SOA_DIR,
    instance_type='paasta_native',
    config_overrides=None,
) -> NativeServiceConfig:
    """Load the NativeServiceConfig for one service instance.

    :param load_deployments: when True, resolve branch metadata from
        deployments.json.
    :param config_overrides: optional dict merged over the on-disk
        instance config before constructing the NativeServiceConfig.
    :returns: a NativeServiceConfig whose service_namespace_config
        attribute has been populated.
    """
    service_paasta_native_jobs = read_service_config(
        service=service,
        instance=instance,
        instance_type=instance_type,
        cluster=cluster,
        soa_dir=soa_dir,
    )
    branch_dict: BranchDict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = get_paasta_branch(cluster=cluster, instance=instance)
        branch_dict = deployments_json.get_branch_dict(service, branch)

    # Overrides take precedence over the on-disk instance config; the copy
    # keeps the cached on-disk dict unmodified.
    instance_config_dict = service_paasta_native_jobs[instance].copy()
    instance_config_dict.update(config_overrides or {})
    service_config = NativeServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=instance_config_dict,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )

    service_namespace_config = load_service_namespace_config(
        service=service,
        namespace=service_config.get_nerve_namespace(),
        soa_dir=soa_dir,
    )
    service_config.service_namespace_config = service_namespace_config

    return service_config
Beispiel #17
0
 def get_pod_template_spec(
     self,
     code_sha: str,
     system_paasta_config: SystemPaastaConfig,
 ) -> V1PodTemplateSpec:
     """Build the V1PodTemplateSpec for this instance's pods, including
     paasta labels and the smartstack_registrations annotation.

     :param code_sha: git sha recorded in the yelp.com/paasta_git_sha label.
     :param system_paasta_config: system-wide PaaSTA configuration used
         for the system volumes mounted into the pod.
     :returns: a fully populated V1PodTemplateSpec.
     """
     service_namespace_config = load_service_namespace_config(
         service=self.service,
         namespace=self.get_nerve_namespace(),
     )
     # Instance volumes are combined with the system-wide volumes.
     docker_volumes = self.get_volumes(
         system_volumes=system_paasta_config.get_volumes())
     return V1PodTemplateSpec(
         metadata=V1ObjectMeta(
             labels={
                 "yelp.com/paasta_service": self.get_service(),
                 "yelp.com/paasta_instance": self.get_instance(),
                 "yelp.com/paasta_git_sha": code_sha,
             },
             annotations={
                 "smartstack_registrations":
                 json.dumps(self.get_registrations()),
             },
         ),
         spec=V1PodSpec(
             service_account_name=self.get_kubernetes_service_account_name(
             ),
             containers=self.get_kubernetes_containers(
                 docker_volumes=docker_volumes,
                 aws_ebs_volumes=self.get_aws_ebs_volumes(),
                 system_paasta_config=system_paasta_config,
                 service_namespace_config=service_namespace_config,
             ),
             restart_policy="Always",
             volumes=self.get_pod_volumes(
                 docker_volumes=docker_volumes,
                 aws_ebs_volumes=self.get_aws_ebs_volumes(),
             ),
             dns_policy="Default",
         ),
     )
def filter_autoscaling_tasks(
    marathon_apps: Sequence[MarathonApp],
    all_mesos_tasks: Sequence[Task],
    config: MarathonServiceConfig,
    system_paasta_config: SystemPaastaConfig,
) -> Tuple[Mapping[str, MarathonTask], Sequence[Task]]:
    """Find the tasks that are serving traffic. We care about this because many tasks have a period of high CPU when
    they first start up, during which they warm up code, load and process data, etc., and we don't want this high load
    to drag our overall load estimate upwards. Allowing these tasks to count towards overall load could cause a cycle of
    scaling up, seeing high load due to new warming-up containers, scaling up, until we hit max_instances.

    However, accidentally omitting a task that actually is serving traffic will cause us to underestimate load; this is
    generally much worse than overestimating, since it can cause us to incorrectly scale down or refuse to scale up when
    necessary. For this reason, we look at several sources of health information, and if they disagree, assume the task
    is serving traffic.

    :param marathon_apps: all marathon apps; only those matching this
        service instance's job id prefix are considered.
    :param all_mesos_tasks: mesos tasks to correlate with the healthy
        marathon tasks by task id.
    :param config: the service instance's MarathonServiceConfig.
    :param system_paasta_config: system-wide PaaSTA configuration.
    :returns: a ({task_id: MarathonTask} dict of healthy tasks, list of
        matching mesos tasks) tuple.
    :raises MetricsProviderNoDataError: when no healthy marathon task is
        found.
    """
    job_id_prefix = "{}{}".format(
        format_job_id(service=config.service, instance=config.instance),
        MESOS_TASK_SPACER,
    )

    # Get a dict of healthy tasks, we assume tasks with no healthcheck defined are healthy.
    # We assume tasks with no healthcheck results but a defined healthcheck to be unhealthy, unless they are "old" in
    # which case we assume that Marathon has screwed up and stopped healthchecking but that they are healthy.

    log.info("Inspecting %s for autoscaling" % job_id_prefix)

    relevant_tasks_by_app: Dict[MarathonApp, List[MarathonTask]] = {
        app: app.tasks
        for app in marathon_apps
        if app.id.lstrip("/").startswith(job_id_prefix)
    }

    healthy_marathon_tasks: Dict[str, MarathonTask] = {}

    for app, tasks in relevant_tasks_by_app.items():
        for task in tasks:
            if (is_task_healthy(task) or not app.health_checks
                    or is_old_task_missing_healthchecks(task, app)):
                healthy_marathon_tasks[task.id] = task

    # For smartstack services, additionally trust haproxy: any task it
    # reports as up counts as healthy even if marathon disagrees.
    service_namespace_config = load_service_namespace_config(
        service=config.service, namespace=config.get_nerve_namespace())
    if service_namespace_config.is_in_smartstack():

        for task in filter_tasks_in_smartstack(
                tasks=[
                    task for tasks in relevant_tasks_by_app.values()
                    for task in tasks
                ],
                service=config.service,
                nerve_ns=config.get_nerve_namespace(),
                system_paasta_config=system_paasta_config,
                max_hosts_to_query=20,
                haproxy_min_fraction_up=
                0.01,  # Be very liberal. See docstring above for rationale.
        ):
            healthy_marathon_tasks[task.id] = task

    if not healthy_marathon_tasks:
        raise MetricsProviderNoDataError(
            "Couldn't find any healthy marathon tasks")
    mesos_tasks = [
        task for task in all_mesos_tasks
        if task["id"] in healthy_marathon_tasks
    ]
    return (healthy_marathon_tasks, mesos_tasks)
Beispiel #19
0
    def format_marathon_app_dict(self):
        """Create the configuration that will be passed to the Marathon REST API.

        Currently compiles the following keys into one nice dict:

        - id: the ID of the image in Marathon
        - container: a dict containing the docker url and docker launch options. Needed by deimos.
        - uris: blank.
        - ports: an array containing the port.
        - env: environment variables for the container.
        - mem: the amount of memory required.
        - cpus: the number of cpus required.
        - disk: the amount of disk space required.
        - constraints: the constraints on the Marathon app.
        - instances: the number of instances required.
        - cmd: the command to be executed.
        - args: an alternative to cmd that requires the docker container to have an entrypoint.

        The last 7 keys are retrieved using the get_<key> functions defined above.

        The docker url, docker volumes, and service namespace config used
        below are all derived internally from the system PaaSTA config and
        this instance's own configuration.

        :returns: A dict containing all of the keys listed above"""

        system_paasta_config = load_system_paasta_config()
        docker_url = get_docker_url(system_paasta_config.get_docker_registry(), self.get_docker_image())
        service_namespace_config = load_service_namespace_config(
            service=self.service,
            namespace=self.get_nerve_namespace(),
        )
        docker_volumes = system_paasta_config.get_volumes() + self.get_extra_volumes()

        net = get_mesos_network_for_net(self.get_net())

        complete_config = {
            'container': {
                'docker': {
                    'image': docker_url,
                    'network': net,
                    "parameters": self.format_docker_parameters(),
                },
                'type': 'DOCKER',
                'volumes': docker_volumes,
            },
            'uris': [system_paasta_config.get_dockercfg_location(), ],
            'backoff_seconds': self.get_backoff_seconds(),
            'backoff_factor': self.get_backoff_factor(),
            'max_launch_delay_seconds': self.get_max_launch_delay_seconds(),
            'health_checks': self.get_healthchecks(service_namespace_config),
            'env': self.get_env(),
            'mem': float(self.get_mem()),
            'cpus': float(self.get_cpus()),
            'disk': float(self.get_disk()),
            'constraints': self.get_calculated_constraints(service_namespace_config),
            'instances': self.get_desired_instances(),
            'cmd': self.get_cmd(),
            'args': self.get_args(),
        }

        # Bridge networking needs an explicit container->host port mapping.
        if net == 'BRIDGE':
            complete_config['container']['docker']['portMappings'] = [
                {
                    'containerPort': CONTAINER_PORT,
                    'hostPort': 0,
                    'protocol': 'tcp',
                },
            ]

        accepted_resource_roles = self.get_accepted_resource_roles()
        if accepted_resource_roles is not None:
            complete_config['accepted_resource_roles'] = accepted_resource_roles

        code_sha = get_code_sha_from_dockerurl(docker_url)

        # Only the non-blacklisted keys contribute to the config hash; the
        # hash (plus code sha) is baked into the app id.
        config_hash = get_config_hash(
            {key: value for key, value in complete_config.items() if key not in CONFIG_HASH_BLACKLIST},
            force_bounce=self.get_force_bounce(),
        )
        complete_config['id'] = format_job_id(self.service, self.instance, code_sha, config_hash)

        log.debug("Complete configuration for instance is: %s", complete_config)
        return complete_config
Beispiel #20
0
def _namespaced_get_classic_service_information_for_nerve(name, namespace, soa_dir):
    """Return the (nerve_name, nerve_dict) registration pair for a classic
    service namespace, with the port read from the service's port file."""
    namespace_config = load_service_namespace_config(name, namespace, soa_dir)
    namespace_config['port'] = service_configuration_lib.read_port(
        os.path.join(soa_dir, name, 'port'))
    return (compose_job_id(name, namespace), namespace_config)
Beispiel #21
0
    def format_marathon_app_dict(self):
        """Create the configuration that will be passed to the Marathon REST API.

        Currently compiles the following keys into one nice dict:

        - id: the ID of the image in Marathon
        - container: a dict containing the docker url and docker launch options. Needed by deimos.
        - uris: blank.
        - ports: an array containing the port.
        - env: environment variables for the container.
        - mem: the amount of memory required.
        - cpus: the number of cpus required.
        - disk: the amount of disk space required.
        - constraints: the constraints on the Marathon app.
        - instances: the number of instances required.
        - cmd: the command to be executed.
        - args: an alternative to cmd that requires the docker container to have an entrypoint.

        The last 7 keys are retrieved using the get_<key> functions defined above.

        The docker url, docker volumes, and service namespace config used
        below are all derived internally from the system PaaSTA config and
        this instance's own configuration.

        :returns: A dict containing all of the keys listed above"""

        system_paasta_config = load_system_paasta_config()
        docker_url = get_docker_url(system_paasta_config.get_docker_registry(),
                                    self.get_docker_image())
        service_namespace_config = load_service_namespace_config(
            service=self.service,
            namespace=self.get_nerve_namespace(),
        )
        docker_volumes = self.get_volumes(
            system_volumes=system_paasta_config.get_volumes())

        net = get_mesos_network_for_net(self.get_net())

        complete_config = {
            'container': {
                'docker': {
                    'image': docker_url,
                    'network': net,
                    "parameters": self.format_docker_parameters(),
                },
                'type': 'DOCKER',
                'volumes': docker_volumes,
            },
            'uris': [
                system_paasta_config.get_dockercfg_location(),
            ],
            'backoff_seconds':
            self.get_backoff_seconds(),
            'backoff_factor':
            self.get_backoff_factor(),
            'max_launch_delay_seconds':
            self.get_max_launch_delay_seconds(),
            'health_checks':
            self.get_healthchecks(service_namespace_config),
            'env':
            self.get_env(),
            'mem':
            float(self.get_mem()),
            'cpus':
            float(self.get_cpus()),
            'disk':
            float(self.get_disk()),
            'constraints':
            self.get_calculated_constraints(
                system_paasta_config=system_paasta_config,
                service_namespace_config=service_namespace_config),
            'instances':
            self.get_desired_instances(),
            'cmd':
            self.get_cmd(),
            'args':
            self.get_args(),
        }

        # Bridge networking needs an explicit container->host port
        # mapping; other network modes declare port definitions instead.
        if net == 'BRIDGE':
            complete_config['container']['docker']['portMappings'] = [
                {
                    'containerPort': self.get_container_port(),
                    'hostPort': self.get_host_port(),
                    'protocol': 'tcp',
                },
            ]
        else:
            complete_config['port_definitions'] = [
                {
                    'port': self.get_host_port(),
                    'protocol': 'tcp',
                },
            ]
            # Without this, we may end up with multiple containers requiring the same port on the same box.
            complete_config['require_ports'] = (self.get_host_port() != 0)

        accepted_resource_roles = self.get_accepted_resource_roles()
        if accepted_resource_roles is not None:
            complete_config[
                'accepted_resource_roles'] = accepted_resource_roles

        code_sha = get_code_sha_from_dockerurl(docker_url)

        # The sanitized config hash (plus code sha) is baked into the app
        # id, so config changes produce a new marathon app id.
        config_hash = get_config_hash(
            self.sanitize_for_config_hash(complete_config),
            force_bounce=self.get_force_bounce(),
        )
        complete_config['id'] = format_job_id(self.service, self.instance,
                                              code_sha, config_hash)

        log.debug("Complete configuration for instance is: %s",
                  complete_config)
        return complete_config