Example 1
def test_get_instance_config_unknown(mock_validate_service_instance):
    with raises(NotImplementedError):
        mock_validate_service_instance.return_value = 'some bogus unsupported framework'
        utils.get_instance_config(
            service='fake_service',
            instance='fake_instance',
            cluster='fake_cluster',
            soa_dir='fake_soa_dir',
        )
    assert mock_validate_service_instance.call_count == 1
Example 2
def test_get_instance_config_unknown(
    mock_validate_service_instance,
):
    with raises(NotImplementedError):
        mock_validate_service_instance.return_value = 'some bogus unsupported framework'
        utils.get_instance_config(
            service='fake_service',
            instance='fake_instance',
            cluster='fake_cluster',
            soa_dir='fake_soa_dir',
        )
    assert mock_validate_service_instance.call_count == 1
Example 3
    def get_rules(self, soa_dir, synapse_service_dir):
        try:
            conf = get_instance_config(
                self.service,
                self.instance,
                load_system_paasta_config().get_cluster(),
                load_deployments=False,
                soa_dir=soa_dir,
            )
        except NotImplementedError:
            # PAASTA-11414: new instance types may not provide this configuration information;
            # we don't want to break all of the firewall infrastructure when that happens
            return ()
        except NoConfigurationForServiceError:
            # PAASTA-12050: a deleted service may still have containers running on PaaSTA hosts
            # for several minutes after the directory disappears from soa-configs.
            return ()

        if conf.get_dependencies() is None:
            return ()

        rules = list(_default_rules(conf, self.log_prefix))
        rules.extend(_well_known_rules(conf))
        rules.extend(_smartstack_rules(conf, soa_dir, synapse_service_dir))
        return tuple(rules)
Example 4
def smartstack_dependencies_of_running_firewalled_services(
        soa_dir=DEFAULT_SOA_DIR):
    dependencies_to_services = defaultdict(set)
    for service, instance, _, _ in firewall.services_running_here():
        config = get_instance_config(
            service,
            instance,
            load_system_paasta_config().get_cluster(),
            load_deployments=False,
            soa_dir=soa_dir,
        )
        outbound_firewall = config.get_outbound_firewall()
        if not outbound_firewall:
            continue

        dependencies = config.get_dependencies() or ()

        smartstack_dependencies = [
            d['smartstack'] for d in dependencies if d.get('smartstack')
        ]
        for smartstack_dependency in smartstack_dependencies:
            # TODO: filter down to only services that have no proxy_port
            dependencies_to_services[smartstack_dependency].add(
                firewall.ServiceGroup(service, instance))

    return dependencies_to_services
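
A minimal usage sketch for the helper above, assuming firewall.ServiceGroup exposes its two positional fields as service and instance (an assumption; the snippet only shows the constructor call):

# Hypothetical caller of smartstack_dependencies_of_running_firewalled_services();
# the attribute access on firewall.ServiceGroup is assumed, not shown above.
deps = smartstack_dependencies_of_running_firewalled_services()
for dependency, service_groups in deps.items():
    for group in service_groups:
        print(f"{group.service}.{group.instance} depends on smartstack namespace {dependency}")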
Example 5
def validate_paasta_objects(service_path):
    soa_dir, service = path_to_soa_dir_service(service_path)

    returncode = True
    messages = []
    for cluster in list_clusters(service, soa_dir):
        for instance in list_all_instances_for_service(service=service,
                                                       clusters=[cluster],
                                                       soa_dir=soa_dir):
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=False,
                soa_dir=soa_dir,
            )
            messages.extend(instance_config.validate())
    returncode = len(messages) == 0

    if messages:
        errors = "\n".join(messages)
        paasta_print(
            failure((f"There were failures validating {service}: {errors}"),
                    ""))
    else:
        paasta_print(
            success(f"All PaaSTA Instances for are valid for all clusters"))

    return returncode
Example 6
def paasta_start_or_stop(args, desired_state):
    """Requests a change of state to start or stop given branches of a service."""
    instance = args.instance
    cluster = args.cluster
    soa_dir = args.soa_dir
    service = figure_out_service_name(args=args, soa_dir=soa_dir)

    service_config = get_instance_config(
        service=service,
        cluster=cluster,
        instance=instance,
        soa_dir=soa_dir,
        load_deployments=False,
    )

    remote_refs = remote_git.list_remote_refs(utils.get_git_url(service))

    if 'refs/heads/paasta-%s' % service_config.get_deploy_group() not in remote_refs:
        print "No branches found for %s in %s." % \
            (service_config.get_deploy_group(), remote_refs)
        print "Has it been deployed there yet?"
        sys.exit(1)

    force_bounce = utils.format_timestamp(datetime.datetime.utcnow())
    issue_state_change_for_service(
        service_config=service_config,
        force_bounce=force_bounce,
        desired_state=desired_state,
    )
Example 7
def emit_metrics_for_type(instance_type):
    cluster = load_system_paasta_config().get_cluster()
    instances = get_services_for_cluster(
        cluster=cluster,
        instance_type=instance_type,
    )

    for service, instance in instances:
        service_instance_config = get_instance_config(
            service=service,
            instance=instance,
            cluster=cluster,
        )
        dimensions = {
            'paasta_service': service_instance_config.service,
            'paasta_cluster': service_instance_config.cluster,
            'paasta_instance': service_instance_config.instance,
        }

        log.info(f"Emitting paasta.service.* with dimensions {dimensions}")
        gauge = yelp_meteorite.create_gauge('paasta.service.cpus', dimensions)
        gauge.set(service_instance_config.get_cpus())
        gauge = yelp_meteorite.create_gauge('paasta.service.mem', dimensions)
        gauge.set(service_instance_config.get_mem())
        gauge = yelp_meteorite.create_gauge('paasta.service.disk', dimensions)
        gauge.set(service_instance_config.get_disk())
        if hasattr(service_instance_config, 'get_instances'):
            if service_instance_config.get_max_instances() is None:
                gauge = yelp_meteorite.create_gauge('paasta.service.instances', dimensions)
                gauge.set(service_instance_config.get_instances())
Example 8
def validate_secrets(service_path):
    soa_dir, service = path_to_soa_dir_service(service_path)
    system_paasta_config = load_system_paasta_config()
    vault_cluster_map = system_paasta_config.get_vault_cluster_config()
    return_value = True
    for cluster in list_clusters(service, soa_dir):
        vault_env = vault_cluster_map.get(cluster)
        if not vault_env:
            print(failure(f"{cluster} not found on vault_cluster_map", ""))
            return_value = False
            continue

        for instance in list_all_instances_for_service(service=service,
                                                       clusters=[cluster],
                                                       soa_dir=soa_dir):
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=False,
                soa_dir=soa_dir,
            )
            if not check_secrets_for_instance(instance_config.config_dict,
                                              soa_dir, service_path,
                                              vault_env):
                return_value = False
    if return_value:
        print(success("No orphan secrets found"))
    return return_value
Example 9
def paasta_start_or_stop(args, desired_state):
    """Requests a change of state to start or stop given branches of a service."""
    instance = args.instance
    cluster = args.cluster
    soa_dir = args.soa_dir
    service = figure_out_service_name(args=args, soa_dir=soa_dir)

    service_config = get_instance_config(
        service=service,
        cluster=cluster,
        instance=instance,
        soa_dir=soa_dir,
        load_deployments=False,
    )

    remote_refs = remote_git.list_remote_refs(utils.get_git_url(service))

    if 'refs/heads/paasta-%s' % service_config.get_deploy_group() not in remote_refs:
        print("No branches found for %s in %s." %
              (service_config.get_deploy_group(), remote_refs))
        print("Has it been deployed there yet?")
        sys.exit(1)

    force_bounce = utils.format_timestamp(datetime.datetime.utcnow())
    issue_state_change_for_service(
        service_config=service_config,
        force_bounce=force_bounce,
        desired_state=desired_state,
    )
Example 10
def validate_min_max_instances(service_path):
    soa_dir, service = path_to_soa_dir_service(service_path)
    returncode = True

    for cluster in list_clusters(service, soa_dir):
        for instance in list_all_instances_for_service(service=service,
                                                       clusters=[cluster],
                                                       soa_dir=soa_dir):
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=False,
                soa_dir=soa_dir,
            )
            if instance_config.get_instance_type() != "tron":
                min_instances = instance_config.get_min_instances()
                max_instances = instance_config.get_max_instances()
                if min_instances is not None and max_instances is not None:
                    if max_instances < min_instances:
                        returncode = False
                        print(
                            failure(
                                f"Instance {instance} on cluster {cluster} has a greater number of min_instances than max_instances."
                                +
                                f"The number of min_instances ({min_instances}) cannot be greater than the max_instances ({max_instances}).",
                                "",
                            ))

    return returncode
Example 11
def get_service_instance_stats(service: str, instance: str,
                               cluster: str) -> Optional[ServiceInstanceStats]:
    """Returns a Dict with stats about a given service instance.

    Args:
        service: the service name
        instance: the instance name
        cluster: the cluster name where the service instance will be searched for

    Returns:
        A Dict mapping resource name to the amount of that resource the particular service instance consumes.
    """
    if service is None or instance is None or cluster is None:
        return None

    try:
        instance_config = get_instance_config(service, instance, cluster)
        # Get all fields that are shown in the 'paasta metastatus -vvv' command
        if instance_config.get_gpus():
            gpus = int(instance_config.get_gpus())
        else:
            gpus = 0
        service_instance_stats = ServiceInstanceStats(
            mem=instance_config.get_mem(),
            cpus=instance_config.get_cpus(),
            disk=instance_config.get_disk(),
            gpus=gpus,
        )
        return service_instance_stats
    except Exception as e:
        log.error(
            f"Failed to get stats for service {service} instance {instance}: {str(e)}"
        )
        return None
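
A short usage sketch for get_service_instance_stats above; the attribute names on ServiceInstanceStats mirror the keyword arguments passed to its constructor, which is an assumption rather than something shown in the snippet:

# Hypothetical usage; the service/instance/cluster names and the attribute access are assumed.
stats = get_service_instance_stats("example_service", "main", "example_cluster")
if stats is not None:
    print(f"cpus={stats.cpus} mem={stats.mem} disk={stats.disk} gpus={stats.gpus}")
else:
    print("No stats available (missing arguments or lookup failed)")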
Example 12
def get_deploy_groups_used_by_framework(instance_type, service, soa_dir):
    """This is a kind of funny function that gets all the instances for specified
    service and framework, and massages it into a form that matches up with what
    deploy.yaml's steps look like. This is only so we can compare it 1-1
    with what deploy.yaml has for linting.

    :param instance_type: one of 'marathon', 'chronos', 'adhoc'
    :param service: the service name
    :param soa_dir: The SOA configuration directory to read from

    :returns: a list of deploy group names used by the service.
    """

    deploy_groups = []
    for cluster in list_clusters(service, soa_dir):
        for _, instance in get_service_instance_list(
                service=service,
                cluster=cluster,
                instance_type=instance_type,
                soa_dir=soa_dir,
        ):
            try:
                config = get_instance_config(
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    soa_dir=soa_dir,
                    load_deployments=False,
                    instance_type=instance_type,
                )
                deploy_groups.append(config.get_deploy_group())
            except NotImplementedError:
                pass
    return deploy_groups
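
The docstring above says the result is meant to be compared 1-1 with deploy.yaml's steps for linting. A hedged sketch of such a comparison, assuming the common "pipeline"/"step" layout of deploy.yaml and an example soa_dir path (neither is shown in the snippet):

import yaml

# Hypothetical lint-style check built on get_deploy_groups_used_by_framework();
# the deploy.yaml shape and the paths here are assumptions.
service, soa_dir = "example_service", "/nail/etc/services"
with open(f"{soa_dir}/{service}/deploy.yaml") as f:
    deploy_yaml = yaml.safe_load(f)
used = set(get_deploy_groups_used_by_framework("marathon", service, soa_dir))
declared = {step.get("step") for step in deploy_yaml.get("pipeline", [])}
missing = used - declared
if missing:
    print(f"Deploy groups used by instances but missing from deploy.yaml: {sorted(missing)}")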
Example 13
def emit_metrics_for_type(instance_type):
    cluster = load_system_paasta_config().get_cluster()
    instances = get_services_for_cluster(cluster=cluster, instance_type=instance_type)

    for service, instance in instances:
        service_instance_config = get_instance_config(
            service=service, instance=instance, cluster=cluster
        )
        dimensions = {
            "paasta_service": service_instance_config.service,
            "paasta_cluster": service_instance_config.cluster,
            "paasta_instance": service_instance_config.instance,
            "paasta_pool": service_instance_config.get_pool(),
        }

        log.info(f"Emitting paasta.service.* with dimensions {dimensions}")
        gauge = yelp_meteorite.create_gauge("paasta.service.cpus", dimensions)
        gauge.set(service_instance_config.get_cpus())
        gauge = yelp_meteorite.create_gauge("paasta.service.mem", dimensions)
        gauge.set(service_instance_config.get_mem())
        gauge = yelp_meteorite.create_gauge("paasta.service.disk", dimensions)
        gauge.set(service_instance_config.get_disk())
        if hasattr(service_instance_config, "get_instances"):
            if service_instance_config.get_max_instances() is None:
                gauge = yelp_meteorite.create_gauge(
                    "paasta.service.instances", dimensions
                )
                gauge.set(service_instance_config.get_instances())
Example 14
    def config(self):
        return get_instance_config(
            self.service,
            self.instance,
            load_system_paasta_config().get_cluster(),
            load_deployments=False,
            soa_dir=self.soa_dir,
        )
Example 15
def paasta_start_or_stop(args, desired_state):
    """Requests a change of state to start or stop given branches of a service."""
    instance = args.instance
    clusters = args.clusters
    soa_dir = args.soa_dir
    service = figure_out_service_name(args=args, soa_dir=soa_dir)

    if args.clusters is not None:
        clusters = args.clusters.split(",")
    else:
        clusters = list_clusters(service)

    try:
        remote_refs = remote_git.list_remote_refs(
            utils.get_git_url(service, soa_dir))
    except remote_git.LSRemoteException as e:
        msg = (
            "Error talking to the git server: %s\n"
            "This PaaSTA command requires access to the git server to operate.\n"
            "The git server may be down or not reachable from here.\n"
            "Try again from somewhere where the git server can be reached, "
            "like your developer environment.") % str(e)
        print(msg)
        return 1

    invalid_deploy_groups = []
    for cluster in clusters:
        service_config = get_instance_config(
            service=service,
            cluster=cluster,
            instance=instance,
            soa_dir=soa_dir,
            load_deployments=False,
        )
        deploy_group = service_config.get_deploy_group()
        (deploy_tag, _) = get_latest_deployment_tag(remote_refs, deploy_group)

        if deploy_tag not in remote_refs:
            invalid_deploy_groups.append(deploy_group)
        else:
            force_bounce = utils.format_timestamp(datetime.datetime.utcnow())
            issue_state_change_for_service(
                service_config=service_config,
                force_bounce=force_bounce,
                desired_state=desired_state,
            )

    return_val = 0
    if invalid_deploy_groups:
        print "No branches found for %s in %s." % \
            (", ".join(invalid_deploy_groups), remote_refs)
        print "Has %s been deployed there yet?" % service
        return_val = 1

    return return_val
Example 16
def paasta_start_or_stop(args, desired_state):
    """Requests a change of state to start or stop given branches of a service."""
    instance = args.instance
    clusters = args.clusters
    soa_dir = args.soa_dir
    service = figure_out_service_name(args=args, soa_dir=soa_dir)

    if args.clusters is not None:
        clusters = args.clusters.split(",")
    else:
        clusters = list_clusters(service)

    try:
        remote_refs = remote_git.list_remote_refs(utils.get_git_url(service, soa_dir))
    except remote_git.LSRemoteException as e:
        msg = (
            "Error talking to the git server: %s\n"
            "This PaaSTA command requires access to the git server to operate.\n"
            "The git server may be down or not reachable from here.\n"
            "Try again from somewhere where the git server can be reached, "
            "like your developer environment."
        ) % str(e)
        print(msg)
        return 1

    invalid_deploy_groups = []
    for cluster in clusters:
        service_config = get_instance_config(
            service=service,
            cluster=cluster,
            instance=instance,
            soa_dir=soa_dir,
            load_deployments=False,
        )
        deploy_group = service_config.get_deploy_group()
        (deploy_tag, _) = get_latest_deployment_tag(remote_refs, deploy_group)

        if deploy_tag not in remote_refs:
            invalid_deploy_groups.append(deploy_group)
        else:
            force_bounce = utils.format_timestamp(datetime.datetime.utcnow())
            issue_state_change_for_service(
                service_config=service_config,
                force_bounce=force_bounce,
                desired_state=desired_state,
            )

    return_val = 0
    if invalid_deploy_groups:
        print "No branches found for %s in %s." % \
            (", ".join(invalid_deploy_groups), remote_refs)
        print "Has %s been deployed there yet?" % service
        return_val = 1

    return return_val
Example 17
def paasta_restart(args):
    pargs = apply_args_filters(args)
    soa_dir = args.soa_dir

    affected_flinks = []
    affected_non_flinks = []
    for cluster, service_instances in pargs.items():
        for service, instances in service_instances.items():
            for instance in instances.keys():
                service_config = get_instance_config(
                    service=service,
                    cluster=cluster,
                    instance=instance,
                    soa_dir=soa_dir,
                    load_deployments=False,
                )
                if isinstance(service_config, FlinkDeploymentConfig):
                    affected_flinks.append(service_config)
                else:
                    affected_non_flinks.append(service_config)

    if affected_flinks:
        flinks_info = ", ".join(
            [f"{f.service}.{f.instance}" for f in affected_flinks])
        print(
            f"WARN: paasta restart is currently unsupported for Flink instances ({flinks_info})."
        )
        print("To restart, please run:", end="\n\n")
        for flink in affected_flinks:
            print(
                f"paasta stop -s {flink.service} -i {flink.instance} -c {flink.cluster}"
            )
            print(
                f"paasta start -s {flink.service} -i {flink.instance} -c {flink.cluster}",
                end="\n\n",
            )

        if not affected_non_flinks:
            return 1

        non_flinks_info = ", ".join(
            [f"{f.service}.{f.instance}" for f in affected_non_flinks])
        proceed = choice.Binary(
            f"Would you like to restart the other instances ({non_flinks_info}) anyway?",
            False,
        ).ask()

        if not proceed:
            return 1

    return paasta_start(args)
Example 18
def update_autoscaler_count(request):
    service = request.swagger_data.get("service")
    instance = request.swagger_data.get("instance")
    cluster = settings.cluster
    soa_dir = settings.soa_dir
    desired_instances = request.swagger_data.get(
        "json_body")["desired_instances"]
    if not isinstance(desired_instances, int):
        error_message = 'The provided body does not have an integer value for "desired_instances": {}'.format(
            request.swagger_data.get("json_body"))
        raise ApiFailure(error_message, 500)

    instance_config = get_instance_config(service, instance, cluster, soa_dir,
                                          True)
    if not isinstance(instance_config,
                      (KubernetesDeploymentConfig, MarathonServiceConfig)):
        error_message = (
            f"Autoscaling is not supported for {service}.{instance} because instance type is not "
            f"marathon or kubernetes.")
        raise ApiFailure(error_message, 501)

    max_instances = instance_config.get_max_instances()
    if max_instances is None:
        error_message = f"Autoscaling is not enabled for {service}.{instance}"
        raise ApiFailure(error_message, 404)
    min_instances = instance_config.get_min_instances()

    status = "SUCCESS"
    if desired_instances > max_instances:
        desired_instances = max_instances
        status = (
            "WARNING desired_instances is greater than max_instances %d" %
            max_instances)
    elif desired_instances < min_instances:
        desired_instances = min_instances
        status = ("WARNING desired_instances is less than min_instances %d" %
                  min_instances)
    try:
        if isinstance(instance_config, KubernetesDeploymentConfig):
            instance_config.set_autoscaled_instances(
                instance_count=desired_instances,
                kube_client=settings.kubernetes_client)
        else:
            instance_config.set_autoscaled_instances(
                instance_count=desired_instances)
    except Exception as err:
        raise ApiFailure(err, 500)

    response_body = {"desired_instances": desired_instances, "status": status}
    return Response(json_body=response_body, status_code=202)
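
The endpoint above never scales outside the configured bounds: a desired count above max_instances or below min_instances is clamped and reported with a WARNING status. With illustrative numbers (not from any real configuration):

# Mirrors the bounds check in update_autoscaler_count above; numbers are made up.
min_instances, max_instances = 2, 10
desired_instances = 25
if desired_instances > max_instances:
    desired_instances = max_instances  # clamped down, status becomes a WARNING
elif desired_instances < min_instances:
    desired_instances = min_instances  # clamped up, status becomes a WARNING
assert desired_instances == 10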
Example 19
def send_sfx_event(service, instance, cluster):
    if yelp_meteorite:
        service_instance_config = get_instance_config(service=service,
                                                      instance=instance,
                                                      cluster=cluster)
        yelp_meteorite.events.emit_event(
            "paasta.service.oom_events",
            dimensions={
                "paasta_cluster": cluster,
                "paasta_instance": instance,
                "paasta_service": service,
                "paasta_pool": service_instance_config.get_pool(),
            },
        )
Example 20
def test_get_instance_config_chronos(
    mock_load_chronos_job_config,
    mock_validate_service_instance,
):
    mock_validate_service_instance.return_value = 'chronos'
    mock_load_chronos_job_config.return_value = 'fake_service_config'
    actual = utils.get_instance_config(
        service='fake_service',
        instance='fake_instance',
        cluster='fake_cluster',
        soa_dir='fake_soa_dir',
    )
    assert mock_validate_service_instance.call_count == 1
    assert mock_load_chronos_job_config.call_count == 1
    assert actual == 'fake_service_config'
Example 21
def test_get_instance_config_kubernetes(
    mock_load_kubernetes_service_config,
    mock_validate_service_instance,
):
    mock_validate_service_instance.return_value = 'kubernetes'
    mock_load_kubernetes_service_config.return_value = 'fake_service_config'
    actual = utils.get_instance_config(
        service='fake_service',
        instance='fake_instance',
        cluster='fake_cluster',
        soa_dir='fake_soa_dir',
    )
    assert mock_validate_service_instance.call_count == 1
    assert mock_load_kubernetes_service_config.call_count == 1
    assert actual == 'fake_service_config'
Example 22
def test_get_instance_config_marathon(
    mock_load_marathon_service_config,
    mock_validate_service_instance,
):
    mock_validate_service_instance.return_value = 'marathon'
    mock_load_marathon_service_config.return_value = 'fake_service_config'
    actual = utils.get_instance_config(
        service='fake_service',
        instance='fake_instance',
        cluster='fake_cluster',
        soa_dir='fake_soa_dir',
    )
    assert mock_validate_service_instance.call_count == 1
    assert mock_load_marathon_service_config.call_count == 1
    assert actual == 'fake_service_config'
Example 23
def test_get_instance_config_by_instance_type(
    mock_validate_service_instance,
):
    instance_type = "fake_type"
    mock_validate_service_instance.return_value = instance_type
    mock_load_config = mock.MagicMock()
    mock_load_config.return_value = "fake_service_config"
    utils.INSTANCE_TYPE_HANDLERS[instance_type] = (None, mock_load_config)
    actual = utils.get_instance_config(
        service="fake_service",
        instance="fake_instance",
        cluster="fake_cluster",
        soa_dir="fake_soa_dir",
    )
    assert mock_validate_service_instance.call_count == 1
    assert mock_load_config.call_count == 1
    assert actual == "fake_service_config"
Example 24
def main(sys_argv):
    args = parse_args(sys_argv[1:])
    cluster = load_system_paasta_config().get_cluster()
    victims = latest_oom_events(cluster, args.superregion)
    for (service, instance) in get_services_for_cluster(cluster, soa_dir=args.soa_dir):
        try:
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=False,
                soa_dir=args.soa_dir,
            )
            oom_events = victims.get((service, instance), [])
            send_sensu_event(instance_config, oom_events, args)
        except NotImplementedError:  # When instance_type is not supported by get_instance_config
            pass
Example 25
def send_sfx_event(service, instance, cluster):
    if yelp_meteorite:
        service_instance_config = get_instance_config(
            service=service, instance=instance, cluster=cluster
        )
        dimensions = {
            "paasta_cluster": cluster,
            "paasta_instance": instance,
            "paasta_service": service,
            "paasta_pool": service_instance_config.get_pool(),
        }
        yelp_meteorite.events.emit_event(
            "paasta.service.oom_events", dimensions=dimensions,
        )
        counter = yelp_meteorite.create_counter(
            "paasta.service.oom_count", default_dimensions=dimensions,
        )
        counter.count()
Example 26
def get_autoscaler_count(request):
    service = request.swagger_data.get("service")
    instance = request.swagger_data.get("instance")
    cluster = settings.cluster
    soa_dir = settings.soa_dir

    instance_config = get_instance_config(service, instance, cluster, soa_dir)
    if not isinstance(instance_config,
                      (KubernetesDeploymentConfig, MarathonServiceConfig)):
        error_message = (
            f"Autoscaling is not supported for {service}.{instance} because instance type is not "
            f"marathon or kubernetes.")
        raise ApiFailure(error_message, 501)

    response_body = {
        "desired_instances": instance_config.get_instances(),
        "calculated_instances":
        instance_config.get_instances(with_limit=False),
    }
    return Response(json_body=response_body, status_code=200)
Example 27
    def get_rules(self, soa_dir, synapse_service_dir):
        try:
            conf = get_instance_config(
                self.service,
                self.instance,
                load_system_paasta_config().get_cluster(),
                load_deployments=False,
                soa_dir=soa_dir,
            )
        except NotImplementedError:
            # PAASTA-11414: new instance types may not provide this configuration information;
            # we don't want to break all of the firewall infrastructure when that happens
            return ()

        if conf.get_dependencies() is None:
            return ()

        rules = list(_default_rules(conf, self.log_prefix))
        rules.extend(_well_known_rules(conf))
        rules.extend(_smartstack_rules(conf, soa_dir, synapse_service_dir))
        return tuple(rules)
Example 28
def paasta_start_or_stop(args, desired_state):
    """Requests a change of state to start or stop given branches of a service."""
    instance = args.instance
    cluster = args.cluster
    soa_dir = args.soa_dir
    service = figure_out_service_name(args=args, soa_dir=soa_dir)

    service_config = get_instance_config(
        service=service,
        cluster=cluster,
        instance=instance,
        soa_dir=soa_dir,
        load_deployments=False,
    )

    try:
        remote_refs = remote_git.list_remote_refs(utils.get_git_url(service))
    except remote_git.LSRemoteException as e:
        msg = (
            "Error talking to the git server: %s\n"
            "This PaaSTA command requires access to the git server to operate.\n"
            "The git server may be down or not reachable from here.\n"
            "Try again from somewhere where the git server can be reached, "
            "like your developer environment."
        ) % str(e)
        print(msg)
        return 1

    if 'refs/heads/paasta-%s' % service_config.get_deploy_group() not in remote_refs:
        print "No branches found for %s in %s." % \
            (service_config.get_deploy_group(), remote_refs)
        print "Has it been deployed there yet?"
        return 1

    force_bounce = utils.format_timestamp(datetime.datetime.utcnow())
    issue_state_change_for_service(
        service_config=service_config,
        force_bounce=force_bounce,
        desired_state=desired_state,
    )
Example 29
def configure_and_run_docker_container(
        docker_client,
        docker_hash,
        service,
        instance,
        cluster,
        args,
        pull_image=False,
        dry_run=False
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        sys.stdout.write(PaastaColors.yellow(
            "Warning: Couldn't load config files from '/etc/paasta'. This indicates\n"
            "PaaSTA is not configured locally on this host, and local-run may not behave\n"
            "the same way it would behave on a server configured for PaaSTA.\n"
        ))
        system_paasta_config = SystemPaastaConfig({"volumes": []}, '/etc/paasta')

    soa_dir = args.yelpsoa_config_root

    volumes = list()

    instance_type = validate_service_instance(service, instance, cluster, soa_dir)

    try:
        instance_config = get_instance_config(
            service=service,
            instance=instance,
            cluster=cluster,
            load_deployments=pull_image,
            soa_dir=soa_dir,
        )
    except NoDeploymentsAvailable:
        sys.stderr.write(PaastaColors.red(
            "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
            "You can generate this by running:\n"
            "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n" % {
                'soa_dir': soa_dir, 'service': service}))
        return

    if pull_image:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(), instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(PaastaColors.red(
                "Error: No sha has been marked for deployment for the %s deploy group.\n"
                "Please ensure this service has either run through a jenkins pipeline "
                "or paasta mark-for-deployment has been run for %s" % (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url
        docker_pull_image(docker_url)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if type(extra_volumes) == dict:
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' % (volume['hostPath'], volume['containerPath'], volume['mode'].lower()))

    if args.interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = shlex.split(command_modifier(command_from_config), posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=args.interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
    )
Example 30
def configure_and_run_docker_container(
        docker_client,
        docker_hash,
        service,
        instance,
        cluster,
        system_paasta_config,
        args,
        pull_image=False,
        dry_run=False
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """

    soa_dir = args.yelpsoa_config_root

    volumes = list()

    load_deployments = docker_hash is None or pull_image

    interactive = args.interactive

    try:
        if instance is None and args.healthcheck:
            sys.stderr.write("With --healthcheck, --instance must be provided!\n")
            sys.exit(1)
        if instance is None:
            instance_type = 'adhoc'
            instance = 'interactive'
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(service, instance, cluster, soa_dir)
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        sys.stderr.write(str(e) + '\n')
        return
    except NoDeploymentsAvailable:
        sys.stderr.write(PaastaColors.red(
            "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
            "You can generate this by running:\n"
            "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n" % {
                'soa_dir': soa_dir, 'service': service}))
        return

    if docker_hash is None:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(), instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(PaastaColors.red(
                "Error: No sha has been marked for deployment for the %s deploy group.\n"
                "Please ensure this service has either run through a jenkins pipeline "
                "or paasta mark-for-deployment has been run for %s\n" % (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url

    if pull_image:
        docker_pull_image(docker_url)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if type(extra_volumes) == dict:
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' % (volume['hostPath'], volume['containerPath'], volume['mode'].lower()))

    if interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = shlex.split(command_modifier(command_from_config), posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
    )
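
The volume strings assembled in the loop above follow Docker's host_path:container_path:mode bind syntax. A small illustration with a made-up volume entry:

# Illustrative volume entry; mirrors the '%s:%s:%s' string built above.
volume = {"hostPath": "/nail/etc/services", "containerPath": "/nail/etc/services", "mode": "RO"}
bind = "%s:%s:%s" % (volume["hostPath"], volume["containerPath"], volume["mode"].lower())
assert bind == "/nail/etc/services:/nail/etc/services:ro"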
Example 31
def paasta_spark_run(args):
    # argparse does not work as expected with both default and
    # type=validate_work_dir.
    validate_work_dir(args.work_dir)

    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        print(
            PaastaColors.yellow(
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates"
                "PaaSTA is not configured locally on this host, and local-run may not behave"
                "the same way it would behave on a server configured for PaaSTA."
            ),
            sep="\n",
        )
        system_paasta_config = SystemPaastaConfig({"volumes": []}, "/etc/paasta")

    if args.cmd == "jupyter-lab" and not args.build and not args.image:
        print(
            PaastaColors.red(
                "The jupyter-lab command requires a prebuilt image with -I or --image."
            ),
            file=sys.stderr,
        )
        return 1

    # Use the default spark:client instance configs if not provided
    try:
        instance_config = get_instance_config(
            service=args.service,
            instance=args.instance,
            cluster=system_paasta_config.get_cluster_aliases().get(
                args.cluster, args.cluster
            ),
            load_deployments=args.build is False and args.image is None,
            soa_dir=args.yelpsoa_config_root,
        )
    except NoConfigurationForServiceError as e:
        print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s."
                "You can generate this by running:"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s"
                % {"soa_dir": args.yelpsoa_config_root, "service": args.service}
            ),
            sep="\n",
            file=sys.stderr,
        )
        return 1

    if not args.cmd and not instance_config.get_cmd():
        print(
            "A command is required, pyspark, spark-shell, spark-submit or jupyter",
            file=sys.stderr,
        )
        return 1

    aws_creds = get_aws_credentials(
        service=args.service,
        no_aws_credentials=args.no_aws_credentials,
        aws_credentials_yaml=args.aws_credentials_yaml,
        profile_name=args.aws_profile,
    )
    docker_image = get_docker_image(args, instance_config)
    if docker_image is None:
        return 1

    pod_template_path = generate_pod_template_path()
    args.enable_compact_bin_packing = should_enable_compact_bin_packing(
        args.disable_compact_bin_packing, args.cluster_manager
    )

    volumes = instance_config.get_volumes(system_paasta_config.get_volumes())
    app_base_name = get_spark_app_name(args.cmd or instance_config.get_cmd())

    if args.enable_compact_bin_packing:
        document = POD_TEMPLATE.format(
            spark_pod_label=limit_size_with_hash(f"exec-{app_base_name}"),
        )
        parsed_pod_template = yaml.load(document)
        with open(pod_template_path, "w") as f:
            yaml.dump(parsed_pod_template, f)

    needs_docker_cfg = not args.build
    user_spark_opts = _parse_user_spark_args(
        args.spark_args, pod_template_path, args.enable_compact_bin_packing
    )

    args.cmd = _auto_add_timeout_for_job(args.cmd, args.timeout_job_runtime)

    # This is required if configs are provided as part of `spark-submit`
    # Other way to provide is with --spark-args
    sub_cmds = args.cmd.split(" ")  # spark.driver.memory=10g
    for cmd in sub_cmds:
        if cmd.startswith("spark.driver.memory") or cmd.startswith(
            "spark.driver.cores"
        ):
            key, value = cmd.split("=")
            user_spark_opts[key] = value

    paasta_instance = get_smart_paasta_instance_name(args)
    auto_set_temporary_credentials_provider = (
        args.disable_temporary_credentials_provider is False
    )
    spark_conf = get_spark_conf(
        cluster_manager=args.cluster_manager,
        spark_app_base_name=app_base_name,
        docker_img=docker_image,
        user_spark_opts=user_spark_opts,
        paasta_cluster=args.cluster,
        paasta_pool=args.pool,
        paasta_service=args.service,
        paasta_instance=paasta_instance,
        extra_volumes=volumes,
        aws_creds=aws_creds,
        needs_docker_cfg=needs_docker_cfg,
        auto_set_temporary_credentials_provider=auto_set_temporary_credentials_provider,
    )
    # Experimental: TODO: Move to service_configuration_lib once confirmed that there are no issues
    # Enable AQE: Adaptive Query Execution
    if "spark.sql.adaptive.enabled" not in spark_conf:
        spark_conf["spark.sql.adaptive.enabled"] = "true"
        aqe_msg = "Spark performance improving feature Adaptive Query Execution (AQE) is enabled. Set spark.sql.adaptive.enabled as false to disable."
        log.info(aqe_msg)
        print(PaastaColors.blue(aqe_msg))
    return configure_and_run_docker_container(
        args,
        docker_img=docker_image,
        instance_config=instance_config,
        system_paasta_config=system_paasta_config,
        spark_conf=spark_conf,
        aws_creds=aws_creds,
        cluster_manager=args.cluster_manager,
        pod_template_path=pod_template_path,
    )
Example 32
def configure_and_run_docker_container(docker_client,
                                       docker_hash,
                                       service,
                                       instance,
                                       cluster,
                                       system_paasta_config,
                                       args,
                                       pull_image=False,
                                       dry_run=False):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """

    soa_dir = args.yelpsoa_config_root

    volumes = list()

    load_deployments = docker_hash is None or pull_image

    interactive = args.interactive

    try:
        if instance is None and args.healthcheck:
            sys.stderr.write(
                "With --healthcheck, --instance must be provided!\n")
            sys.exit(1)
        if instance is None:
            instance_type = 'adhoc'
            instance = 'interactive'
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(service, instance,
                                                      cluster, soa_dir)
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        sys.stderr.write(str(e) + '\n')
        return
    except NoDeploymentsAvailable:
        sys.stderr.write(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
                "You can generate this by running:\n"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n"
                % {
                    'soa_dir': soa_dir,
                    'service': service
                }))
        return

    if docker_hash is None:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(),
                instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(
                PaastaColors.red(
                    "Error: No sha has been marked for deployment for the %s deploy group.\n"
                    "Please ensure this service has either run through a jenkins pipeline "
                    "or paasta mark-for-deployment has been run for %s\n" %
                    (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url

    if pull_image:
        docker_pull_image(docker_url)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if type(extra_volumes) == dict:
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' %
                       (volume['hostPath'], volume['containerPath'],
                        volume['mode'].lower()))

    if interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = shlex.split(command_modifier(command_from_config),
                                  posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
    )
Example 33
def _run_instance_worker(cluster_data, instances_out, green_light):
    """Get instances from the instances_in queue and check them one by one.

    If an instance isn't deployed, add it to the instances_out queue
    to re-check it later.

    :param cluster_data: an instance of ClusterData.
    :param instances_out: See the docstring for instances_deployed().
    :param green_light: See the docstring for _query_clusters().
    """

    api = client.get_paasta_api_client(cluster=cluster_data.cluster)
    if not api:
        log.warning("Couldn't reach the PaaSTA api for {}! Assuming it is not "
                    "deployed there yet.".format(cluster_data.cluster))
        while not cluster_data.instances_queue.empty():
            try:
                instance = cluster_data.instances_queue.get(block=False)
            except Empty:
                return
            cluster_data.instances_queue.task_done()
            instances_out.put(instance)

    while not cluster_data.instances_queue.empty() and green_light.is_set():
        try:
            instance = cluster_data.instances_queue.get(block=False)
        except Empty:
            return

        instance_config = get_instance_config(
            cluster_data.service,
            instance,
            cluster_data.cluster,
            load_deployments=False,
            soa_dir=cluster_data.soa_dir,
        )

        log.debug("Inspecting the deployment status of {}.{} on {}"
                  .format(cluster_data.service, instance, cluster_data.cluster))
        try:
            status = None
            status = api.service.status_instance(
                service=cluster_data.service,
                instance=instance,
            ).result()
        except HTTPError as e:
            if e.response.status_code == 404:
                log.warning("Can't get status for instance {}, service {} in "
                            "cluster {}. This is normally because it is a new "
                            "service that hasn't been deployed by PaaSTA yet"
                            .format(
                                instance, cluster_data.service,
                                cluster_data.cluster,
                            ))
            else:
                log.warning("Error getting service status from PaaSTA API for {}: {}"
                            "{}".format(
                                cluster_data.cluster, e.response.status_code,
                                e.response.text,
                            ))
        except ConnectionError as e:
            log.warning("Error getting service status from PaaSTA API for {}:"
                        "{}".format(cluster_data.cluster, e))

        if not status:
            log.debug("No status for {}.{}, in {}. Not deployed yet."
                      .format(
                          cluster_data.service, instance,
                          cluster_data.cluster,
                      ))
            cluster_data.instances_queue.task_done()
            instances_out.put(instance)
        elif not status.marathon:
            log.debug("{}.{} in {} is not a Marathon job. Marked as deployed."
                      .format(
                          cluster_data.service, instance,
                          cluster_data.cluster,
                      ))
        elif (
            status.marathon.expected_instance_count == 0 or
            status.marathon.desired_state == 'stop'
        ):
            log.debug("{}.{} in {} is marked as stopped. Marked as deployed."
                      .format(
                          cluster_data.service, status.instance,
                          cluster_data.cluster,
                      ))
        else:
            if status.marathon.app_count != 1:
                paasta_print("  {}.{} on {} is still bouncing, {} versions "
                             "running"
                             .format(
                                 cluster_data.service, status.instance,
                                 cluster_data.cluster,
                                 status.marathon.app_count,
                             ))
                cluster_data.instances_queue.task_done()
                instances_out.put(instance)
                continue
            if not cluster_data.git_sha.startswith(status.git_sha):
                paasta_print("  {}.{} on {} doesn't have the right sha yet: {}"
                             .format(
                                 cluster_data.service, instance,
                                 cluster_data.cluster, status.git_sha,
                             ))
                cluster_data.instances_queue.task_done()
                instances_out.put(instance)
                continue
            if status.marathon.deploy_status not in ['Running', 'Deploying', 'Waiting']:
                paasta_print("  {}.{} on {} isn't running yet: {}"
                             .format(
                                 cluster_data.service, instance,
                                 cluster_data.cluster,
                                 status.marathon.deploy_status,
                             ))
                cluster_data.instances_queue.task_done()
                instances_out.put(instance)
                continue

            # The bounce margin factor defines what proportion of instances we need to be "safe",
            # so consider it scaled up "enough" if we have that proportion of instances ready.
            required_instance_count = int(math.ceil(
                instance_config.get_bounce_margin_factor() * status.marathon.expected_instance_count,
            ))
            if required_instance_count > status.marathon.running_instance_count:
                paasta_print("  {}.{} on {} isn't scaled up yet, "
                             "has {} out of {} required instances (out of a total of {})"
                             .format(
                                 cluster_data.service, instance,
                                 cluster_data.cluster,
                                 status.marathon.running_instance_count,
                                 required_instance_count,
                                 status.marathon.expected_instance_count,
                             ))
                cluster_data.instances_queue.task_done()
                instances_out.put(instance)
                continue
            paasta_print("Complete: {}.{} on {} looks 100% deployed at {} "
                         "instances on {}"
                         .format(
                             cluster_data.service, instance,
                             cluster_data.cluster,
                             status.marathon.running_instance_count,
                             status.git_sha,
                         ))
            cluster_data.instances_queue.task_done()
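
The required-instance computation near the end of the worker above scales the expected count by the bounce margin factor and rounds up. With illustrative numbers:

import math

# Mirrors the required_instance_count calculation above; numbers are made up.
bounce_margin_factor = 0.95
expected_instance_count = 10
required_instance_count = int(math.ceil(bounce_margin_factor * expected_instance_count))
assert required_instance_count == 10  # ceil(9.5) == 10, so all 10 must be running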
Example 34
def paasta_spark_run(args):
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        paasta_print(
            PaastaColors.yellow(
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates"
                "PaaSTA is not configured locally on this host, and local-run may not behave"
                "the same way it would behave on a server configured for PaaSTA.",
            ),
            sep='\n',
        )
        system_paasta_config = SystemPaastaConfig({"volumes": []},
                                                  '/etc/paasta')

    # Use the default spark:client instance configs if not provided
    try:
        instance_config = get_instance_config(
            service=args.service,
            instance=args.instance,
            cluster=args.cluster,
            load_deployments=args.build is False,
            soa_dir=args.yelpsoa_config_root,
        )
    except NoConfigurationForServiceError as e:
        paasta_print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        paasta_print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s."
                "You can generate this by running:"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s"
                % {
                    'soa_dir': args.yelpsoa_config_root,
                    'service': args.service,
                }, ),
            sep='\n',
            file=sys.stderr,
        )
        return 1

    if args.build:
        docker_url = build_and_push_docker_image(args)
        if docker_url is None:
            return 1
    else:
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            paasta_print(
                PaastaColors.red(
                    "Error: No sha has been marked for deployment for the %s deploy group.\n"
                    "Please ensure this service has either run through a jenkins pipeline "
                    "or paasta mark-for-deployment has been run for %s\n" % (
                        instance_config.get_deploy_group(),
                        args.service,
                    ), ),
                sep='',
                file=sys.stderr,
            )
            return 1
        paasta_print(
            "Please wait while the image (%s) is pulled (times out after 5m)..."
            % docker_url,
            file=sys.stderr,
        )
        retcode, _ = _run('sudo -H docker pull %s' % docker_url,
                          stream=True,
                          timeout=300)
        if retcode != 0:
            paasta_print(
                "\nPull failed. Are you authorized to run docker commands?",
                file=sys.stderr,
            )
            return 1

    return configure_and_run_docker_container(
        args,
        docker_img=docker_url,
        instance_config=instance_config,
        system_paasta_config=system_paasta_config,
    )
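For reference, a minimal invocation sketch for the function above; the attribute names mirror what the snippet reads off args, while all the values (including the soa-configs path) are placeholders:

from argparse import Namespace

# Placeholder values; only the attribute names are taken from the snippet above.
args = Namespace(
    service='example_service',
    instance='client',
    cluster='example_cluster',
    build=False,                                # skip build_and_push_docker_image()
    yelpsoa_config_root='/path/to/soa-configs',
)
exit_code = paasta_spark_run(args)  # 1 on the error paths above, otherwise whatever configure_and_run_docker_container returns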
Esempio n. 35
0
def configure_and_run_docker_container(docker_client,
                                       docker_hash,
                                       service,
                                       instance,
                                       cluster,
                                       args,
                                       pull_image=False,
                                       dry_run=False):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        sys.stdout.write(
            PaastaColors.yellow(
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates\n"
                "PaaSTA is not configured locally on this host, and local-run may not behave\n"
                "the same way it would behave on a server configured for PaaSTA.\n"
            ))
        system_paasta_config = SystemPaastaConfig({"volumes": []},
                                                  '/etc/paasta')

    soa_dir = args.yelpsoa_config_root

    volumes = list()

    instance_type = validate_service_instance(service, instance, cluster,
                                              soa_dir)

    load_deployments = docker_hash is None or pull_image

    try:
        instance_config = get_instance_config(
            service=service,
            instance=instance,
            cluster=cluster,
            load_deployments=load_deployments,
            soa_dir=soa_dir,
        )
    except NoDeploymentsAvailable:
        sys.stderr.write(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s.\n"
                "You can generate this by running:\n"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s\n"
                % {
                    'soa_dir': soa_dir,
                    'service': service
                }))
        return

    if docker_hash is None:
        try:
            docker_url = get_docker_url(
                system_paasta_config.get_docker_registry(),
                instance_config.get_docker_image())
        except NoDockerImageError:
            sys.stderr.write(
                PaastaColors.red(
                    "Error: No sha has been marked for deployment for the %s deploy group.\n"
                    "Please ensure this service has either run through a jenkins pipeline "
                    "or paasta mark-for-deployment has been run for %s" %
                    (instance_config.get_deploy_group(), service)))
            return
        docker_hash = docker_url

    if pull_image:
        docker_pull_image(docker_url)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if type(extra_volumes) == dict:
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' %
                       (volume['hostPath'], volume['containerPath'],
                        volume['mode'].lower()))

    if args.interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd, posix=False)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = shlex.split(command_modifier(command_from_config),
                                  posix=False)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=args.interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
    )
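The volume handling above reduces each PaaSTA volume dict to the host:container:mode bind string that Docker expects; a standalone sketch of that transformation with made-up paths:

# Sample dicts shaped like the ones returned by get_volumes()/get_extra_volumes();
# the paths are invented for illustration.
volume_dicts = [
    {'hostPath': '/etc/example', 'containerPath': '/etc/example', 'mode': 'RO'},
    {'hostPath': '/var/log/example', 'containerPath': '/var/log/example', 'mode': 'RW'},
]

binds = [
    '%s:%s:%s' % (v['hostPath'], v['containerPath'], v['mode'].lower())
    for v in volume_dicts
]
print(binds)  # ['/etc/example:/etc/example:ro', '/var/log/example:/var/log/example:rw']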
Esempio n. 36
0
def paasta_start_or_stop(args, desired_state):
    """Requests a change of state to start or stop given branches of a service."""
    soa_dir = args.soa_dir
    service = figure_out_service_name(args=args, soa_dir=soa_dir)
    instances = args.instances.split(",") if args.instances else None

    # assert that each of the clusters that the user specifies are 'valid'
    # for the instance list provided; that is, assert that at least one of the instances
    # provided in the -i argument is deployed there.
    # if there are no instances defined in the args, then assert that the service
    # is deployed to that cluster.
    # If args.clusters is falsey, then default to *all* clusters that a service is deployed to,
    # and we figure out which ones are needed for each service later.
    if instances:
        instance_clusters = [list_clusters(service, soa_dir, instance) for instance in instances]
        valid_clusters = sorted(list(set([cluster for cluster_list in instance_clusters for cluster in cluster_list])))
    else:
        valid_clusters = list_clusters(service, soa_dir)

    if args.clusters:
        clusters = args.clusters.split(",")
        invalid_clusters = [cluster for cluster in clusters if cluster not in valid_clusters]
        if invalid_clusters:
            print ("Invalid cluster name(s) specified: %s." "Valid options: %s") % (
                " ".join(invalid_clusters),
                " ".join(valid_clusters),
            )
            return 1
    else:
        clusters = valid_clusters

    try:
        remote_refs = remote_git.list_remote_refs(utils.get_git_url(service, soa_dir))
    except remote_git.LSRemoteException as e:
        msg = (
            "Error talking to the git server: %s\n"
            "This PaaSTA command requires access to the git server to operate.\n"
            "The git server may be down or not reachable from here.\n"
            "Try again from somewhere where the git server can be reached, "
            "like your developer environment."
        ) % str(e)
        print(msg)
        return 1

    invalid_deploy_groups = []
    marathon_message_printed, chronos_message_printed = False, False
    for cluster in clusters:
        # If they haven't specified what instances to act on, do it for all of them.
        # If they have specified what instances, only iterate over them if they're
        # actually within this cluster.
        if instances is None:
            cluster_instances = list_all_instances_for_service(service, clusters=[cluster], soa_dir=soa_dir)
            print ("no instances specified; restarting all instances for service")
        else:
            all_cluster_instances = list_all_instances_for_service(service, clusters=[cluster], soa_dir=soa_dir)
            cluster_instances = all_cluster_instances.intersection(set(instances))

        for instance in cluster_instances:
            service_config = get_instance_config(
                service=service, cluster=cluster, instance=instance, soa_dir=soa_dir, load_deployments=False
            )
            deploy_group = service_config.get_deploy_group()
            (deploy_tag, _) = get_latest_deployment_tag(remote_refs, deploy_group)

            if deploy_tag not in remote_refs:
                invalid_deploy_groups.append(deploy_group)
            else:
                force_bounce = utils.format_timestamp(datetime.datetime.utcnow())
                if isinstance(service_config, MarathonServiceConfig) and not marathon_message_printed:
                    print_marathon_message(desired_state)
                    marathon_message_printed = True
                elif isinstance(service_config, ChronosJobConfig) and not chronos_message_printed:
                    print_chronos_message(desired_state)
                    chronos_message_printed = True

                issue_state_change_for_service(
                    service_config=service_config, force_bounce=force_bounce, desired_state=desired_state
                )

    return_val = 0
    if invalid_deploy_groups:
        print "No branches found for %s in %s." % (", ".join(invalid_deploy_groups), remote_refs)
        print "Has %s been deployed there yet?" % service
        return_val = 1

    return return_val
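The cluster validation at the top of this example flattens the per-instance cluster lists into one sorted, de-duplicated list of valid clusters; a minimal standalone sketch with made-up data standing in for list_clusters():

# Made-up per-instance cluster lists.
instance_clusters = [
    ['cluster-a', 'cluster-b'],   # clusters where one instance is deployed
    ['cluster-b', 'cluster-c'],   # clusters where another instance is deployed
]

valid_clusters = sorted(set(
    cluster for cluster_list in instance_clusters for cluster in cluster_list
))
print(valid_clusters)  # ['cluster-a', 'cluster-b', 'cluster-c']

# A requested cluster is rejected unless it appears in valid_clusters.
requested = ['cluster-c', 'cluster-x']
print([c for c in requested if c not in valid_clusters])  # ['cluster-x']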
Esempio n. 37
0
def paasta_start_or_stop(args, desired_state):
    """Requests a change of state to start or stop given branches of a service."""
    soa_dir = args.soa_dir
    service = figure_out_service_name(args=args, soa_dir=soa_dir)

    if args.clusters is not None:
        clusters = args.clusters.split(",")
    else:
        clusters = list_clusters(service)

    if args.instances is not None:
        instances = args.instances.split(",")
    else:
        instances = None

    try:
        remote_refs = remote_git.list_remote_refs(utils.get_git_url(service, soa_dir))
    except remote_git.LSRemoteException as e:
        msg = (
            "Error talking to the git server: %s\n"
            "This PaaSTA command requires access to the git server to operate.\n"
            "The git server may be down or not reachable from here.\n"
            "Try again from somewhere where the git server can be reached, "
            "like your developer environment."
        ) % str(e)
        print(msg)
        return 1

    invalid_deploy_groups = []
    marathon_message_printed, chronos_message_printed = False, False
    for cluster in clusters:
        # If they haven't specified what instances to act on, do it for all of them.
        # If they have specified what instances, only iterate over them if they're
        # actually within this cluster.
        if instances is None:
            cluster_instances = list_all_instances_for_service(service, clusters=[cluster], soa_dir=soa_dir)
        else:
            all_cluster_instances = list_all_instances_for_service(service, clusters=[cluster], soa_dir=soa_dir)
            cluster_instances = all_cluster_instances.intersection(set(instances))

        for instance in cluster_instances:
            service_config = get_instance_config(
                service=service,
                cluster=cluster,
                instance=instance,
                soa_dir=soa_dir,
                load_deployments=False,
            )
            deploy_group = service_config.get_deploy_group()
            (deploy_tag, _) = get_latest_deployment_tag(remote_refs, deploy_group)

            if deploy_tag not in remote_refs:
                invalid_deploy_groups.append(deploy_group)
            else:
                force_bounce = utils.format_timestamp(datetime.datetime.utcnow())
                if isinstance(service_config, MarathonServiceConfig) and not marathon_message_printed:
                    print_marathon_message(desired_state)
                    marathon_message_printed = True
                elif isinstance(service_config, ChronosJobConfig) and not chronos_message_printed:
                    print_chronos_message(desired_state)
                    chronos_message_printed = True

                issue_state_change_for_service(
                    service_config=service_config,
                    force_bounce=force_bounce,
                    desired_state=desired_state,
                )

    return_val = 0
    if invalid_deploy_groups:
        print "No branches found for %s in %s." % \
            (", ".join(invalid_deploy_groups), remote_refs)
        print "Has %s been deployed there yet?" % service
        return_val = 1

    return return_val
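Both versions of paasta_start_or_stop stamp the requested state change with a fresh UTC timestamp that is passed along as the force_bounce token; a minimal sketch of producing such a token (the exact format string here is an assumption, not taken from utils.format_timestamp):

import datetime

def format_timestamp(dt):
    # Assumed format; utils.format_timestamp() in paasta_tools may differ.
    return dt.strftime('%Y%m%dT%H%M%S')

force_bounce = format_timestamp(datetime.datetime.utcnow())
print(force_bounce)  # e.g. '20240101T120000'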
Esempio n. 38
0
def configure_and_run_docker_container(
    docker_client,
    docker_url,
    docker_sha,
    service,
    instance,
    cluster,
    system_paasta_config,
    args,
    pull_image=False,
    dry_run=False,
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """

    if instance is None and args.healthcheck_only:
        paasta_print(
            "With --healthcheck-only, --instance MUST be provided!", file=sys.stderr
        )
        return 1
    if instance is None and not sys.stdin.isatty():
        paasta_print(
            "--instance and --cluster must be specified when using paasta local-run without a tty!",
            file=sys.stderr,
        )
        return 1

    soa_dir = args.yelpsoa_config_root
    volumes = list()
    load_deployments = (docker_url is None or pull_image) and not docker_sha
    interactive = args.interactive

    try:
        if instance is None:
            instance_type = "adhoc"
            instance = "interactive"
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(
                service, instance, cluster, soa_dir
            )
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        paasta_print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        paasta_print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s. "
                "You can generate this by running: "
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s"
                % {"soa_dir": soa_dir, "service": service}
            ),
            sep="\n",
            file=sys.stderr,
        )
        return 1

    if docker_sha is not None:
        instance_config.branch_dict = {
            "git_sha": docker_sha,
            "docker_image": build_docker_image_name(service=service, sha=docker_sha),
            "desired_state": "start",
            "force_bounce": None,
        }

    if docker_url is None:
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            if instance_config.get_deploy_group() is None:
                paasta_print(
                    PaastaColors.red(
                        f"Error: {service}.{instance} has no 'deploy_group' set. Please set one so "
                        "the proper image can be used to run for this service."
                    ),
                    sep="",
                    file=sys.stderr,
                )
            else:
                paasta_print(
                    PaastaColors.red(
                        "Error: No sha has been marked for deployment for the %s deploy group.\n"
                        "Please ensure this service has either run through a jenkins pipeline "
                        "or paasta mark-for-deployment has been run for %s\n"
                        % (instance_config.get_deploy_group(), service)
                    ),
                    sep="",
                    file=sys.stderr,
                )
            return 1

    if pull_image:
        docker_pull_image(docker_url)

    for volume in instance_config.get_volumes(system_paasta_config.get_volumes()):
        if os.path.exists(volume["hostPath"]):
            volumes.append(
                "{}:{}:{}".format(
                    volume["hostPath"], volume["containerPath"], volume["mode"].lower()
                )
            )
        else:
            paasta_print(
                PaastaColors.yellow(
                    "Warning: Path %s does not exist on this host. Skipping this binding."
                    % volume["hostPath"]
                ),
                file=sys.stderr,
            )

    if interactive is True and args.cmd is None:
        command = "bash"
    elif args.cmd:
        command = args.cmd
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command = format_command_for_type(
                command=command_from_config, instance_type=instance_type, date=args.date
            )
        else:
            command = instance_config.get_args()

    secret_provider_kwargs = {
        "vault_cluster_config": system_paasta_config.get_vault_cluster_config(),
        "vault_auth_method": args.vault_auth_method,
        "vault_token_file": args.vault_token_file,
    }

    return run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_url=docker_url,
        volumes=volumes,
        interactive=interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        user_port=args.user_port,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
        framework=instance_type,
        secret_provider_name=system_paasta_config.get_secret_provider_name(),
        secret_provider_kwargs=secret_provider_kwargs,
        skip_secrets=args.skip_secrets,
    )
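Unlike the older variants, the loop above only mounts volumes whose host path actually exists, so local-run does not fail on bindings that only make sense on production hosts; a small sketch of the same filter with invented paths:

import os

# Invented volume definitions for illustration.
candidate_volumes = [
    {'hostPath': '/tmp', 'containerPath': '/tmp', 'mode': 'RW'},
    {'hostPath': '/definitely/not/here', 'containerPath': '/srv/data', 'mode': 'RO'},
]

volumes = []
for volume in candidate_volumes:
    if os.path.exists(volume['hostPath']):
        volumes.append('{}:{}:{}'.format(
            volume['hostPath'], volume['containerPath'], volume['mode'].lower(),
        ))
    else:
        print('Warning: skipping missing host path %s' % volume['hostPath'])

print(volumes)  # typically ['/tmp:/tmp:rw'] on a host where /tmp exists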
Esempio n. 39
0
def configure_and_run_docker_container(docker_client, docker_hash, service, instance, cluster, args, pull_image=False):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        sys.stdout.write(PaastaColors.yellow(
            "Warning: Couldn't load config files from '/etc/paasta'. This indicates\n"
            "PaaSTA is not configured locally on this host, and local-run may not behave\n"
            "the same way it would behave on a server configured for PaaSTA.\n"
        ))
        system_paasta_config = SystemPaastaConfig({"volumes": []}, '/etc/paasta')

    volumes = list()
    instance_config = get_instance_config(
        service=service,
        instance=instance,
        cluster=cluster,
        load_deployments=pull_image,
        soa_dir=args.yelpsoa_config_root,
    )

    if pull_image:
        docker_url = get_docker_url(
            system_paasta_config.get_docker_registry(), instance_config.get_docker_image())
        docker_pull_image(docker_url)

        docker_hash = docker_url

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if type(extra_volumes) == dict:
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' % (volume['hostPath'], volume['containerPath'], volume['mode'].lower()))

    if args.interactive is True and args.cmd is None:
        command = ['bash']
    elif args.cmd:
        command = shlex.split(args.cmd)
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command = shlex.split(command_from_config)
        else:
            command = instance_config.get_args()

    hostname = socket.getfqdn()

    run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=args.interactive,
        command=command,
        hostname=hostname,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
    )
Esempio n. 40
0
def configure_and_run_docker_container(
        docker_client,
        docker_hash,
        service,
        instance,
        cluster,
        system_paasta_config,
        args,
        pull_image=False,
        dry_run=False
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """

    if instance is None and args.healthcheck_only:
        paasta_print(
            "With --healthcheck-only, --instance MUST be provided!",
            file=sys.stderr,
        )
        return 1
    if instance is None and not sys.stdin.isatty():
        paasta_print(
            "--instance and --cluster must be specified when using paasta local-run without a tty!",
            file=sys.stderr,
        )
        return 1

    soa_dir = args.yelpsoa_config_root
    volumes = list()
    load_deployments = docker_hash is None or pull_image
    interactive = args.interactive

    try:
        if instance is None:
            instance_type = 'adhoc'
            instance = 'interactive'
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(service, instance, cluster, soa_dir)
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        paasta_print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        paasta_print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s."
                "You can generate this by running:"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s" % {
                    'soa_dir': soa_dir,
                    'service': service,
                }
            ),
            sep='\n',
            file=sys.stderr,
        )
        return 1

    if docker_hash is None:
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            paasta_print(PaastaColors.red(
                "Error: No sha has been marked for deployment for the %s deploy group.\n"
                "Please ensure this service has either run through a jenkins pipeline "
                "or paasta mark-for-deployment has been run for %s\n" % (instance_config.get_deploy_group(), service)),
                sep='',
                file=sys.stderr,
            )
            return 1
        docker_hash = docker_url

    if pull_image:
        docker_pull_image(docker_url)

    # if only one volume specified, extra_volumes should be converted to a list
    extra_volumes = instance_config.get_extra_volumes()
    if type(extra_volumes) == dict:
        extra_volumes = [extra_volumes]

    for volume in system_paasta_config.get_volumes() + extra_volumes:
        volumes.append('%s:%s:%s' % (volume['hostPath'], volume['containerPath'], volume['mode'].lower()))

    if interactive is True and args.cmd is None:
        command = 'bash'
    elif args.cmd:
        command = args.cmd
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = command_modifier(command_from_config)
        else:
            command = instance_config.get_args()

    return run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        user_port=args.user_port,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
        framework=instance_type,
    )
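The command selection in these local-run variants follows the same precedence: an interactive shell, then an explicit --cmd, then the instance's configured cmd (optionally run through a framework-specific modifier, omitted here), then its configured args. A compact sketch of that precedence with invented inputs:

def pick_command(interactive, cli_cmd, config_cmd, config_args):
    # Simplified mirror of the precedence used above; names and inputs are invented.
    if interactive and cli_cmd is None:
        return 'bash'
    if cli_cmd:
        return cli_cmd
    if config_cmd:
        return config_cmd
    return config_args

print(pick_command(True, None, 'python -m my_service', ['--help']))   # 'bash'
print(pick_command(False, None, 'python -m my_service', ['--help']))  # 'python -m my_service'
print(pick_command(False, None, None, ['--help']))                    # ['--help']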
Esempio n. 41
0
def paasta_start_or_stop(args, desired_state):
    """Requests a change of state to start or stop given branches of a service."""
    soa_dir = args.soa_dir

    pargs = apply_args_filters(args)
    if len(pargs) == 0:
        return 1

    affected_services = {s for service_list in pargs.values() for s in service_list.keys()}
    if len(affected_services) > 1:
        paasta_print(PaastaColors.red("Warning: trying to start/stop/restart multiple services:"))

        for cluster, services_instances in pargs.items():
            paasta_print("Cluster %s:" % cluster)
            for service, instances in services_instances.items():
                paasta_print("    Service %s:" % service)
                paasta_print("        Instances %s" % ",".join(instances))

        if sys.stdin.isatty():
            confirm = choice.Binary('Are you sure you want to continue?', False).ask()
        else:
            confirm = False
        if not confirm:
            paasta_print()
            paasta_print("exiting")
            return 1

    invalid_deploy_groups = []
    marathon_message_printed, chronos_message_printed = False, False
    for cluster, services_instances in pargs.items():
        for service, instances in services_instances.items():
            try:
                remote_refs = remote_git.list_remote_refs(utils.get_git_url(service, soa_dir))
            except remote_git.LSRemoteException as e:
                msg = (
                    "Error talking to the git server: %s\n"
                    "This PaaSTA command requires access to the git server to operate.\n"
                    "The git server may be down or not reachable from here.\n"
                    "Try again from somewhere where the git server can be reached, "
                    "like your developer environment."
                ) % str(e)
                paasta_print(msg)
                return 1

            for instance in instances:
                service_config = get_instance_config(
                    service=service,
                    cluster=cluster,
                    instance=instance,
                    soa_dir=soa_dir,
                    load_deployments=False,
                )
                deploy_group = service_config.get_deploy_group()
                (deploy_tag, _) = get_latest_deployment_tag(remote_refs, deploy_group)

                if deploy_tag not in remote_refs:
                    invalid_deploy_groups.append(deploy_group)
                else:
                    force_bounce = utils.format_timestamp(datetime.datetime.utcnow())
                    if isinstance(service_config, MarathonServiceConfig) and not marathon_message_printed:
                        print_marathon_message(desired_state)
                        marathon_message_printed = True
                    elif isinstance(service_config, ChronosJobConfig) and not chronos_message_printed:
                        print_chronos_message(desired_state)
                        chronos_message_printed = True

                    issue_state_change_for_service(
                        service_config=service_config,
                        force_bounce=force_bounce,
                        desired_state=desired_state,
                    )

    return_val = 0
    if invalid_deploy_groups:
        paasta_print("No branches found for %s in %s." %
                     (", ".join(invalid_deploy_groups), remote_refs))
        paasta_print("Has %s been deployed there yet?" % service)
        return_val = 1

    return return_val