Example #1
def status_marathon_job_verbose(
    service: str,
    instance: str,
    clients: marathon_tools.MarathonClients,
    cluster: str,
    soa_dir: str,
    job_config: marathon_tools.MarathonServiceConfig,
    dashboards: Dict[marathon_tools.MarathonClient, str],
) -> Tuple[List[MarathonTask], str]:
    """Returns detailed information about a marathon apps for a service
    and instance. Does not make assumptions about what the *exact*
    appid is, but instead does a fuzzy match on any marathon apps
    that match the given service.instance"""
    all_tasks: List[MarathonTask] = []
    all_output: List[str] = []
    # For verbose mode, we want to see *any* matching app, as it may not
    # be the one that we think should be deployed. For example, during a
    # bounce we want to see both the old and the new ones.
    marathon_apps_with_clients = marathon_tools.get_marathon_apps_with_clients(
        clients=clients.get_all_clients_for_service(job_config),
        embed_tasks=True,
    )

    autoscaling_info = get_autoscaling_info(clients, job_config)
    if autoscaling_info:
        all_output.append("  Autoscaling Info:")
        headers = [
            field.replace("_", " ").capitalize()
            for field in ServiceAutoscalingInfo._fields
        ]
        table = [headers, autoscaling_info]
        all_output.append('\n'.join(
            ["    %s" % line for line in format_table(table)]))

    for app, client in marathon_tools.get_matching_apps_with_clients(
            service, instance, marathon_apps_with_clients):
        tasks, output = get_verbose_status_of_marathon_app(
            marathon_client=client,
            app=app,
            service=service,
            instance=instance,
            cluster=cluster,
            soa_dir=soa_dir,
            dashboards=dashboards,
        )
        all_tasks.extend(tasks)
        all_output.append(output)
    return all_tasks, "\n".join(all_output)
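
A minimal usage sketch for the function above (hypothetical: the service, instance, and cluster names, the soa_dir path, and the empty dashboards mapping are placeholder values, and it assumes a MarathonClients object and a loaded MarathonServiceConfig are already available):

from paasta_tools import marathon_tools  # assumed import path from the paasta codebase


def example_print_verbose_status(
    clients: marathon_tools.MarathonClients,
    job_config: marathon_tools.MarathonServiceConfig,
) -> None:
    # Placeholder values; real callers derive these from CLI args and system config.
    tasks, output = status_marathon_job_verbose(
        service="example_service",
        instance="main",
        clients=clients,
        cluster="example-cluster",
        soa_dir="/nail/etc/services",  # conventional yelpsoa-configs location
        job_config=job_config,
        dashboards={},  # no Marathon dashboard URLs configured
    )
    print("Matched %d task(s) across all matching marathon apps" % len(tasks))
    print(output)
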
Example #2
def deploy_marathon_service(
    service: str,
    instance: str,
    clients: marathon_tools.MarathonClients,
    soa_dir: str,
    marathon_apps_with_clients: Optional[Collection[Tuple[MarathonApp,
                                                          MarathonClient]]],
) -> Tuple[int, Optional[float]]:
    """deploy the service instance given and proccess return code
    if there was an error we send a sensu alert.

    :param service: The service name to setup
    :param instance: The instance of the service to setup
    :param clients: A MarathonClients object
    :param soa_dir: Path to yelpsoa configs
    :param marathon_apps: A list of all marathon app objects
    :returns: A tuple of (status, bounce_in_seconds) to be used by paasta-deployd
        bounce_in_seconds instructs how long until the deployd should try another bounce
        None means that it is in a steady state and doesn't need to bounce again
    """
    short_id = marathon_tools.format_job_id(service, instance)
    try:
        with bounce_lib.bounce_lock_zookeeper(short_id):
            try:
                service_instance_config = marathon_tools.load_marathon_service_config_no_cache(
                    service,
                    instance,
                    load_system_paasta_config().get_cluster(),
                    soa_dir=soa_dir,
                )
            except NoDeploymentsAvailable:
                log.debug(
                    "No deployments found for %s.%s in cluster %s. Skipping." %
                    (service, instance,
                     load_system_paasta_config().get_cluster()))
                return 0, None
            except NoConfigurationForServiceError:
                error_msg = "Could not read marathon configuration file for %s.%s in cluster %s" % \
                            (service, instance, load_system_paasta_config().get_cluster())
                log.error(error_msg)
                return 1, None

            if marathon_apps_with_clients is None:
                marathon_apps_with_clients = marathon_tools.get_marathon_apps_with_clients(
                    clients=clients.get_all_clients_for_service(
                        job_config=service_instance_config),
                    embed_tasks=True,
                )

            try:
                with a_sync.idle_event_loop():
                    status, output, bounce_again_in_seconds = setup_service(
                        service=service,
                        instance=instance,
                        clients=clients,
                        job_config=service_instance_config,
                        marathon_apps_with_clients=marathon_apps_with_clients,
                        soa_dir=soa_dir,
                    )
                sensu_status = pysensu_yelp.Status.CRITICAL if status else pysensu_yelp.Status.OK
                send_event(service, instance, soa_dir, sensu_status, output)
                return 0, bounce_again_in_seconds
            except (KeyError, TypeError, AttributeError, InvalidInstanceConfig,
                    NoSlavesAvailableError):
                error_str = traceback.format_exc()
                log.error(error_str)
                send_event(service, instance, soa_dir,
                           pysensu_yelp.Status.CRITICAL, error_str)
                return 1, None
    except bounce_lib.LockHeldException:
        log.error("Instance %s already being bounced. Exiting", short_id)
        return 0, None
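
A minimal sketch of how a caller such as paasta-deployd might drive the function above (hypothetical: the service and instance names and the soa_dir path are placeholders, and it assumes a MarathonClients object is already constructed; passing None for marathon_apps_with_clients lets the function fetch the app list itself):

from paasta_tools import marathon_tools  # assumed import path from the paasta codebase


def example_deploy_once(clients: marathon_tools.MarathonClients) -> None:
    status, bounce_in_seconds = deploy_marathon_service(
        service="example_service",  # placeholder service name
        instance="main",  # placeholder instance name
        clients=clients,
        soa_dir="/nail/etc/services",  # conventional yelpsoa-configs location
        marathon_apps_with_clients=None,  # let the function fetch the app list itself
    )
    if status != 0:
        print("Deploy failed; a Sensu alert was sent with the details.")
    elif bounce_in_seconds is None:
        print("Steady state; no further bounce needed.")
    else:
        print("Bounce again in %s seconds." % bounce_in_seconds)
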
Example #3
def status_marathon_job(
    service: str,
    instance: str,
    cluster: str,
    soa_dir: str,
    dashboards: Dict[marathon_tools.MarathonClient, str],
    normal_instance_count: int,
    clients: marathon_tools.MarathonClients,
    job_config: marathon_tools.MarathonServiceConfig,
    desired_app_id: str,
    verbose: int,
) -> Tuple[List[MarathonTask], str]:
    marathon_apps_with_clients = marathon_tools.get_marathon_apps_with_clients(
        clients=clients.get_all_clients_for_service(job_config),
        embed_tasks=True,
        service_name=service,
    )
    all_tasks = []
    all_output = [
        ""
    ]  # One entry that will be replaced with status_marathon_job_human output later.

    running_instances = 0

    if verbose > 0:
        autoscaling_info = get_autoscaling_info(marathon_apps_with_clients, job_config)
        if autoscaling_info:
            all_output.append("  Autoscaling Info:")
            headers = [
                field.replace("_", " ").capitalize()
                for field in ServiceAutoscalingInfo._fields
            ]
            table = [headers, humanize_autoscaling_info(autoscaling_info)]
            all_output.append(
                "\n".join(["    %s" % line for line in format_table(table)])
            )

    deploy_status_for_desired_app = "Waiting for bounce"
    matching_apps_with_clients = marathon_tools.get_matching_apps_with_clients(
        service, instance, marathon_apps_with_clients
    )
    for app, client in matching_apps_with_clients:
        all_tasks.extend(app.tasks)
        (
            deploy_status_for_current_app,
            running_instances_for_current_app,
            out,
        ) = status_marathon_app(
            marathon_client=client,
            app=app,
            service=service,
            instance=instance,
            cluster=cluster,
            soa_dir=soa_dir,
            dashboards=dashboards,
            verbose=verbose,
        )
        if app.id.lstrip("/") == desired_app_id.lstrip("/"):
            deploy_status_for_desired_app = marathon_tools.MarathonDeployStatus.tostring(
                deploy_status_for_current_app
            )

        running_instances += running_instances_for_current_app
        all_output.append(out)

    all_output[0] = status_marathon_job_human(
        service=service,
        instance=instance,
        deploy_status=deploy_status_for_desired_app,
        desired_app_id=desired_app_id,
        app_count=len(matching_apps_with_clients),
        running_instances=running_instances,
        normal_instance_count=normal_instance_count,
    )

    return all_tasks, "\n".join(all_output)
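
A minimal usage sketch for the function above (hypothetical: desired_app_id would normally come from the deployment/bounce logic via marathon_tools.format_job_id, and the other placeholder values mirror the earlier sketches):

from paasta_tools import marathon_tools  # assumed import path from the paasta codebase


def example_report_status(
    clients: marathon_tools.MarathonClients,
    job_config: marathon_tools.MarathonServiceConfig,
    desired_app_id: str,
) -> str:
    tasks, human_output = status_marathon_job(
        service="example_service",  # placeholder service name
        instance="main",  # placeholder instance name
        cluster="example-cluster",  # placeholder cluster name
        soa_dir="/nail/etc/services",  # conventional yelpsoa-configs location
        dashboards={},  # no Marathon dashboard URLs configured
        normal_instance_count=3,  # expected instance count for this sketch
        clients=clients,
        job_config=job_config,
        desired_app_id=desired_app_id,
        verbose=1,  # include the autoscaling info table
    )
    print("Found %d task(s)" % len(tasks))
    return human_output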