def print_kubernetes_status(
    service: str, instance: str, output: List[str], kubernetes_status
) -> int:
    """Append a human-readable Kubernetes status summary for an instance to *output*.

    Returns 1 if the API reported an error message, 0 otherwise.
    """
    if kubernetes_status.error_message:
        output.append(kubernetes_status.error_message)
        return 1

    # Summarize bounce progress and the operator-requested state.
    bounce_summary = bouncing_status_human(
        kubernetes_status.app_count, kubernetes_status.bounce_method
    )
    state_summary = desired_state_human(
        kubernetes_status.desired_state, kubernetes_status.expected_instance_count
    )
    output.append(f" State: {bounce_summary} - Desired state: {state_summary}")

    # Parse the raw deploy-status string before rendering it for humans.
    parsed_status = KubernetesDeployStatus.fromstring(kubernetes_status.deploy_status)
    deploy_status = kubernetes_app_deploy_status_human(parsed_status)
    job_summary = status_kubernetes_job_human(
        service=service,
        instance=instance,
        deploy_status=deploy_status,
        desired_app_id=kubernetes_status.app_id,
        app_count=kubernetes_status.app_count,
        running_instances=kubernetes_status.running_instance_count,
        normal_instance_count=kubernetes_status.expected_instance_count,
    )
    output.append(f" {job_summary}")
    return 0
def print_kubernetes_status(
    service: str, instance: str, kubernetes_status,
) -> int:
    """Print a human-readable Kubernetes status summary for an instance.

    Returns 1 if the API reported an error message, 0 otherwise.
    """
    if kubernetes_status.error_message:
        paasta_print(kubernetes_status.error_message)
        return 1

    # Summarize bounce progress and the operator-requested state.
    bounce_summary = bouncing_status_human(
        kubernetes_status.app_count, kubernetes_status.bounce_method,
    )
    state_summary = desired_state_human(
        kubernetes_status.desired_state, kubernetes_status.expected_instance_count,
    )
    paasta_print(f"State: {bounce_summary} - Desired state: {state_summary}")

    # Parse the raw deploy-status string before rendering it for humans.
    parsed_status = KubernetesDeployStatus.fromstring(kubernetes_status.deploy_status)
    deploy_status = kubernetes_app_deploy_status_human(parsed_status)
    paasta_print(
        status_kubernetes_job_human(
            service=service,
            instance=instance,
            deploy_status=deploy_status,
            desired_app_id=kubernetes_status.app_id,
            app_count=kubernetes_status.app_count,
            running_instances=kubernetes_status.running_instance_count,
            normal_instance_count=kubernetes_status.expected_instance_count,
        ),
    )
    return 0
def paasta_status_on_api_endpoint(cluster, service, instance, system_paasta_config, verbose):
    """Fetch an instance's status from the paasta API and print it.

    Returns 0 on success, 1 on an API-reported error, or the HTTP status
    code when the status request itself fails.
    """
    client = get_paasta_api_client(cluster, system_paasta_config)
    if not client:
        paasta_print('Cannot get a paasta-api client')
        exit(1)

    try:
        status = client.service.status_instance(
            service=service, instance=instance,
        ).result()
    except HTTPError as exc:
        # Surface the server's error body and propagate its status code.
        paasta_print(exc.response.text)
        return exc.status_code

    paasta_print('instance: %s' % PaastaColors.blue(instance))
    paasta_print('Git sha: %s (desired)' % status.git_sha)

    marathon_status = status.marathon
    if marathon_status is None:
        paasta_print(
            "Not implemented: Looks like %s is not a Marathon instance" % instance)
        return 0
    elif marathon_status.error_message:
        paasta_print(marathon_status.error_message)
        return 1

    bounce_summary = bouncing_status_human(
        marathon_status.app_count, marathon_status.bounce_method,
    )
    state_summary = desired_state_human(
        marathon_status.desired_state, marathon_status.expected_instance_count,
    )
    paasta_print("State: %s - Desired state: %s" % (bounce_summary, state_summary))

    # Render the deploy status; Delayed additionally reports backoff seconds.
    deploy_state = MarathonDeployStatus.fromstring(marathon_status.deploy_status)
    if deploy_state == MarathonDeployStatus.NotRunning:
        deploy_status = 'NotRunning'
    elif deploy_state == MarathonDeployStatus.Delayed:
        deploy_status = marathon_app_deploy_status_human(
            deploy_state, marathon_status.backoff_seconds)
    else:
        deploy_status = marathon_app_deploy_status_human(deploy_state)

    paasta_print(
        status_marathon_job_human(
            service=service,
            instance=instance,
            deploy_status=deploy_status,
            desired_app_id=marathon_status.app_id,
            app_count=marathon_status.app_count,
            running_instances=marathon_status.running_instance_count,
            normal_instance_count=marathon_status.expected_instance_count,
        ),
    )
    return 0
def print_marathon_status(
    service: str, instance: str, output: List[str], marathon_status
) -> int:
    """Append a human-readable Marathon status report for an instance to *output*.

    Covers desired state, job status, autoscaling, per-app detail, Mesos
    tasks, and smartstack backends. Returns 1 on an API-reported error,
    0 otherwise.
    """
    if marathon_status.error_message:
        output.append(marathon_status.error_message)
        return 1

    bounce_summary = bouncing_status_human(
        marathon_status.app_count, marathon_status.bounce_method
    )
    state_summary = desired_state_human(
        marathon_status.desired_state, marathon_status.expected_instance_count
    )
    output.append(f" Desired state: {bounce_summary} and {state_summary}")

    job_status_human = status_marathon_job_human(
        service=service,
        instance=instance,
        deploy_status=marathon_status.deploy_status,
        desired_app_id=marathon_status.desired_app_id,
        app_count=marathon_status.app_count,
        running_instances=marathon_status.running_instance_count,
        normal_instance_count=marathon_status.expected_instance_count,
    )
    output.append(f" {job_status_human}")

    if marathon_status.autoscaling_info:
        output.extend(
            f" {line}"
            for line in create_autoscaling_info_table(marathon_status.autoscaling_info)
        )

    # One detail section per Marathon app (old and new apps during a bounce).
    for app_status in marathon_status.app_statuses:
        output.extend(
            f" {line}"
            for line in marathon_app_status_human(
                marathon_status.desired_app_id, app_status
            )
        )

    mesos = marathon_status.mesos
    output.extend(
        f" {line}"
        for line in marathon_mesos_status_human(
            mesos.error_message,
            mesos.running_task_count or 0,
            marathon_status.expected_instance_count,
            mesos.running_tasks,
            mesos.non_running_tasks,
        )
    )

    if marathon_status.smartstack is not None:
        smartstack = marathon_status.smartstack
        output.extend(
            f" {line}"
            for line in get_smartstack_status_human(
                smartstack.registration,
                smartstack.expected_backends_per_location,
                smartstack.locations,
            )
        )
    return 0
def print_kubernetes_status(service: str, instance: str, output: List[str], kubernetes_status) -> int:
    """Append a human-readable Kubernetes status report for an instance to *output*.

    Covers desired state, job status, app creation time, pods, replicasets,
    and smartstack backends. Returns 1 on an API-reported error, 0 otherwise.
    """
    if kubernetes_status.error_message:
        output.append(kubernetes_status.error_message)
        return 1

    bounce_summary = bouncing_status_human(
        kubernetes_status.app_count, kubernetes_status.bounce_method)
    state_summary = desired_state_human(
        kubernetes_status.desired_state, kubernetes_status.expected_instance_count)
    output.append(f" State: {bounce_summary} - Desired state: {state_summary}")

    # Parse the raw deploy-status string before rendering it for humans.
    parsed_status = KubernetesDeployStatus.fromstring(kubernetes_status.deploy_status)
    deploy_status = kubernetes_app_deploy_status_human(parsed_status)
    job_summary = status_kubernetes_job_human(
        service=service,
        instance=instance,
        deploy_status=deploy_status,
        desired_app_id=kubernetes_status.app_id,
        app_count=kubernetes_status.app_count,
        running_instances=kubernetes_status.running_instance_count,
        normal_instance_count=kubernetes_status.expected_instance_count,
    )
    output.append(f" {job_summary}")

    if kubernetes_status.create_timestamp:
        created = datetime.fromtimestamp(kubernetes_status.create_timestamp)
        output.append(
            f" App created: {created} ({humanize.naturaltime(created)})."
            f" Namespace: {kubernetes_status.namespace}"
        )

    if kubernetes_status.pods and len(kubernetes_status.pods) > 0:
        output.append(" Pods:")
        output.extend(
            f" {line}"
            for line in format_kubernetes_pod_table(kubernetes_status.pods)
        )

    if kubernetes_status.replicasets and len(kubernetes_status.replicasets) > 0:
        output.append(" ReplicaSets:")
        output.extend(
            f" {line}"
            for line in format_kubernetes_replicaset_table(kubernetes_status.replicasets)
        )

    if kubernetes_status.smartstack is not None:
        smartstack = kubernetes_status.smartstack
        output.extend(
            f" {line}"
            for line in get_smartstack_status_human(
                smartstack.registration,
                smartstack.expected_backends_per_location,
                smartstack.locations,
            )
        )
    return 0
def print_marathon_status(service: str, instance: str, output: List[str], marathon_status) -> int:
    """Append a human-readable Marathon status report for an instance to *output*.

    Covers desired state, job status, autoscaling, per-app detail, Mesos
    tasks, and smartstack backends. Returns 1 on an API-reported error,
    0 otherwise.
    """
    if marathon_status.error_message:
        output.append(marathon_status.error_message)
        return 1

    bounce_summary = bouncing_status_human(
        marathon_status.app_count, marathon_status.bounce_method)
    state_summary = desired_state_human(
        marathon_status.desired_state, marathon_status.expected_instance_count)
    output.append(f" Desired state: {bounce_summary} and {state_summary}")

    job_status_human = status_marathon_job_human(
        service=service,
        instance=instance,
        deploy_status=marathon_status.deploy_status,
        desired_app_id=marathon_status.desired_app_id,
        app_count=marathon_status.app_count,
        running_instances=marathon_status.running_instance_count,
        normal_instance_count=marathon_status.expected_instance_count,
    )
    output.append(f" {job_status_human}")

    if marathon_status.autoscaling_info:
        output.extend(
            f" {line}"
            for line in create_autoscaling_info_table(marathon_status.autoscaling_info)
        )

    # One detail section per Marathon app (old and new apps during a bounce).
    for app_status in marathon_status.app_statuses:
        output.extend(
            f" {line}"
            for line in marathon_app_status_human(
                marathon_status.desired_app_id, app_status)
        )

    mesos = marathon_status.mesos
    output.extend(
        f" {line}"
        for line in marathon_mesos_status_human(
            mesos.error_message,
            mesos.running_task_count or 0,
            marathon_status.expected_instance_count,
            mesos.running_tasks,
            mesos.non_running_tasks,
        )
    )

    if marathon_status.smartstack is not None:
        smartstack = marathon_status.smartstack
        output.extend(
            f" {line}"
            for line in marathon_smartstack_status_human(
                smartstack.registration,
                smartstack.expected_backends_per_location,
                smartstack.locations,
                # TODO: this getattr is just to avoid rollout issues where the
                # client updates before the API server. It can be removed after
                # the new field first rolls out.
                getattr(smartstack, "error_message", None),
            )
        )
    return 0
def paasta_status_on_api_endpoint(cluster, service, instance, system_paasta_config, verbose):
    """Fetch an instance's status from the paasta API and print it.

    Returns 0 on success, 1 on an API-reported error, or the HTTP status
    code when the status request itself fails.
    """
    client = get_paasta_api_client(cluster, system_paasta_config)
    if not client:
        paasta_print('Cannot get a paasta-api client')
        exit(1)

    try:
        status = client.service.status_instance(
            service=service, instance=instance,
        ).result()
    except HTTPError as exc:
        # Surface the server's error body and propagate its status code.
        paasta_print(exc.response.text)
        return exc.status_code

    paasta_print('instance: %s' % PaastaColors.blue(instance))
    paasta_print('Git sha: %s (desired)' % status.git_sha)

    marathon_status = status.marathon
    if marathon_status is None:
        paasta_print(
            "Not implemented: Looks like %s is not a Marathon instance" % instance)
        return 0
    elif marathon_status.error_message:
        paasta_print(marathon_status.error_message)
        return 1

    bounce_summary = bouncing_status_human(
        marathon_status.app_count, marathon_status.bounce_method)
    state_summary = desired_state_human(
        marathon_status.desired_state, marathon_status.expected_instance_count)
    paasta_print("State: %s - Desired state: %s" % (bounce_summary, state_summary))

    # Render the deploy status; Delayed additionally reports backoff seconds.
    deploy_state = MarathonDeployStatus.fromstring(marathon_status.deploy_status)
    if deploy_state == MarathonDeployStatus.NotRunning:
        deploy_status = 'NotRunning'
    elif deploy_state == MarathonDeployStatus.Delayed:
        deploy_status = marathon_app_deploy_status_human(
            deploy_state, marathon_status.backoff_seconds)
    else:
        deploy_status = marathon_app_deploy_status_human(deploy_state)

    paasta_print(
        status_marathon_job_human(
            service,
            instance,
            deploy_status,
            marathon_status.app_id,
            marathon_status.running_instance_count,
            marathon_status.expected_instance_count,
        )
    )
    return 0
def print_marathon_status(
    service: str, instance: str, output: List[str], marathon_status,
) -> int:
    """Append a human-readable Marathon status summary for an instance to *output*.

    Returns 1 if the API reported an error message, 0 otherwise.
    """
    if marathon_status.error_message:
        output.append(marathon_status.error_message)
        return 1

    bounce_summary = bouncing_status_human(
        marathon_status.app_count, marathon_status.bounce_method,
    )
    state_summary = desired_state_human(
        marathon_status.desired_state, marathon_status.expected_instance_count,
    )
    output.append(f" State: {bounce_summary} - Desired state: {state_summary}")

    # Render the deploy status; Delayed additionally reports backoff seconds.
    deploy_state = MarathonDeployStatus.fromstring(marathon_status.deploy_status)
    if deploy_state == MarathonDeployStatus.NotRunning:
        deploy_status = 'NotRunning'
    elif deploy_state == MarathonDeployStatus.Delayed:
        deploy_status = marathon_app_deploy_status_human(
            deploy_state, marathon_status.backoff_seconds)
    else:
        deploy_status = marathon_app_deploy_status_human(deploy_state)

    job_summary = status_marathon_job_human(
        service=service,
        instance=instance,
        deploy_status=deploy_status,
        desired_app_id=marathon_status.app_id,
        app_count=marathon_status.app_count,
        running_instances=marathon_status.running_instance_count,
        normal_instance_count=marathon_status.expected_instance_count,
    )
    output.append(f" {job_summary}")
    return 0
def paasta_status_on_api_endpoint(cluster, service, instance, system_paasta_config, verbose):
    """Fetch an instance's status from the paasta API and print it.

    NOTE(review): the original body used Python 2 ``print`` statements, which
    are syntax errors on Python 3 (the rest of this file uses f-strings, i.e.
    Python 3). They are converted to ``print()`` calls; output is unchanged.
    Returns None; exits the process when no API client can be built.
    """
    client = get_paasta_api_client(cluster, system_paasta_config)
    if not client:
        print('Cannot get a paasta-api client')
        exit(1)
    try:
        status = client.service.status_instance(service=service, instance=instance).result()
    except HTTPError as exc:
        # Surface the server's error body; nothing more to report.
        print(exc.response.text)
        return
    print('instance: %s' % PaastaColors.blue(instance))
    print('Git sha: %s (desired)' % status.git_sha)
    marathon_status = status.marathon
    # Robustness: sibling versions of this function guard against a missing
    # marathon section; without this, a non-Marathon instance would raise
    # AttributeError on the .error_message access below.
    if marathon_status is None:
        print("Not implemented: Looks like %s is not a Marathon instance" % instance)
        return
    if marathon_status.error_message:
        print(marathon_status.error_message)
        return
    bouncing_status = bouncing_status_human(
        marathon_status.app_count, marathon_status.bounce_method)
    desired_state = desired_state_human(
        marathon_status.desired_state, marathon_status.expected_instance_count)
    print("State: %s - Desired state: %s" % (bouncing_status, desired_state))
    # Render the deploy status; Delayed additionally reports backoff seconds.
    status = MarathonDeployStatus.fromstring(marathon_status.deploy_status)
    if status != MarathonDeployStatus.NotRunning:
        if status == MarathonDeployStatus.Delayed:
            deploy_status = marathon_app_deploy_status_human(
                status, marathon_status.backoff_seconds)
        else:
            deploy_status = marathon_app_deploy_status_human(status)
    else:
        deploy_status = 'NotRunning'
    print(
        status_marathon_job_human(
            service,
            instance,
            deploy_status,
            marathon_status.app_id,
            marathon_status.running_instance_count,
            marathon_status.expected_instance_count,
        )
    )