def report_status_for_cluster(service, cluster, deploy_pipeline, actual_deployments, instance_whitelist, verbose=False): """With a given service and cluster, prints the status of the instances in that cluster""" # Get cluster.instance in the order in which they appear in deploy.yaml print print "cluster: %s" % cluster seen_instances = [] for namespace in deploy_pipeline: cluster_in_pipeline, instance = namespace.split('.') seen_instances.append(instance) if cluster_in_pipeline != cluster: continue if instance_whitelist and instance not in instance_whitelist: continue # Case: service deployed to cluster.instance if namespace in actual_deployments: formatted_instance = PaastaColors.blue(instance) version = actual_deployments[namespace][:8] # TODO: Perform sanity checks once per cluster instead of for each namespace status = execute_paasta_serviceinit_on_remote_master('status', cluster, service, instance, verbose=verbose) # Case: service NOT deployed to cluster.instance else: formatted_instance = PaastaColors.red(instance) version = 'None' status = None print ' instance: %s' % formatted_instance print ' Git sha: %s' % version if status is not None: for line in status.rstrip().split('\n'): print ' %s' % line print report_invalid_whitelist_values(instance_whitelist, seen_instances, 'instance')
def test_execute_paasta_serviceinit_status_on_remote_master_happy_path(
    mock_run_paasta_serviceinit,
    mock_find_connectable_master,
    mock_calculate_remote_masters,
):
    # Happy path: masters are resolved, one is connectable, and serviceinit
    # runs on it; the call's return value is passed straight through.
    cluster = 'fake_cluster_name'
    service = 'fake_service'
    instancename = 'fake_instance'
    masters = ('fake_master1', 'fake_master2', 'fake_master3')
    mock_calculate_remote_masters.return_value = (masters, None)
    mock_find_connectable_master.return_value = ('fake_connectable_master', None)

    actual = utils.execute_paasta_serviceinit_on_remote_master('status', cluster, service, instancename)

    mock_calculate_remote_masters.assert_called_once_with(cluster)
    mock_find_connectable_master.assert_called_once_with(masters)
    mock_run_paasta_serviceinit.assert_called_once_with('status', 'fake_connectable_master', service, instancename, cluster)
    assert actual == mock_run_paasta_serviceinit.return_value
def paasta_emergency_restart(args): """Performs an emergency restart on a given service instance on a given cluster Warning: This command is only intended to be used in an emergency. It should not be needed in normal circumstances. See the service-docs for paasta emergency-stop and emergency-start for details of what exactly this does: http://servicedocs.yelpcorp.com/docs/paasta_tools/generated/paasta_tools.paasta_cli.cmds.emergency_stop.html http://servicedocs.yelpcorp.com/docs/paasta_tools/generated/paasta_tools.paasta_cli.cmds.emergency_start.html """ service = figure_out_service_name(args) print "Performing an emergency restart on %s...\n" % compose_job_id(service, args.instance) execute_paasta_serviceinit_on_remote_master('restart', args.cluster, service, args.instance) print "%s" % "\n".join(paasta_emergency_restart.__doc__.splitlines()[-7:]) print "Run this to see the status:" print "paasta status --service %s --clusters %s" % (service, args.cluster)
def paasta_emergency_restart(args):
    """Performs an emergency restart on a given service instance on a given cluster

    Warning: This command is only intended to be used in an emergency. It
    should not be needed in normal circumstances.

    See the service-docs for paasta emergency-stop and emergency-start for
    details of what exactly this does:

    http://servicedocs.yelpcorp.com/docs/paasta_tools/generated/paasta_tools.paasta_cli.cmds.emergency_stop.html
    http://servicedocs.yelpcorp.com/docs/paasta_tools/generated/paasta_tools.paasta_cli.cmds.emergency_start.html
    """
    # Resolve the canonical service name from the CLI args.
    service = figure_out_service_name(args)
    print "Performing an emergency restart on %s...\n" % compose_job_id(service, args.instance)
    # Run `paasta_serviceinit restart` on a master in the target cluster.
    execute_paasta_serviceinit_on_remote_master("restart", args.cluster, service, args.instance)
    # NOTE: the docstring tail is printed to the operator; keep the line
    # count in sync with the [-7:] slice below.
    print "%s" % "\n".join(paasta_emergency_restart.__doc__.splitlines()[-7:])
    print "Run this to see the status:"
    print "paasta status --service %s --clusters %s" % (service, args.cluster)
def report_status_for_cluster(service, cluster, deploy_pipeline, actual_deployments, verbose=False): """With a given service and cluster, prints the status of the instances in that cluster""" # Get cluster.instance in the order in which they appear in deploy.yaml print print "cluster: %s" % cluster for namespace in deploy_pipeline: cluster_in_pipeline, instance = namespace.split('.') if cluster_in_pipeline != cluster: # This function only prints things that are relevant to cluster # We skip anything not in this cluster continue # Case: service deployed to cluster.instance if namespace in actual_deployments: unformatted_instance = instance instance = PaastaColors.blue(instance) version = actual_deployments[namespace][:8] # TODO: Perform sanity checks once per cluster instead of for each namespace status = execute_paasta_serviceinit_on_remote_master('status', cluster, service, unformatted_instance, verbose=verbose) # Case: service NOT deployed to cluster.instance else: instance = PaastaColors.red(instance) version = 'None' status = None print ' instance: %s' % instance print ' Git sha: %s' % version if status is not None: for line in status.rstrip().split('\n'): print ' %s' % line
def paasta_emergency_start(args): """Performs an emergency start on a given service instance on a given cluster Warning: This command is not magic and cannot actually get a service to start if it couldn't run before. This includes configurations that prevent the service from running, such as 'instances: 0' (for Marathon apps). All it does for Marathon apps is ask Marathon to resume normal operation by scaling up to the instance count defined in the service's config. All it does for Chronos jobs is send the latest version of the job config to Chronos and run it immediately. """ service = figure_out_service_name(args) print "Performing an emergency start on %s..." % compose_job_id(service, args.instance) execute_paasta_serviceinit_on_remote_master('start', args.cluster, service, args.instance) print "%s" % "\n".join(paasta_emergency_start.__doc__.splitlines()[-8:]) print "Run this command to see the status:" print "paasta status --service %s --clusters %s" % (service, args.cluster)
def paasta_emergency_start(args):
    """Performs an emergency start on a given service instance on a given cluster

    Warning: This command is not magic and cannot actually get a service to
    start if it couldn't run before. This includes configurations that prevent
    the service from running, such as 'instances: 0' (for Marathon apps).

    All it does for Marathon apps is ask Marathon to resume normal operation
    by scaling up to the instance count defined in the service's config.
    All it does for Chronos jobs is send the latest version of the job config
    to Chronos and run it immediately.
    """
    # Resolve the canonical service name from the CLI args.
    service = figure_out_service_name(args)
    print "Performing an emergency start on %s..." % compose_job_id(service, args.instance)
    # Run `paasta_serviceinit start` on a master in the target cluster.
    execute_paasta_serviceinit_on_remote_master("start", args.cluster, service, args.instance)
    # NOTE: the docstring tail is printed to the operator; keep the line
    # count in sync with the [-8:] slice below.
    print "%s" % "\n".join(paasta_emergency_start.__doc__.splitlines()[-8:])
    print "Run this command to see the status:"
    print "paasta status --service %s --clusters %s" % (service, args.cluster)
def paasta_emergency_scale(args): """Performs an emergency scale on a given service instance on a given cluster Warning: This command does not permanently scale the service. The next time the service is updated (config change, deploy, bounce, etc.), those settings will override the emergency scale. If you want this scale to be permanant, adjust the relevant config file to reflect that. For example, this can be done for Marathon apps by setting 'instances: n' """ service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root) print "Performing an emergency scale on %s..." % compose_job_id(service, args.instance) output = execute_paasta_serviceinit_on_remote_master('scale', args.cluster, service, args.instance, app_id=args.appid, delta=args.delta) print "Output: %s" % output print "%s" % "\n".join(paasta_emergency_scale.__doc__.splitlines()[-7:])
def test_execute_paasta_serviceinit_on_remote_no_connectable_master(
    mock_run_paasta_serviceinit,
    mock_check_ssh_and_sudo_on_master,
    mock_find_connectable_master,
    mock_calculate_remote_masters,
):
    # When no master is connectable, no ssh/sudo check runs and the returned
    # text carries both a generic error and the specific failure message.
    cluster = 'fake_cluster_name'
    mock_calculate_remote_masters.return_value = (['fake_master'], None)
    mock_find_connectable_master.return_value = (None, "fake_err_msg")

    actual = utils.execute_paasta_serviceinit_on_remote_master('status', cluster, 'fake_service', 'fake_instance')

    assert mock_check_ssh_and_sudo_on_master.call_count == 0
    assert 'ERROR: could not find connectable master in cluster %s' % cluster in actual
    assert "fake_err_msg" in actual
def paasta_emergency_stop(args): """Performs an emergency stop on a given service instance on a given cluster Warning: This command does not permanently stop the service. The next time the service is updated (config change, deploy, bounce, etc.), those settings will override the emergency stop. If you want this stop to be permanant, adjust the relevant config file to reflect that. For example, this can be done for Marathon apps by setting 'instances: 0', or for Chronos jobs by setting 'disabled: True'. Alternatively, remove the config yaml entirely. """ service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root) print "Performing an emergency stop on %s..." % compose_job_id(service, args.instance) output = execute_paasta_serviceinit_on_remote_master('stop', args.cluster, service, args.instance, app_id=args.appid) print "Output: %s" % output print "%s" % "\n".join(paasta_emergency_stop.__doc__.splitlines()[-7:]) print "To start this service again asap, run:" print "paasta emergency-start --service %s --instance %s --cluster %s" % (service, args.instance, args.cluster)
def report_status_for_cluster(service, cluster, deploy_pipeline, actual_deployments, instance_whitelist, verbose=False): """With a given service and cluster, prints the status of the instances in that cluster""" # Get cluster.instance in the order in which they appear in deploy.yaml print print "cluster: %s" % cluster seen_instances = [] for namespace in deploy_pipeline: cluster_in_pipeline, instance = namespace.split('.') seen_instances.append(instance) if cluster_in_pipeline != cluster: continue if instance_whitelist and instance not in instance_whitelist: continue # Case: service deployed to cluster.instance if namespace in actual_deployments: formatted_instance = PaastaColors.blue(instance) version = actual_deployments[namespace][:8] # TODO: Perform sanity checks once per cluster instead of for each namespace status = execute_paasta_serviceinit_on_remote_master( 'status', cluster, service, instance, verbose=verbose) # Case: service NOT deployed to cluster.instance else: formatted_instance = PaastaColors.red(instance) version = 'None' status = None print ' instance: %s' % formatted_instance print ' Git sha: %s' % version if status is not None: for line in status.rstrip().split('\n'): print ' %s' % line print report_invalid_whitelist_values(instance_whitelist, seen_instances, 'instance')
def paasta_emergency_stop(args):
    """Performs an emergency stop on a given service instance on a given cluster

    Warning: This command does not permanently stop the service. The next
    time the service is updated (config change, deploy, bounce, etc.), those
    settings will override the emergency stop.

    If you want this stop to be permanant, adjust the relevant config file
    to reflect that. For example, this can be done for Marathon apps by
    setting 'instances: 0', or for Chronos jobs by setting 'disabled: True'.
    Alternatively, remove the config yaml entirely.
    """
    # Resolve the canonical service name from the CLI args.
    service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root)
    print "Performing an emergency stop on %s..." % compose_job_id(
        service, args.instance)
    # Run `paasta_serviceinit stop` on a master in the target cluster.
    output = execute_paasta_serviceinit_on_remote_master('stop', args.cluster,
                                                         service,
                                                         args.instance,
                                                         app_id=args.appid)
    print "Output: %s" % output
    # NOTE: the docstring tail is printed to the operator; keep the line
    # count in sync with the [-7:] slice below.
    print "%s" % "\n".join(paasta_emergency_stop.__doc__.splitlines()[-7:])
    print "To start this service again asap, run:"
    print "paasta emergency-start --service %s --instance %s --cluster %s" % (
        service, args.instance, args.cluster)