def paasta_emergency_start(args):
    """Performs an emergency start on a given service instance on a given cluster

    All it does for Chronos jobs is send the latest version of the job config to
    Chronos and run it immediately.
    """
    system_paasta_config = load_system_paasta_config()
    service = figure_out_service_name(args, soa_dir=args.soa_dir)
    paasta_print("Performing an emergency start on %s..." % compose_job_id(service, args.instance))
    return_code, output = execute_paasta_serviceinit_on_remote_master(
        subcommand='start',
        cluster=args.cluster,
        service=service,
        instances=args.instance,
        system_paasta_config=system_paasta_config,
    )
    _log_audit(
        action='emergency-start',
        service=service,
        cluster=args.cluster,
        instance=args.instance,
    )
    paasta_print("%s" % "\n".join(paasta_emergency_start.__doc__.splitlines()[-8:]))
    paasta_print("Output: %s" % PaastaColors.grey(output))
    paasta_print("Run this command to see the status:")
    paasta_print(f"paasta status --service {service} --clusters {args.cluster}")

    return return_code

def paasta_emergency_stop(args):
    """Performs an emergency stop on a given service instance on a given cluster"""
    system_paasta_config = load_system_paasta_config()
    service = figure_out_service_name(args, soa_dir=args.soa_dir)
    paasta_print("Performing an emergency stop on %s..." % compose_job_id(service, args.instance))
    return_code, output = execute_paasta_serviceinit_on_remote_master(
        subcommand="stop",
        cluster=args.cluster,
        service=service,
        instances=args.instance,
        system_paasta_config=system_paasta_config,
    )
    _log_audit(
        action="emergency-stop",
        service=service,
        cluster=args.cluster,
        instance=args.instance,
    )
    paasta_print("Output: %s" % output)
    paasta_print("%s" % "\n".join(paasta_emergency_stop.__doc__.splitlines()[-7:]))
    paasta_print("To start this service again asap, run:")
    paasta_print(
        f"paasta emergency-start --service {service} --instance {args.instance} --cluster {args.cluster}"
    )

    return return_code

def log_event(service_config, desired_state):
    user = utils.get_username()
    host = socket.getfqdn()
    line = "Issued request to change state of {} (an instance of {}) to '{}' by {}@{}".format(
        service_config.get_instance(),
        service_config.get_service(),
        desired_state,
        user,
        host,
    )
    utils._log(
        service=service_config.get_service(),
        level="event",
        cluster=service_config.get_cluster(),
        instance=service_config.get_instance(),
        component="deploy",
        line=line,
    )
    utils._log_audit(
        action=desired_state,
        service=service_config.get_service(),
        cluster=service_config.get_cluster(),
        instance=service_config.get_instance(),
    )

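# Illustrative sketch only (not part of the PaaSTA codebase): the call sites in this
# section all pass utils._log_audit the same small set of keyword arguments (action,
# action_details, service, cluster, instance). The hypothetical helper below just
# collects those kwargs for a service instance, e.g. so a test can compare them
# against a mocked utils._log_audit call.
def _example_audit_kwargs(service_config, desired_state):
    """Return the keyword arguments log_event() above hands to utils._log_audit."""
    return {
        "action": desired_state,
        "service": service_config.get_service(),
        "cluster": service_config.get_cluster(),
        "instance": service_config.get_instance(),
    }
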
def mark_for_deployment(git_url, deploy_group, service, commit):
    """Mark a docker image for deployment"""
    tag = get_paasta_tag_from_deploy_group(
        identifier=deploy_group, desired_state="deploy"
    )
    remote_tag = format_tag(tag)
    ref_mutator = remote_git.make_force_push_mutate_refs_func(
        targets=[remote_tag], sha=commit
    )

    max_attempts = 3
    for attempt in range(1, max_attempts + 1):
        try:
            remote_git.create_remote_refs(
                git_url=git_url, ref_mutator=ref_mutator, force=True
            )
        except Exception:
            logline = "Failed to mark {} for deployment in deploy group {}! (attempt {}/{})".format(
                commit, deploy_group, attempt, max_attempts
            )
            _log(service=service, line=logline, component="deploy", level="event")
            time.sleep(5 * attempt)
        else:
            logline = f"Marked {commit} for deployment in deploy group {deploy_group}"
            _log(service=service, line=logline, component="deploy", level="event")

            audit_action_details = {"deploy_group": deploy_group, "commit": commit}
            _log_audit(
                action="mark-for-deployment",
                action_details=audit_action_details,
                service=service,
            )
            return 0
    return 1

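# Illustrative sketch only (not part of the PaaSTA codebase): the retry shape used by
# mark_for_deployment above is a for/else loop - the `else` (success) branch returns 0
# immediately, while falling out of the loop after max_attempts failures returns 1.
# The function and its arguments below are hypothetical, just mirroring that pattern.
def _example_retry_with_backoff(push_refs, max_attempts=3, base_delay=5):
    """Retry push_refs() with a linearly growing delay, mirroring the loop above."""
    import time  # local import so the sketch is self-contained

    for attempt in range(1, max_attempts + 1):
        try:
            push_refs()
        except Exception:
            time.sleep(base_delay * attempt)  # wait longer after each failed attempt
        else:
            return 0  # success on this attempt
    return 1  # every attempt failed
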
def paasta_emergency_restart(args):
    """Performs an emergency restart on a given service instance on a given cluster

    Warning: This command is only intended to be used in an emergency. It should
    not be needed in normal circumstances.
    """
    service = figure_out_service_name(args, args.soa_dir)
    system_paasta_config = load_system_paasta_config()
    paasta_print("Performing an emergency restart on %s...\n" % compose_job_id(service, args.instance))
    return_code, output = execute_paasta_serviceinit_on_remote_master(
        subcommand='restart',
        cluster=args.cluster,
        service=service,  # use the resolved service name, not the raw args.service
        instances=args.instance,
        system_paasta_config=system_paasta_config,
    )
    _log_audit(
        action='emergency-restart',
        service=service,
        cluster=args.cluster,
        instance=args.instance,
    )
    paasta_print("Output: %s" % output)
    paasta_print("%s" % "\n".join(paasta_emergency_restart.__doc__.splitlines()[-7:]))
    paasta_print("Run this to see the status:")
    paasta_print(f"paasta status --service {service} --clusters {args.cluster}")

    return return_code

def paasta_autoscale(args):
    log.setLevel(logging.DEBUG)
    service = figure_out_service_name(args)
    api = client.get_paasta_api_client(cluster=args.cluster, http_res=True)
    if not api:
        paasta_print("Could not connect to paasta api. Maybe you misspelled the cluster?")
        return 1

    if args.set is None:
        log.debug("Getting the current autoscaler count...")
        res, http = api.autoscaler.get_autoscaler_count(
            service=service, instance=args.instance
        ).result()
    else:
        log.debug(f"Setting desired instances to {args.set}.")
        body = {"desired_instances": int(args.set)}
        res, http = api.autoscaler.update_autoscaler_count(
            service=service, instance=args.instance, json_body=body
        ).result()
        _log_audit(
            action="manual-scale",
            action_details=body,
            service=service,
            instance=args.instance,
            cluster=args.cluster,
        )
    log.debug(f"Res: {res} Http: {http}")
    print(res["desired_instances"])
    return 0

def paasta_pause_service_autoscaler(args):
    """With a given cluster and duration, pauses the paasta service autoscaler
    in that cluster for duration minutes"""
    if args.duration > MAX_PAUSE_DURATION:
        if not args.force:
            paasta_print('Specified duration: {d} longer than max: {m}'.format(
                d=args.duration,
                m=MAX_PAUSE_DURATION,
            ))
            paasta_print('If you are really sure, run again with --force')
            return 3

    if args.info:
        return_code = get_service_autoscale_pause_time(args.cluster)
    elif args.resume:
        return_code = delete_service_autoscale_pause_time(args.cluster)
        _log_audit(
            action='resume-service-autoscaler',
            cluster=args.cluster,
        )
    else:
        minutes = args.duration
        return_code = update_service_autoscale_pause_time(args.cluster, minutes)
        _log_audit(
            action='pause-service-autoscaler',
            action_details={'duration': minutes},
            cluster=args.cluster,
        )

    return return_code

def paasta_push_to_registry(args: argparse.Namespace) -> int:
    """Upload a docker image to a registry"""
    service = args.service
    if service and service.startswith("services-"):
        service = service.split("services-", 1)[1]
    validate_service_name(service, args.soa_dir)

    image_identifier = build_image_identifier(args.commit, None, args.image_version)

    if not args.force:
        try:
            if is_docker_image_already_in_registry(
                service, args.soa_dir, args.commit, args.image_version
            ):
                print(
                    "The docker image is already in the PaaSTA docker registry. "
                    "I'm NOT overriding the existing image. "
                    "Add --force to override the image in the registry if you are sure what you are doing."
                )
                return 0
        except RequestException as e:
            registry_uri = get_service_docker_registry(service, args.soa_dir)
            print(
                "Can not connect to the PaaSTA docker registry '%s' to verify if this image exists.\n"
                "%s" % (registry_uri, str(e))
            )
            return 1

    cmd = build_command(service, args.commit, args.image_version)
    loglines = []
    returncode, output = _run(
        cmd,
        timeout=3600,
        log=True,
        component="build",
        service=service,
        loglevel="debug",
    )
    if returncode != 0:
        loglines.append("ERROR: Failed to promote image for %s." % image_identifier)
        output = get_jenkins_build_output_url()
        if output:
            loglines.append("See output: %s" % output)
    else:
        loglines.append("Successfully pushed image for %s to registry" % image_identifier)
        _log_audit(
            action="push-to-registry",
            action_details={"commit": args.commit},
            service=service,
        )
    for logline in loglines:
        _log(service=service, line=logline, component="build", level="event")
    return returncode

def paasta_autoscale(args):
    log.setLevel(logging.DEBUG)
    service = figure_out_service_name(args)
    api = client.get_paasta_oapi_client(cluster=args.cluster, http_res=True)
    if not api:
        print("Could not connect to paasta api. Maybe you misspelled the cluster?")
        return 1

    try:
        if args.set is None:
            log.debug("Getting the current autoscaler count...")
            res, status, _ = api.autoscaler.get_autoscaler_count(
                service=service, instance=args.instance, _return_http_data_only=False
            )
        else:
            log.debug(f"Setting desired instances to {args.set}.")
            msg = paastamodels.AutoscalerCountMsg(desired_instances=int(args.set))
            res, status, _ = api.autoscaler.update_autoscaler_count(
                service=service,
                instance=args.instance,
                autoscaler_count_msg=msg,
                _return_http_data_only=False,
            )
            _log_audit(
                action="manual-scale",
                action_details=str(msg),
                service=service,
                instance=args.instance,
                cluster=args.cluster,
            )
    except api.api_error as exc:
        status = exc.status

    if not 200 <= status <= 299:
        print(
            PaastaColors.red(
                f"ERROR: '{args.instance}' is not configured to autoscale, "
                f"so paasta autoscale could not scale it up on demand. "
                f"If you want to be able to boost this service, please configure autoscaling for the service "
                f"in its config file by setting min and max instances. Example: \n"
                f"{args.instance}:\n"
                f"  min_instances: 5\n"
                f"  max_instances: 50"
            )
        )
        return 0

    log.debug(f"Res: {res} Http: {status}")
    print(res.desired_instances)
    return 0

def paasta_cook_image(args, service=None, soa_dir=None):
    """Build a docker image"""
    if not service:
        service = args.service
    if service.startswith('services-'):
        service = service.split('services-', 1)[1]
    if not soa_dir:
        soa_dir = args.yelpsoa_config_root
    validate_service_name(service, soa_dir)

    run_env = os.environ.copy()
    default_tag = 'paasta-cook-image-{}-{}'.format(service, get_username())
    tag = run_env.get('DOCKER_TAG', default_tag)
    run_env['DOCKER_TAG'] = tag

    if not makefile_responds_to('cook-image'):
        paasta_print(
            'ERROR: local-run now requires a cook-image target to be present in the Makefile. See '
            'http://paasta.readthedocs.io/en/latest/about/contract.html',
            file=sys.stderr,
        )
        return 1

    try:
        cmd = 'make cook-image'
        returncode, output = _run(
            cmd,
            env=run_env,
            log=True,
            component='build',
            service=service,
            loglevel='debug',
        )
        if returncode != 0:
            _log(
                service=service,
                line='ERROR: make cook-image failed for %s.' % service,
                component='build',
                level='event',
            )
        else:
            action_details = {
                'tag': tag,
            }
            _log_audit(
                action='cook-image',
                action_details=action_details,
                service=service,
            )
        return returncode
    except KeyboardInterrupt:
        paasta_print('\nProcess interrupted by the user. Cancelling.', file=sys.stderr)
        return 2

def paasta_push_to_registry(args):
    """Upload a docker image to a registry"""
    service = args.service
    if service and service.startswith('services-'):
        service = service.split('services-', 1)[1]
    validate_service_name(service, args.soa_dir)

    if not args.force:
        try:
            if is_docker_image_already_in_registry(service, args.soa_dir, args.commit):
                paasta_print(
                    "The docker image is already in the PaaSTA docker registry. "
                    "I'm NOT overriding the existing image. "
                    "Add --force to override the image in the registry if you are sure what you are doing.",
                )
                return 0
        except RequestException as e:
            registry_uri = get_service_docker_registry(service, args.soa_dir)
            paasta_print(
                "Can not connect to the PaaSTA docker registry '%s' to verify if this image exists.\n"
                "%s" % (registry_uri, str(e))
            )
            return 1

    cmd = build_command(service, args.commit)
    loglines = []
    returncode, output = _run(
        cmd,
        timeout=3600,
        log=True,
        component='build',
        service=service,
        loglevel='debug',
    )
    if returncode != 0:
        loglines.append('ERROR: Failed to promote image for %s.' % args.commit)
        output = get_jenkins_build_output_url()
        if output:
            loglines.append('See output: %s' % output)
    else:
        loglines.append('Successfully pushed image for %s to registry' % args.commit)
        _log_audit(
            action='push-to-registry',
            action_details={'commit': args.commit},
            service=service,
        )
    for logline in loglines:
        _log(
            service=service,
            line=logline,
            component='build',
            level='event',
        )
    return returncode

def execute_paasta_cluster_boost_on_remote_master(
    clusters,
    system_paasta_config,
    action,
    pool,
    duration=None,
    override=None,
    boost=None,
    verbose=0,
):
    """Run run_paasta_cluster_boost() on the remote master of each given cluster.

    Returns a tuple of (return_code, output) aggregated across all clusters; the
    output contains an error message for any cluster that could not be reached.
    """
    result = {}
    for cluster in clusters:
        try:
            master = connectable_master(cluster, system_paasta_config)
        except NoMasterError as e:
            result[cluster] = (255, str(e))
            # No reachable master for this cluster, so skip it.
            continue

        result[cluster] = run_paasta_cluster_boost(
            master=master,
            action=action,
            pool=pool,
            duration=duration,
            override=override,
            boost=boost,
            verbose=verbose,
        )
        audit_details = {
            'boost_action': action,
            'pool': pool,
            'duration': duration,
            'override': override,
            'boost': boost,
        }
        _log_audit(
            action='cluster-boost',
            action_details=audit_details,
            cluster=cluster,
        )

    aggregated_code = 0
    aggregated_output = ""
    for cluster in result:
        code = result[cluster][0]
        output = result[cluster][1]
        if not code == 0:
            aggregated_code = 1
            aggregated_output += f"\n{cluster}: \n{output}\n"
    return (aggregated_code, aggregated_output)

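# Illustrative sketch only (not part of the PaaSTA codebase): a caller of
# execute_paasta_cluster_boost_on_remote_master receives the aggregated
# (return_code, output) tuple built above - 0 with empty output when every cluster
# succeeded, 1 plus the per-cluster error text otherwise. The helper name is
# hypothetical.
def _example_report_boost_result(boost_result):
    """Print any per-cluster errors and return the aggregate exit code."""
    return_code, output = boost_result
    if return_code != 0:
        print(f"paasta cluster boost failed on some clusters:{output}")
    return return_code
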
def paasta_generate_pipeline(args):
    """Generate a Jenkins build pipeline.

    :param args: argparse.Namespace obj created from sys.args by cli
    """
    service = args.service or guess_service_name()
    soa_dir = DEFAULT_SOA_DIR
    try:
        validate_service_name(service, soa_dir=soa_dir)
    except NoSuchService as service_not_found:
        paasta_print(service_not_found)
        return 1

    generate_pipeline(service=service, soa_dir=soa_dir)
    _log_audit(action='generate-pipeline', service=service)

def paasta_cook_image(args, service=None, soa_dir=None):
    """Build a docker image"""
    if not service:
        service = args.service
    if service.startswith("services-"):
        service = service.split("services-", 1)[1]
    if not soa_dir:
        soa_dir = args.yelpsoa_config_root
    validate_service_name(service, soa_dir)

    run_env = os.environ.copy()
    default_tag = "paasta-cook-image-{}-{}".format(service, get_username())
    tag = run_env.get("DOCKER_TAG", default_tag)
    run_env["DOCKER_TAG"] = tag

    if not makefile_responds_to("cook-image"):
        print(
            "ERROR: local-run now requires a cook-image target to be present in the Makefile. See "
            "http://paasta.readthedocs.io/en/latest/about/contract.html",
            file=sys.stderr,
        )
        return 1

    try:
        cmd = "make cook-image"
        returncode, output = _run(
            cmd,
            env=run_env,
            log=True,
            component="build",
            service=service,
            loglevel="debug",
        )
        if returncode != 0:
            _log(
                service=service,
                line="ERROR: make cook-image failed for %s." % service,
                component="build",
                level="event",
            )
        else:
            action_details = {"tag": tag}
            _log_audit(action="cook-image", action_details=action_details, service=service)
        return returncode
    except KeyboardInterrupt:
        print("\nProcess interrupted by the user. Cancelling.", file=sys.stderr)
        return 2

def paasta_autoscale(args):
    log.setLevel(logging.DEBUG)
    service = figure_out_service_name(args)
    api = client.get_paasta_api_client(cluster=args.cluster, http_res=True)
    if not api:
        paasta_print("Could not connect to paasta api. Maybe you misspelled the cluster?")
        return 1

    try:
        if args.set is None:
            log.debug("Getting the current autoscaler count...")
            res, http = api.autoscaler.get_autoscaler_count(
                service=service, instance=args.instance
            ).result()
        else:
            log.debug(f"Setting desired instances to {args.set}.")
            body = {"desired_instances": int(args.set)}
            res, http = api.autoscaler.update_autoscaler_count(
                service=service, instance=args.instance, json_body=body
            ).result()
            _log_audit(
                action="manual-scale",
                action_details=body,
                service=service,
                instance=args.instance,
                cluster=args.cluster,
            )
    except HTTPNotFound:
        paasta_print(
            PaastaColors.red(
                f"ERROR: '{args.instance}' is not configured to autoscale, "
                f"so paasta autoscale could not scale it up on demand. "
                f"If you want to be able to boost this service, please configure autoscaling for the service "
                f"in its config file by setting min and max instances. Example: \n"
                f"{args.instance}:\n"
                f"  min_instances: 5\n"
                f"  max_instances: 50"
            )
        )
        return 0

    log.debug(f"Res: {res} Http: {http}")
    print(res["desired_instances"])
    return 0

def paasta_secret(args):
    if args.shared:
        service = SHARED_SECRET_SERVICE
        if not args.clusters:
            print("A list of clusters is required for shared secrets.")
            sys.exit(1)
    else:
        service = args.service
    secret_provider = _get_secret_provider_for_service(service, cluster_names=args.clusters)
    if args.action in ["add", "update"]:
        plaintext = get_plaintext_input(args)
        if not plaintext:
            print("Warning: Given plaintext is an empty string.")
        secret_provider.write_secret(
            action=args.action,
            secret_name=args.secret_name,
            plaintext=plaintext,
            cross_environment_motivation=args.cross_env_motivation,
        )
        secret_path = os.path.join(secret_provider.secret_dir, f"{args.secret_name}.json")
        _log_audit(
            action=f"{args.action}-secret",
            action_details={"secret_name": args.secret_name, "clusters": args.clusters},
            service=service,
        )

        print_paasta_helper(secret_path, args.secret_name, args.shared)
    elif args.action == "decrypt":
        print(
            decrypt_secret(secret_provider=secret_provider, secret_name=args.secret_name),
            end="",
        )
    else:
        print("Unknown action")
        sys.exit(1)

def paasta_rollback(args):
    """Call mark_for_deployment with rollback parameters

    :param args: contains all the arguments passed onto the script: service,
    deploy groups and sha. These arguments will be verified and passed onto
    mark_for_deployment.
    """
    soa_dir = args.soa_dir
    service = figure_out_service_name(args, soa_dir)

    deploy_info = get_deploy_info(service=service, soa_dir=args.soa_dir)
    deploy_authz_check(deploy_info, service)

    git_url = get_git_url(service, soa_dir)
    given_deploy_groups = {
        deploy_group for deploy_group in args.deploy_groups.split(",") if deploy_group
    }

    all_deploy_groups = list_deploy_groups(service=service, soa_dir=soa_dir)
    deploy_groups, invalid = validate_given_deploy_groups(
        all_deploy_groups, given_deploy_groups
    )

    if len(invalid) > 0:
        print(
            PaastaColors.yellow(
                "These deploy groups are not valid and will be skipped: %s.\n"
                % (",").join(invalid)
            )
        )

    if len(deploy_groups) == 0:
        print(
            PaastaColors.red(
                "ERROR: No valid deploy groups specified for %s.\n" % (service)
            )
        )
        return 1

    git_shas = get_git_shas_for_service(service, deploy_groups, soa_dir)
    commit = args.commit
    if not commit:
        print("Please specify a commit to mark for rollback (-k, --commit).")
        list_previous_commits(
            service, deploy_groups, bool(given_deploy_groups), git_shas
        )
        return 1
    elif commit not in git_shas and not args.force:
        print(PaastaColors.red("This Git SHA has never been deployed before."))
        print("Please double check it or use --force to skip this verification.\n")
        list_previous_commits(
            service, deploy_groups, bool(given_deploy_groups), git_shas
        )
        return 1

    returncode = 0

    for deploy_group in deploy_groups:
        rolled_back_from = get_currently_deployed_sha(service, deploy_group)
        returncode |= mark_for_deployment(
            git_url=git_url, service=service, deploy_group=deploy_group, commit=commit
        )

        # we could also gate this by the return code from m-f-d, but we probably care more about someone wanting to
        # rollback than we care about if the underlying machinery was successfully able to complete the request
        if rolled_back_from != commit:
            audit_action_details = {
                "rolled_back_from": rolled_back_from,
                "rolled_back_to": commit,
                "rollback_type": RollbackTypes.USER_INITIATED_ROLLBACK.value,
                "deploy_group": deploy_group,
            }
            _log_audit(
                action="rollback", action_details=audit_action_details, service=service
            )

    return returncode

def paasta_cook_image(
    args: Optional[argparse.Namespace],
    service: Optional[str] = None,
    soa_dir: Optional[str] = None,
) -> int:
    """Build a docker image"""
    if not service:
        if args is None:
            print(
                "ERROR: No arguments or service passed to cook-image - unable to determine what service to cook an image for",
                file=sys.stderr,
            )
            return 1
        service = args.service
    if service and service.startswith("services-"):
        service = service.split("services-", 1)[1]

    if not soa_dir:
        if args is None:
            print(
                "ERROR: No arguments or soadir passed to cook-image - unable to determine where to look for soa-configs",
                file=sys.stderr,
            )
            return 1
        soa_dir = args.yelpsoa_config_root

    validate_service_name(service, soa_dir)

    run_env = os.environ.copy()
    if args is not None and args.commit is not None:
        # if we're given a commit, we're likely being called by Jenkins or someone
        # trying to push the cooked image to our registry - as such, we should tag
        # the cooked image as `paasta itest` would.
        tag = build_docker_tag(service, args.commit, args.image_version)
    else:
        default_tag = "paasta-cook-image-{}-{}".format(service, get_username())
        tag = run_env.get("DOCKER_TAG", default_tag)
    run_env["DOCKER_TAG"] = tag

    if not makefile_responds_to("cook-image"):
        print(
            "ERROR: local-run now requires a cook-image target to be present in the Makefile. See "
            "http://paasta.readthedocs.io/en/latest/about/contract.html.",
            file=sys.stderr,
        )
        return 1

    try:
        cmd = "make cook-image"
        returncode, output = _run(
            cmd,
            env=run_env,
            log=True,
            component="build",
            service=service,
            loglevel="debug",
        )
        if returncode != 0:
            _log(
                service=service,
                line="ERROR: make cook-image failed for %s." % service,
                component="build",
                level="event",
            )
        else:
            action_details = {"tag": tag}
            _log_audit(
                action="cook-image", action_details=action_details, service=service
            )
        return returncode
    except KeyboardInterrupt:
        print("\nProcess interrupted by the user. Cancelling.", file=sys.stderr)
        return 2

def paasta_rerun(args):
    """Reruns a Chronos job.

    :param args: argparse.Namespace obj created from sys.args by cli
    """
    system_paasta_config = load_system_paasta_config()
    soa_dir = args.soa_dir
    service = figure_out_service_name(args, soa_dir)  # exit with an error if the service doesn't exist
    if args.execution_date:
        execution_date = args.execution_date
    else:
        execution_date = None

    all_clusters = list_clusters(soa_dir=soa_dir)
    actual_deployments = get_actual_deployments(service, soa_dir)  # cluster.instance: sha
    if actual_deployments:
        deploy_pipeline = list(get_planned_deployments(service, soa_dir))  # cluster.instance
        deployed_clusters = list_deployed_clusters(deploy_pipeline, actual_deployments)
        deployed_cluster_instance = _get_cluster_instance(actual_deployments.keys())

    if args.clusters is not None:
        clusters = args.clusters.split(",")
    else:
        clusters = deployed_clusters

    for cluster in clusters:
        paasta_print("cluster: %s" % cluster)

        if cluster not in all_clusters:
            paasta_print("  Warning: \"%s\" does not look like a valid cluster." % cluster)
            continue
        if cluster not in deployed_clusters:
            paasta_print(
                f"  Warning: service \"{service}\" has not been deployed to \"{cluster}\" yet."
            )
            continue
        if not deployed_cluster_instance[cluster].get(args.instance, False):
            paasta_print(
                "  Warning: instance \"%s\" is either invalid "
                "or has not been deployed to \"%s\" yet." % (args.instance, cluster)
            )
            continue

        try:
            chronos_job_config = chronos_tools.load_chronos_job_config(
                service,
                args.instance,
                cluster,
                load_deployments=False,
                soa_dir=soa_dir,
            )
            if chronos_tools.uses_time_variables(chronos_job_config) and execution_date is None:
                paasta_print(
                    "  Warning: \"%s\" uses time variables interpolation, "
                    "please supply a `--execution_date` argument." % args.instance
                )
                continue
        except NoConfigurationForServiceError as e:
            paasta_print("  Warning: %s" % e)
            continue

        if execution_date is None:
            execution_date = _get_default_execution_date()

        related_job_configs = get_related_jobs_configs(cluster, service, args.instance)

        if not args.rerun_type and len(related_job_configs) > 1:
            instance_names = sorted([
                f'- {srv}{chronos_tools.INTERNAL_SPACER}{inst}'
                for srv, inst in related_job_configs
                if srv != service or inst != args.instance
            ])
            paasta_print(PaastaColors.red('  error'))
            paasta_print(
                'Instance {instance} has dependency relations with the following jobs:\n'
                '{relations}\n'
                '\n'
                'Please specify the rerun policy via the --rerun-type argument'.format(
                    instance=args.instance,
                    relations='\n'.join(instance_names),
                ),
            )
            return

        formatted_execution_date = execution_date.strftime(chronos_tools.EXECUTION_DATE_FORMAT)

        rc, output = execute_chronos_rerun_on_remote_master(
            service=service,
            instancename=args.instance,
            cluster=cluster,
            verbose=args.verbose,
            execution_date=formatted_execution_date,
            system_paasta_config=system_paasta_config,
            run_all_related_jobs=args.rerun_type == 'graph',
            force_disabled=args.force_disabled,
        )
        if rc == 0:
            paasta_print(PaastaColors.green('  successfully created job'))
            _log_audit(
                action='chronos-rerun',
                action_details={
                    'rerun_type': args.rerun_type,
                    'execution_date': formatted_execution_date,
                },
                service=service,
                instance=args.instance,
                cluster=cluster,
            )
        else:
            paasta_print(PaastaColors.red('  error'))
            paasta_print(output)

def paasta_rollback(args: argparse.Namespace) -> int:
    """Call mark_for_deployment with rollback parameters

    :param args: contains all the arguments passed onto the script: service,
    deploy groups and sha. These arguments will be verified and passed onto
    mark_for_deployment.
    """
    soa_dir = args.soa_dir
    service = figure_out_service_name(args, soa_dir)

    deploy_info = get_deploy_info(service=service, soa_dir=args.soa_dir)
    if not can_user_deploy_service(deploy_info, service):
        return 1

    git_url = get_git_url(service, soa_dir)

    if args.all_deploy_groups:
        given_deploy_groups = list_deploy_groups(service=service, soa_dir=soa_dir)
    else:
        given_deploy_groups = {
            deploy_group for deploy_group in args.deploy_groups.split(",") if deploy_group
        }

    all_deploy_groups = list_deploy_groups(service=service, soa_dir=soa_dir)
    deploy_groups, invalid = validate_given_deploy_groups(
        all_deploy_groups, given_deploy_groups
    )

    if len(invalid) > 0:
        print(
            PaastaColors.yellow(
                "These deploy groups are not valid and will be skipped: %s.\n"
                % (",").join(invalid)
            )
        )

    if len(deploy_groups) == 0 and not args.all_deploy_groups:
        print(
            PaastaColors.red(
                "ERROR: No valid deploy groups specified for %s.\n"
                "Use the flag -a to rollback all valid deploy groups for this service" % (service)
            )
        )
        return 1

    versions = get_versions_for_service(service, deploy_groups, soa_dir)
    commit = args.commit
    image_version = args.image_version
    new_version = DeploymentVersion(sha=commit, image_version=image_version)
    if not commit:
        print("Please specify a commit to mark for rollback (-k, --commit).")
        list_previous_versions(
            service, deploy_groups, bool(given_deploy_groups), versions
        )
        return 1
    elif new_version not in versions and not args.force:
        print(
            PaastaColors.red(
                f"This version {new_version} has never been deployed before."
            )
        )
        print("Please double check it or use --force to skip this verification.\n")
        list_previous_versions(
            service, deploy_groups, bool(given_deploy_groups), versions
        )
        return 1

    # TODO: Add similar check for when image_version is empty and no-commit redeploys is enforced for requested deploy_group

    returncode = 0

    for deploy_group in deploy_groups:
        rolled_back_from = get_currently_deployed_version(service, deploy_group)
        returncode |= mark_for_deployment(
            git_url=git_url,
            service=service,
            deploy_group=deploy_group,
            commit=commit,
            image_version=image_version,
        )

        # we could also gate this by the return code from m-f-d, but we probably care more about someone wanting to
        # rollback than we care about if the underlying machinery was successfully able to complete the request
        if rolled_back_from != new_version:
            audit_action_details = {
                "rolled_back_from": str(rolled_back_from),
                "rolled_back_to": str(new_version),
                "rollback_type": RollbackTypes.USER_INITIATED_ROLLBACK.value,
                "deploy_group": deploy_group,
            }
            _log_audit(
                action="rollback",
                action_details=audit_action_details,
                service=service,
            )

    return returncode