Example #1
def issue_state_change_for_service(service_config, force_bounce,
                                   desired_state):
    ref_mutator = make_mutate_refs_func(
        service_config=service_config,
        force_bounce=force_bounce,
        desired_state=desired_state,
    )
    git_url = utils.get_git_url(service_config.get_service())
    remote_git.create_remote_refs(git_url, ref_mutator)
    if "yelpcorp.com" in git_url:
        trigger_deploys(service_config.get_service())
    log_event(service_config=service_config, desired_state=desired_state)
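Here trigger_deploys is invoked only after the desired-state refs have been pushed via remote_git.create_remote_refs, and only for services whose git URL is hosted on yelpcorp.com; the state change is then recorded with log_event.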
Example #2
def test_trigger_deploys(mock_socket, mock_load_config):
    mock_load_config.return_value = SystemPaastaConfig({}, "/some/fake/dir")
    mock_client = mock_socket.return_value

    utils.trigger_deploys("a_service")

    assert mock_load_config.call_count == 1
    assert mock_client.connect.call_args_list == [
        mock.call(("sysgit.yelpcorp.com", 5049))
    ]
    assert mock_client.send.call_args_list == [
        mock.call("a_service\n".encode("utf-8"))
    ]
    assert mock_client.close.call_count == 1
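The mock_socket and mock_load_config arguments are presumably injected by mock.patch decorators that the snippet omits. Taken together, the assertions pin down the rough shape of trigger_deploys: load the system PaaSTA config once, open a TCP connection to sysgit.yelpcorp.com on port 5049, send the service name followed by a newline, and close the socket. A minimal sketch consistent with the test, not necessarily the exact paasta_tools implementation:

import socket

from paasta_tools.utils import load_system_paasta_config  # assumed import path


def trigger_deploys(service):
    # Reconstructed from the test assertions above; the real implementation
    # in paasta_tools may differ in details such as logging.
    load_system_paasta_config()  # the test asserts the config is loaded exactly once

    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client.connect(("sysgit.yelpcorp.com", 5049))
        client.send(f"{service}\n".encode("utf-8"))
    finally:
        client.close()

The try/finally ensures the socket is closed even if the connect or send fails, which still satisfies the single close call the test expects.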
Example #3
def main(args):
    changes_made = False
    updater = AutoConfigUpdater(
        config_source=args.source_id,
        git_remote=args.git_remote or get_default_git_remote(),
        branch=args.branch,
        working_dir=args.local_dir or "/nail/tmp",
        do_clone=args.local_dir is None,
    )
    with updater:
        deploy_file = updater.get_existing_configs(args.service, "deploy")
        smartstack_file = updater.get_existing_configs(args.service, "smartstack")
        shard_deploy_groups = {
            f"{prefix}.{args.shard_name}" for prefix in DEPLOY_MAPPINGS.keys()
        }
        pipeline_steps = {step["step"] for step in deploy_file["pipeline"]}

        if not shard_deploy_groups.issubset(pipeline_steps):
            changes_made = True
            steps_to_add = shard_deploy_groups - pipeline_steps

            # If the pipeline does not contain deploy groups for the service shard,
            # add the missing steps and write them to the deploy config.
            for step in steps_to_add:
                deploy_file["pipeline"].append(
                    {"step": step, "wait_for_deployment": True, "disabled": True}
                )
                log.info(f"{step} added to deploy config")
            updater.write_configs(args.service, "deploy", deploy_file)

            for deploy_prefix, config_paths in DEPLOY_MAPPINGS.items():
                for config_path in config_paths:
                    kube_file = updater.get_existing_configs(
                        args.service, config_path
                    )
                    instance_config = {
                        "deploy_group": f"{deploy_prefix}.{args.shard_name}",
                        "min_instances": args.min_instance_count,
                        "max_instances": (
                            args.prod_max_instance_count
                            if deploy_prefix == "prod"
                            else args.non_prod_max_instance_count
                        ),
                        "env": {
                            "PAASTA_SECRET_BUGSNAG_API_KEY": "SECRET(bugsnag_api_key)",
                        },
                    }
                    if args.metrics_provider is not None or args.setpoint is not None:
                        instance_config["autoscaling"] = {}
                        if args.metrics_provider is not None:
                            instance_config["autoscaling"][
                                "metrics_provider"
                            ] = args.metrics_provider
                        if args.setpoint is not None:
                            instance_config["autoscaling"]["setpoint"] = args.setpoint
                    if args.cpus is not None:
                        instance_config["cpus"] = args.cpus
                    if args.mem is not None:
                        instance_config["mem"] = args.mem
                    # If the service config does not contain a definition for the
                    # shard in this ecosystem, add it and write to the corresponding
                    # config file.
                    if args.shard_name not in kube_file:
                        kube_file[args.shard_name] = instance_config
                        updater.write_configs(args.service, config_path, kube_file)
                        log.info(
                            f"{deploy_prefix}.{args.shard_name} added to {config_path}"
                        )
        else:
            log.info(f"{args.shard_name} is in deploy config already.")

        # If the service shard is not defined in smartstack,
        # add the definition with a suggested proxy port.
        if args.shard_name not in smartstack_file:
            changes_made = True
            smartstack_file[args.shard_name] = {
                "proxy_port": None,
                "extra_advertise": {"ecosystem:devc": ["ecosystem:devc"]},
            }
            if args.timeout_client_ms:
                smartstack_file[args.shard_name][
                    "timeout_client_ms"
                ] = args.timeout_client_ms
            if args.timeout_server_ms:
                smartstack_file[args.shard_name][
                    "timeout_server_ms"
                ] = args.timeout_server_ms
            updater.write_configs(args.service, "smartstack", smartstack_file)
        else:
            log.info(
                f"{args.shard_name} is in smartstack config already, skipping."
            )

        # Only commit to remote if changes were made
        if changes_made:
            updater.commit_to_remote()
            trigger_deploys(args.service)
        else:
            # Exit with a status code indicating that nothing was changed.
            sys.exit(129)
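Note that the deploy is only triggered after updater.commit_to_remote() has pushed the accumulated changes; when nothing changed, the script instead exits with status 129 so callers can tell that no configs were modified.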