Example #1
def test_verify_stack_creation_retry(boto3_stubber, mocker):
    sleep_mock = mocker.patch("pcluster.utils.time.sleep")
    mocker.patch(
        "pcluster.utils.get_stack",
        side_effect=[{
            "StackStatus": "CREATE_IN_PROGRESS"
        }, {
            "StackStatus": "CREATE_FAILED"
        }],
    )
    mocked_requests = [
        MockedBoto3Request(
            method="describe_stack_events",
            response="Error",
            expected_params={"StackName": FAKE_STACK_NAME},
            generate_error=True,
            error_code="Throttling",
        ),
        MockedBoto3Request(
            method="describe_stack_events",
            response={"StackEvents": [_generate_stack_event()]},
            expected_params={"StackName": FAKE_STACK_NAME},
        ),
    ]
    client = boto3_stubber("cloudformation", mocked_requests * 2)
    assert_that(utils.verify_stack_creation(FAKE_STACK_NAME,
                                            client)).is_false()
    sleep_mock.assert_called_with(5)
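The mocks above pin down the contract of pcluster.utils.verify_stack_creation: poll the stack status through get_stack, fetch the stack events on each pass, retry describe_stack_events after a 5-second sleep when the API throttles, and return False when the stack ends in CREATE_FAILED. A minimal sketch consistent with those mocks follows; only get_stack, the "Throttling" error code, and the 5-second back-off come from the test, the rest is an assumption rather than the actual implementation.

import time

from botocore.exceptions import ClientError


def verify_stack_creation(stack_name, cfn_client):
    """Sketch: wait until the stack leaves CREATE_IN_PROGRESS; True on success."""
    status = "CREATE_IN_PROGRESS"
    while status == "CREATE_IN_PROGRESS":
        # get_stack is the sibling helper in pcluster.utils that the test patches
        status = get_stack(stack_name, cfn_client).get("StackStatus")
        events = None
        while events is None:
            try:
                events = cfn_client.describe_stack_events(StackName=stack_name).get("StackEvents")
            except ClientError as e:
                if e.response["Error"]["Code"] == "Throttling":
                    time.sleep(5)  # the back-off asserted via sleep_mock
                else:
                    raise
    return status == "CREATE_COMPLETE"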
Example #2
def _create_network_stack(configuration, parameters):
    LOGGER.info("Creating CloudFormation stack...")
    LOGGER.info("Do not leave the terminal until the process has finished")
    stack_name = "parallelclusternetworking-{0}{1}".format(configuration.stack_name_prefix, TIMESTAMP)
    version = pkg_resources.get_distribution("aws-parallelcluster").version
    try:
        cfn_client = boto3.client("cloudformation")
        stack = cfn_client.create_stack(
            StackName=stack_name,
            TemplateURL="{0}networking/{1}-{2}.cfn.json".format(
                get_templates_bucket_path(), configuration.template_name, version
            ),
            Parameters=parameters,
            Capabilities=["CAPABILITY_IAM"],
        )
        LOGGER.debug("StackId: {0}".format(stack.get("StackId")))
        LOGGER.info("Stack Name: {0}".format(stack_name))
        if not verify_stack_creation(stack_name, cfn_client):
            LOGGER.error("Could not create the network configuration")
            sys.exit(1)  # exit non-zero so the failure is visible to callers
        print()
        LOGGER.info("The stack has been created")
        return get_stack(stack_name, cfn_client).get("Outputs")
    except KeyboardInterrupt:
        print()
        LOGGER.info(
            "Unable to update the configuration file with the selected network configuration. "
            "Please manually check the status of the CloudFormation stack: {0}".format(stack_name)
        )
    except Exception as e:  # Any exception is a problem
        print()
        LOGGER.error(
            "An exception occured while creating the CloudFormation stack: {0}. "
            "For details please check log file: {1}".format(stack_name, get_cli_log_file())
        )
        LOGGER.critical(e)
        sys.exit(1)
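A hypothetical call site for the function above; the NetworkConfig shape and every parameter key and value are illustrative assumptions, not taken from the real easyconfig code.

from collections import namedtuple

# Stand-in for the configuration object; it only needs the two attributes
# the function reads (stack_name_prefix and template_name).
NetworkConfig = namedtuple("NetworkConfig", ["stack_name_prefix", "template_name"])
configuration = NetworkConfig(stack_name_prefix="pub", template_name="public")

# boto3's create_stack expects Parameters as a list of key/value dicts
parameters = [
    {"ParameterKey": "VpcId", "ParameterValue": "vpc-0123456789abcdef0"},
    {"ParameterKey": "PublicCIDR", "ParameterValue": "10.0.0.0/24"},
]
outputs = _create_network_stack(configuration, parameters)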
Example #3
def create(args):  # noqa: C901 FIXME!!!
    LOGGER.info("Beginning cluster creation for cluster: %s", args.cluster_name)
    LOGGER.debug("Building cluster config based on args %s", str(args))

    _validate_cluster_name(args.cluster_name)

    # Build the config based on args
    pcluster_config = PclusterConfig(
        config_file=args.config_file, cluster_label=args.cluster_template, fail_on_file_absence=True
    )
    pcluster_config.validate()

    # Automatic SIT -> HIT conversion, if needed
    HitConverter(pcluster_config).convert()

    # get CFN parameters, template url and tags from config
    storage_data = pcluster_config.to_storage()
    cfn_params = storage_data.cfn_params

    _check_for_updates(pcluster_config)

    bucket_name = None
    artifact_directory = None
    cleanup_bucket = False
    try:
        cfn_client = boto3.client("cloudformation")
        stack_name = utils.get_stack_name(args.cluster_name)

        # merge tags from configuration, command-line and internal ones
        tags = _evaluate_tags(pcluster_config, preferred_tags=args.tags)

        bucket_name, artifact_directory, cleanup_bucket = _setup_bucket_with_resources(
            pcluster_config, storage_data, stack_name, tags
        )
        cfn_params["ResourcesS3Bucket"] = bucket_name
        cfn_params["ArtifactS3RootDirectory"] = artifact_directory
        cfn_params["RemoveBucketOnDeletion"] = str(cleanup_bucket)

        LOGGER.info("Creating stack named: %s", stack_name)

        # determine the CloudFormation Template URL to use
        template_url = evaluate_pcluster_template_url(pcluster_config, preferred_template_url=args.template_url)

        # append extra parameters from command-line
        if args.extra_parameters:
            LOGGER.debug("Adding extra parameters to the CFN parameters")
            cfn_params.update(dict(args.extra_parameters))

        # prepare input parameters for stack creation and create the stack
        LOGGER.debug(cfn_params)
        params = [{"ParameterKey": key, "ParameterValue": value} for key, value in cfn_params.items()]
        stack = cfn_client.create_stack(
            StackName=stack_name,
            TemplateURL=template_url,
            Parameters=params,
            Capabilities=["CAPABILITY_IAM"],
            DisableRollback=args.norollback,
            Tags=tags,
        )
        LOGGER.debug("StackId: %s", stack.get("StackId"))

        if not args.nowait:
            verified = utils.verify_stack_creation(stack_name, cfn_client)
            LOGGER.info("")
            result_stack = utils.get_stack(stack_name, cfn_client)
            _print_stack_outputs(result_stack)
            if not verified:
                sys.exit(1)
        else:
            stack_status = utils.get_stack(stack_name, cfn_client).get("StackStatus")
            LOGGER.info("Status: %s", stack_status)
    except ClientError as e:
        LOGGER.critical(e.response.get("Error").get("Message"))
        sys.stdout.flush()
        utils.cleanup_s3_resources(bucket_name, artifact_directory, cleanup_bucket)
        sys.exit(1)
    except KeyboardInterrupt:
        LOGGER.info("\nExiting...")
        if not utils.stack_exists(stack_name):
            # Cleanup S3 artifacts if stack is not created yet
            utils.cleanup_s3_resources(bucket_name, artifact_directory, cleanup_bucket)
        sys.exit(0)
    except KeyError as e:
        LOGGER.critical("ERROR: KeyError - reason:\n%s", e)
        utils.cleanup_s3_resources(bucket_name, artifact_directory, cleanup_bucket)
        sys.exit(1)
    except Exception as e:
        LOGGER.critical(e)
        utils.cleanup_s3_resources(bucket_name, artifact_directory, cleanup_bucket)
        sys.exit(1)
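create() is driven by an argparse-style namespace. A minimal sketch of an invocation, where the attribute names are exactly those the function reads and all values are illustrative:

from types import SimpleNamespace

args = SimpleNamespace(
    cluster_name="mycluster",
    config_file=None,        # fall back to the default config location
    cluster_template=None,   # use the default cluster section
    tags=None,               # optional extra tags, merged by _evaluate_tags
    template_url=None,       # let evaluate_pcluster_template_url decide
    extra_parameters=None,   # optional CFN parameter overrides
    norollback=False,
    nowait=False,
)
create(args)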
Example #4
def create(args):  # noqa: C901 FIXME!!!
    LOGGER.info("Beginning cluster creation for cluster: %s",
                args.cluster_name)
    LOGGER.debug("Building cluster config based on args %s", str(args))

    # Build the config based on args
    pcluster_config = PclusterConfig(config_file=args.config_file,
                                     cluster_label=args.cluster_template,
                                     fail_on_file_absence=True)
    pcluster_config.validate()
    # get CFN parameters, template url and tags from config
    cluster_section = pcluster_config.get_section("cluster")
    cfn_params = pcluster_config.to_cfn()

    _check_for_updates(pcluster_config)

    batch_temporary_bucket = None
    try:
        cfn_client = boto3.client("cloudformation")
        stack_name = utils.get_stack_name(args.cluster_name)

        # If scheduler is awsbatch create bucket with resources
        if cluster_section.get_param_value("scheduler") == "awsbatch":
            batch_resources = pkg_resources.resource_filename(
                __name__, "resources/batch")
            batch_temporary_bucket = _create_bucket_with_batch_resources(
                stack_name=stack_name,
                resources_dir=batch_resources,
                region=pcluster_config.region)
            cfn_params["ResourcesS3Bucket"] = batch_temporary_bucket

        LOGGER.info("Creating stack named: %s", stack_name)
        LOGGER.debug(cfn_params)

        # determine the CloudFormation Template URL to use
        template_url = _evaluate_pcluster_template_url(
            pcluster_config, preferred_template_url=args.template_url)

        # merge tags from configuration, command-line and internal ones
        tags = _evaluate_tags(pcluster_config, preferred_tags=args.tags)

        # append extra parameters from command-line
        if args.extra_parameters:
            LOGGER.debug("Adding extra parameters to the CFN parameters")
            cfn_params.update(dict(args.extra_parameters))

        # prepare input parameters for stack creation and create the stack
        LOGGER.debug(cfn_params)
        params = [{
            "ParameterKey": key,
            "ParameterValue": value
        } for key, value in cfn_params.items()]
        stack = cfn_client.create_stack(
            StackName=stack_name,
            TemplateURL=template_url,
            Parameters=params,
            Capabilities=["CAPABILITY_IAM"],
            DisableRollback=args.norollback,
            Tags=tags,
        )
        LOGGER.debug("StackId: %s", stack.get("StackId"))

        if not args.nowait:
            verified = utils.verify_stack_creation(stack_name, cfn_client)
            LOGGER.info("")
            result_stack = utils.get_stack(stack_name, cfn_client)
            _print_stack_outputs(result_stack)
            if not verified:
                # verify_stack_creation returns False on failure; exit non-zero
                sys.exit(1)
        else:
            stack_status = utils.get_stack(stack_name,
                                           cfn_client).get("StackStatus")
            LOGGER.info("Status: %s", stack_status)
    except ClientError as e:
        LOGGER.critical(e.response.get("Error").get("Message"))
        sys.stdout.flush()
        if batch_temporary_bucket:
            utils.delete_s3_bucket(bucket_name=batch_temporary_bucket)
        sys.exit(1)
    except KeyboardInterrupt:
        LOGGER.info("\nExiting...")
        sys.exit(0)
    except KeyError as e:
        LOGGER.critical("ERROR: KeyError - reason:")
        LOGGER.critical(e)
        if batch_temporary_bucket:
            utils.delete_s3_bucket(bucket_name=batch_temporary_bucket)
        sys.exit(1)
    except Exception as e:
        LOGGER.critical(e)
        if batch_temporary_bucket:
            utils.delete_s3_bucket(bucket_name=batch_temporary_bucket)
        sys.exit(1)
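As in the previous example, the list comprehension flattens cfn_params into the Parameters shape that boto3's create_stack requires; for example (values illustrative):

cfn_params = {"ResourcesS3Bucket": "my-batch-bucket", "MasterSubnetId": "subnet-0123456789abcdef0"}
params = [{"ParameterKey": key, "ParameterValue": value} for key, value in cfn_params.items()]
# params == [
#     {"ParameterKey": "ResourcesS3Bucket", "ParameterValue": "my-batch-bucket"},
#     {"ParameterKey": "MasterSubnetId", "ParameterValue": "subnet-0123456789abcdef0"},
# ]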