Example #1
def use(management_ip, provider, rest_port):
    logger = get_logger()
    # first check this server is available.
    client = utils.get_rest_client(manager_ip=management_ip,
                                   rest_port=rest_port)
    try:
        status_result = client.manager.get_status()
    except CloudifyClientError:
        status_result = None
    if not status_result:
        msg = ("Can't use management server {0}: No response.".format(
            management_ip))
        raise CloudifyCliError(msg)

    # check if cloudify was initialized.
    if not utils.is_initialized():
        utils.dump_cloudify_working_dir_settings()
        utils.dump_configuration_file()

    try:
        response = utils.get_rest_client(management_ip).manager.get_context()
        provider_name = response['name']
        provider_context = response['context']
    except CloudifyClientError:
        provider_name = None
        provider_context = None

    with utils.update_wd_settings() as wd_settings:
        wd_settings.set_management_server(management_ip)
        wd_settings.set_provider_context(provider_context)
        wd_settings.set_provider(provider_name)
        wd_settings.set_rest_port(rest_port)
        wd_settings.set_is_provider_config(provider)
        logger.info('Using management server {0} with port {1}'.format(
            management_ip, rest_port))
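
This page's examples all funnel persisted CLI state through utils.update_wd_settings(). As a rough illustration of the pattern only (not the actual cloudify-cli implementation, whose settings class and storage format differ), a context manager along these lines would load the working-directory settings, yield them for mutation, and write them back on a clean exit:

import json
import os
from contextlib import contextmanager

# Hypothetical storage location; the real CLI keeps its own format.
SETTINGS_PATH = os.path.join('.cloudify', 'settings.json')


class WorkingDirSettings(object):
    # Minimal stand-in for the real settings object.
    def __init__(self, data):
        self.data = data

    def set_management_server(self, ip):
        self.data['management_server'] = ip

    def set_rest_port(self, port):
        self.data['rest_port'] = port


@contextmanager
def update_wd_settings():
    # Load current settings, hand them to the caller, persist on success.
    try:
        with open(SETTINGS_PATH) as f:
            data = json.load(f)
    except (IOError, OSError):
        data = {}
    yield WorkingDirSettings(data)
    if not os.path.isdir(os.path.dirname(SETTINGS_PATH)):
        os.makedirs(os.path.dirname(SETTINGS_PATH))
    with open(SETTINGS_PATH, 'w') as f:
        json.dump(data, f, indent=2)
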
Example #2
def use(management_ip, rest_port):
    logger = get_logger()
    # first check this server is available.
    client = utils.get_rest_client(
        manager_ip=management_ip, rest_port=rest_port)
    try:
        status_result = client.manager.get_status()
    except CloudifyClientError:
        status_result = None
    if not status_result:
        msg = ("Can't use management server {0}: No response."
               .format(management_ip))
        raise CloudifyCliError(msg)

    # check if cloudify was initialized.
    if not utils.is_initialized():
        utils.dump_cloudify_working_dir_settings()
        utils.dump_configuration_file()

    try:
        response = utils.get_rest_client(
            management_ip).manager.get_context()
        provider_context = response['context']
    except CloudifyClientError:
        provider_context = None

    with utils.update_wd_settings() as wd_settings:
        wd_settings.set_management_server(management_ip)
        wd_settings.set_provider_context(provider_context)
        wd_settings.set_rest_port(rest_port)
        logger.info('Using management server {0} with port {1}'
                    .format(management_ip, rest_port))

    # delete the previous manager deployment if it exists.
    bs.delete_workdir()
Example #3
    def _test(self, skip_generation=False, reset=False):
        requests_path = self.workdir / 'requests.json'
        port = self._start_server(requests_path)

        self.claw.generate(tests.STUB_CONFIGURATION)
        conf = configuration.Configuration(tests.STUB_CONFIGURATION)

        with conf.dir:
            sh.cfy.init()
            with cli_utils.update_wd_settings() as wd_settings:
                wd_settings.set_management_server('localhost')
                wd_settings.set_rest_port(port)

        blueprint_conf = conf.blueprint(tests.STUB_BLUEPRINT)

        if skip_generation or reset:
            self.claw('generate-blueprint', tests.STUB_CONFIGURATION,
                      tests.STUB_BLUEPRINT)

        if skip_generation:
            with blueprint_conf.patch.inputs as patch:
                first_key = patch.obj.keys()[0]
                patch.obj[first_key] = 'SOME_OTHER_VALUE'

        # sanity
        if reset:
            with self.assertRaises(sh.ErrorReturnCode) as c:
                self.claw.deploy(tests.STUB_CONFIGURATION,
                                 tests.STUB_BLUEPRINT)
            self.assertIn('Already initialized', c.exception.stderr)

        self.claw.deploy(tests.STUB_CONFIGURATION,
                         tests.STUB_BLUEPRINT,
                         skip_generation=skip_generation,
                         reset=reset)

        requests = json.loads(requests_path.text())
        blueprint, deployment, execution = requests
        self.assertEqual(blueprint, {'blueprint': [tests.STUB_BLUEPRINT]})
        self.assertEqual(
            deployment, {
                'deployment': [
                    tests.STUB_BLUEPRINT, {
                        'blueprint_id': tests.STUB_BLUEPRINT,
                        'inputs': blueprint_conf.inputs
                    }
                ]
            })
        self.assertEqual(
            execution, {
                'execution': [{
                    'deployment_id': tests.STUB_BLUEPRINT,
                    'parameters': None,
                    'allow_custom_parameters': 'false',
                    'workflow_id': 'install',
                    'force': 'false'
                }]
            })
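
The test above depends on a _start_server helper that records incoming REST calls to requests.json. A minimal sketch of that idea, assuming the stub only needs to capture request paths and answer 200 (the real claw test helper records richer, structured payloads):

import json
import threading

try:  # Python 3
    from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:  # Python 2
    from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler


def start_recording_server(requests_path, port=0):
    # Record the path of every request to a JSON file and reply with 200.
    recorded = []

    class Handler(BaseHTTPRequestHandler):
        def do_GET(self):
            recorded.append(self.path)
            with open(requests_path, 'w') as f:
                json.dump(recorded, f)
            self.send_response(200)
            self.end_headers()
            self.wfile.write(b'{}')

        do_POST = do_PUT = do_GET

    server = HTTPServer(('localhost', port), Handler)
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
    return server.server_port  # port=0 lets the OS pick a free port
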
Example #4
def teardown(force, ignore_deployments):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    client = utils.get_rest_client(management_ip)
    try:
        if not ignore_deployments and len(client.deployments.list()) > 0:
            msg = \
                ("Manager server {0} has existing deployments. Delete all "
                 "deployments first or add the '--ignore-deployments' flag to "
                 "your command to ignore these deployments and execute "
                 "teardown.".format(management_ip))
            raise exceptions.CloudifyCliError(msg)
    except IOError:
        msg = \
            "Failed querying manager server {0} about existing " \
            "deployments; The Manager server may be down. If you wish to " \
            'skip this check, you may use the "--ignore-deployments" flag, ' \
            'in which case teardown will occur regardless of the Manager ' \
            "server's status.".format(management_ip)
        raise exceptions.CloudifyCliError(msg)

    logger.info("tearing down {0}".format(management_ip))

    # runtime properties might have changed since the last time we
    # executed 'use' because of recovery, so we need to retrieve
    # the provider context again
    try:
        logger.info('Retrieving provider context')
        management_ip = utils.get_management_server_ip()
        use(management_ip, utils.get_rest_port())
    except BaseException as e:
        logger.warning('Failed retrieving provider context: {0}. This '
                       'may cause a leaking management server '
                       'in case it has gone through a '
                       'recovery process'.format(str(e)))

    # reload settings since the provider context may have changed
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.teardown()

    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()

    logger.info("teardown complete")
Example #5
    def test_teardown_no_management_ip_in_context_right_directory(
            self, mock_load_env, mock_teardown):  # NOQA
        cli_runner.run_cli('cfy init')

        with update_wd_settings() as wd:
            wd.set_provider_context({})

        cli_runner.run_cli('cfy teardown -f')
        mock_teardown.assert_called_once_with()
        mock_load_env.assert_called_once_with()
Example #7
def _do_teardown():
    # reload settings since the provider context may have changed
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get("cloudify", {}).get("manager_deployment"))
    bs.teardown()
    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()
Example #8
def _do_teardown():
    # reload settings since the provider context may have changed
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.teardown()
    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()
Example #9
    def _test(self, skip_generation=False, reset=False):
        requests_path = self.workdir / 'requests.json'
        port = self._start_server(requests_path)

        self.claw.generate(tests.STUB_CONFIGURATION)
        conf = configuration.Configuration(tests.STUB_CONFIGURATION)

        with conf.dir:
            sh.cfy.init()
            with cli_utils.update_wd_settings() as wd_settings:
                wd_settings.set_management_server('localhost')
                wd_settings.set_rest_port(port)

        blueprint_conf = conf.blueprint(tests.STUB_BLUEPRINT)

        if skip_generation or reset:
            self.claw('generate-blueprint',
                      tests.STUB_CONFIGURATION,
                      tests.STUB_BLUEPRINT)

        if skip_generation:
            with blueprint_conf.patch.inputs as patch:
                first_key = patch.obj.keys()[0]
                patch.obj[first_key] = 'SOME_OTHER_VALUE'

        # sanity
        if reset:
            with self.assertRaises(sh.ErrorReturnCode) as c:
                self.claw.deploy(tests.STUB_CONFIGURATION,
                                 tests.STUB_BLUEPRINT)
            self.assertIn('Configuration already initialized',
                          c.exception.stderr)

        self.claw.deploy(tests.STUB_CONFIGURATION, tests.STUB_BLUEPRINT,
                         skip_generation=skip_generation,
                         reset=reset)

        requests = json.loads(requests_path.text())
        blueprint, deployment, execution = requests
        self.assertEqual(blueprint, {
            'blueprint': [tests.STUB_BLUEPRINT]})
        self.assertEqual(deployment, {
            'deployment': [tests.STUB_BLUEPRINT, {
                'blueprint_id': tests.STUB_BLUEPRINT,
                'inputs': blueprint_conf.inputs}]})
        self.assertEqual(execution, {
            'execution': [{
                'deployment_id': tests.STUB_BLUEPRINT,
                'parameters': None,
                'allow_custom_parameters': 'false',
                'workflow_id': 'install',
                'force': 'false'
            }]})
Example #10
    def test_recover_from_same_directory_as_bootstrap(self, *_):
        cli_runner.run_cli("cfy init")

        # mock bootstrap behavior by setting the management key path
        # in the local context
        key_path = os.path.join(TEST_WORK_DIR, "key.pem")
        open(key_path, "w").close()

        with update_wd_settings() as wd:
            wd.set_management_key(key_path)
            wd.set_provider_context({})

        # now run recovery and make sure no exception was raised
        cli_runner.run_cli("cfy recover -f -s {0}".format(key_path))
Example #11
    def test_recover_from_same_directory_as_bootstrap_missing_key(self, *_):
        cli_runner.run_cli("cfy init")

        # mock bootstrap behavior by setting the management key path
        # in the local context. however, don't actually create the key file
        key_path = os.path.join(TEST_WORK_DIR, "key.pem")

        with update_wd_settings() as wd:
            wd.set_management_key(key_path)
            wd.set_provider_context({})

        # recovery command should fail because the key file specified in
        # the context file does not exist
        self._assert_ex("cfy recover -f",
                        "Cannot perform recovery. manager key "
                        "file does not exist")
Example #12
    def test_recover_from_same_directory_as_bootstrap(self, *_):
        cli_runner.run_cli('cfy init')

        # mock bootstrap behavior by setting the management key path
        # in the local context
        key_path = os.path.join(TEST_WORK_DIR, 'key.pem')
        open(key_path, 'w').close()

        with update_wd_settings() as wd:
            wd.set_management_key(key_path)
            wd.set_provider_context({})

        # now run recovery and make sure no exception was raised
        cli_runner.run_cli('cfy recover -f -s {0}'.format(key_path))
Example #13
    def test_recover_from_different_directory_than_bootstrap_with_env_variable(self, *_):  # NOQA
        cli_runner.run_cli("cfy init")

        key_path = os.path.join(TEST_WORK_DIR, "key.pem")
        open(key_path, "w").close()

        # mock provider context
        with update_wd_settings() as wd:
            wd.set_provider_context({})

        try:
            os.environ["CLOUDIFY_MANAGER_PRIVATE_KEY_PATH"] = key_path
            cli_runner.run_cli("cfy recover -f -s {0}".format(key_path))
        finally:
            del os.environ["CLOUDIFY_MANAGER_PRIVATE_KEY_PATH"]
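
Example #13 above and #14 below set and delete CLOUDIFY_MANAGER_PRIVATE_KEY_PATH by hand with try/finally. The same bookkeeping can be expressed once as a small context manager (a sketch, not part of the actual test suite):

import os
from contextlib import contextmanager


@contextmanager
def env_var(name, value):
    # Temporarily set an environment variable; restore the old state on exit.
    old = os.environ.get(name)
    os.environ[name] = value
    try:
        yield
    finally:
        if old is None:
            del os.environ[name]
        else:
            os.environ[name] = old

# Usage in the test would then read:
#     with env_var('CLOUDIFY_MANAGER_PRIVATE_KEY_PATH', key_path):
#         cli_runner.run_cli('cfy recover -f')
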
Example #14
    def test_recover_from_different_directory_than_bootstrap_with_env_variable(self, *_):  # NOQA
        cli_runner.run_cli('cfy init')

        key_path = os.path.join(TEST_WORK_DIR, 'key.pem')
        open(key_path, 'w').close()

        # mock provider context
        with update_wd_settings() as wd:
            wd.set_provider_context({})

        try:
            os.environ['CLOUDIFY_MANAGER_PRIVATE_KEY_PATH'] = key_path
            cli_runner.run_cli('cfy recover -f')
        finally:
            del os.environ['CLOUDIFY_MANAGER_PRIVATE_KEY_PATH']
Example #15
    def test_recover_from_same_directory_as_bootstrap_missing_key(self, *_):
        cli_runner.run_cli('cfy init')

        # mock bootstrap behavior by setting the management key path
        # in the local context. however, don't actually create the key file
        key_path = os.path.join(TEST_WORK_DIR, 'key.pem')

        with update_wd_settings() as wd:
            wd.set_management_key(key_path)
            wd.set_provider_context({})

        # recovery command should fail because the key file specified in
        # the context file does not exist
        self._assert_ex('cfy recover -f',
                        'Cannot perform recovery. manager key '
                        'file does not exist')
Example #16
def _update_manager_deployment(local_only=False):

    # get the current provider from the runtime property set on bootstrap
    provider_context = ctx.instance.runtime_properties[PROVIDER_RUNTIME_PROPERTY]

    # construct new manager deployment
    provider_context["cloudify"]["manager_deployment"] = _dump_manager_deployment()

    # update locally
    ctx.instance.runtime_properties[PROVIDER_RUNTIME_PROPERTY] = provider_context
    with utils.update_wd_settings() as wd_settings:
        wd_settings.set_provider_context(provider_context)

    if not local_only:
        # update on server
        rest_client = utils.get_rest_client()
        rest_client.manager.update_context("provider", provider_context)
Example #17
    def test_teardown_manager_down_ignore_deployments(self, mock_teardown):

        def raise_client_error():
            raise CloudifyClientError('this is an IOError')

        self.client.deployments.list = raise_client_error
        self.client.manager.get_context = MagicMock(
            return_value={'name': 'mock_provider', 'context': {'key': 'value'}}
        )
        cli_runner.run_cli('cfy init')

        with update_wd_settings() as wd:
            wd.set_management_server('10.0.0.1')
            wd.set_provider_context({})

        cli_runner.run_cli('cfy teardown -f --ignore-deployments')
        mock_teardown.assert_called_once_with()
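
Example #17 swaps client.deployments.list for a bare function to force a failure. With the mock library the same behaviour is normally expressed through side_effect; a sketch using a local stand-in for the exception class:

from mock import MagicMock  # on Python 3: from unittest.mock import MagicMock


class CloudifyClientError(Exception):
    # Stand-in for the cloudify_rest_client exception used in these tests.
    pass


# Equivalent to the hand-written raise_client_error above:
failing_list = MagicMock(
    side_effect=CloudifyClientError('this is an IOError'))

# self.client.deployments.list = failing_list
# failing_list() now raises CloudifyClientError on every call.
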
Example #18
def use(management_ip, rest_port):
    logger = get_logger()

    # check if cloudify was initialized.
    if not utils.is_initialized():
        utils.dump_cloudify_working_dir_settings()
        utils.dump_configuration_file()

    # determine SSL mode by port
    if rest_port == constants.SECURED_REST_PORT:
        protocol = constants.SECURED_PROTOCOL
    else:
        protocol = constants.DEFAULT_PROTOCOL
    client = utils.get_rest_client(
        rest_host=management_ip, rest_port=rest_port, rest_protocol=protocol,
        skip_version_check=True)
    try:
        # first check this server is available.
        client.manager.get_status()
    except UserUnauthorizedError:
        msg = "Can't use manager {0}: User is unauthorized.".format(
            management_ip)
        raise CloudifyCliError(msg)
    except CloudifyClientError as e:
        msg = "Can't use manager {0}: {1}".format(management_ip, str(e))
        raise CloudifyCliError(msg)

    try:
        response = client.manager.get_context()
        provider_context = response['context']
    except CloudifyClientError:
        provider_context = None

    with utils.update_wd_settings() as wd_settings:
        wd_settings.set_management_server(management_ip)
        wd_settings.set_provider_context(provider_context)
        wd_settings.set_rest_port(rest_port)
        wd_settings.set_rest_protocol(protocol)
        logger.info('Using management server {0} with port {1}'
                    .format(management_ip, rest_port))

    # delete the previous manager deployment if it exists.
    bs.delete_workdir()
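
The port-to-protocol mapping in Example #18 above and #19 below is easy to isolate and test on its own. A standalone sketch, with the constant values inlined as assumptions (the real values live in cloudify_cli's constants module):

SECURED_REST_PORT = 443  # assumed value of constants.SECURED_REST_PORT
SECURED_PROTOCOL = 'https'
DEFAULT_PROTOCOL = 'http'


def protocol_for_port(rest_port):
    # The secured REST port implies SSL; anything else uses the default.
    if rest_port == SECURED_REST_PORT:
        return SECURED_PROTOCOL
    return DEFAULT_PROTOCOL


assert protocol_for_port(443) == 'https'
assert protocol_for_port(80) == 'http'
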
Example #19
def use(management_ip, rest_port):
    logger = get_logger()
    # determine SSL mode by port
    if rest_port == constants.SECURED_REST_PORT:
        protocol = constants.SECURED_PROTOCOL
    else:
        protocol = constants.DEFAULT_PROTOCOL
    client = utils.get_rest_client(manager_ip=management_ip,
                                   rest_port=rest_port,
                                   protocol=protocol)
    try:
        # first check this server is available.
        client.manager.get_status()
    except UserUnauthorizedError:
        msg = "Can't use management server {0}: User is unauthorized.".format(
            management_ip)
        raise CloudifyCliError(msg)
    except CloudifyClientError:
        msg = "Can't use management server {0}: No response.".format(
            management_ip)
        raise CloudifyCliError(msg)

    # check if cloudify was initialized.
    if not utils.is_initialized():
        utils.dump_cloudify_working_dir_settings()
        utils.dump_configuration_file()

    try:
        response = client.manager.get_context()
        provider_context = response['context']
    except CloudifyClientError:
        provider_context = None

    with utils.update_wd_settings() as wd_settings:
        wd_settings.set_management_server(management_ip)
        wd_settings.set_provider_context(provider_context)
        wd_settings.set_rest_port(rest_port)
        wd_settings.set_protocol(protocol)
        logger.info('Using management server {0} with port {1}'.format(
            management_ip, rest_port))

    # delete the previous manager deployment if it exists.
    bs.delete_workdir()
Example #20
    def test_teardown_manager_down_ignore_deployments(self, mock_teardown):
        def raise_client_error():
            raise CloudifyClientError('this is an IOError')

        self.client.deployments.list = raise_client_error
        self.client.manager.get_context = MagicMock(return_value={
            'name': 'mock_provider',
            'context': {
                'key': 'value'
            }
        })
        cli_runner.run_cli('cfy init')

        with update_wd_settings() as wd:
            wd.set_management_server('10.0.0.1')
            wd.set_provider_context({})

        cli_runner.run_cli('cfy teardown -f --ignore-deployments')
        mock_teardown.assert_called_once_with()
Example #21
def _update_manager_deployment(local_only=False, agent_remote_key_path=None):

    # get the current provider from the runtime property set on bootstrap
    provider_context = ctx.instance.runtime_properties[
        PROVIDER_RUNTIME_PROPERTY]

    # construct new manager deployment
    provider_context['cloudify'][
        'manager_deployment'] = _dump_manager_deployment()

    # update locally
    ctx.instance.runtime_properties[
        PROVIDER_RUNTIME_PROPERTY] = provider_context
    with utils.update_wd_settings() as wd_settings:
        wd_settings.set_provider_context(provider_context)

    if not local_only:
        # update on server
        _upload_provider_context(
            remote_agents_private_key_path=agent_remote_key_path,
            provider_context=provider_context, update_context=True)
Example #23
def teardown(force, ignore_deployments, config_file_path, ignore_validation):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    client = utils.get_rest_client(management_ip)
    if not ignore_deployments and len(client.deployments.list()) > 0:
        msg = ("Management server {0} has active deployments. Add the "
               "'--ignore-deployments' flag to your command to ignore "
               "these deployments and execute topology teardown."
               .format(management_ip))
        raise exceptions.CloudifyCliError(msg)

    settings = utils.load_cloudify_working_dir_settings()
    if settings.get_is_provider_config():
        provider_common.provider_teardown(config_file_path, ignore_validation)
    else:
        logger.info("tearing down {0}".format(management_ip))
        provider_context = settings.get_provider_context()
        bs.read_manager_deployment_dump_if_needed(
            provider_context.get('cloudify', {}).get('manager_deployment'))
        bs.teardown(name='manager',
                    task_retries=0,
                    task_retry_interval=0,
                    task_thread_pool_size=1)

    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()

    logger.info("teardown complete")
Example #24
def teardown(force, ignore_deployments, config_file_path, ignore_validation):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    client = utils.get_rest_client(management_ip)
    if not ignore_deployments and len(client.deployments.list()) > 0:
        msg = ("Management server {0} has active deployments. Add the "
               "'--ignore-deployments' flag to your command to ignore "
               "these deployments and execute topology teardown.".format(
                   management_ip))
        raise exceptions.CloudifyCliError(msg)

    settings = utils.load_cloudify_working_dir_settings()
    if settings.get_is_provider_config():
        provider_common.provider_teardown(config_file_path, ignore_validation)
    else:
        logger.info("tearing down {0}".format(management_ip))
        provider_context = settings.get_provider_context()
        bs.read_manager_deployment_dump_if_needed(
            provider_context.get('cloudify', {}).get('manager_deployment'))
        bs.teardown(name='manager',
                    task_retries=0,
                    task_retry_interval=0,
                    task_thread_pool_size=1)

    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()

    logger.info("teardown complete")
Example #25
import sys

from cloudify_cli import utils
from cloudify_cli.bootstrap import bootstrap as bs
from cloudify_cli.bootstrap import tasks as bstasks


with utils.update_wd_settings() as settings:
    settings.set_management_key(sys.argv[1])
    print 'Manager key set to path: ' + sys.argv[1]
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment')
    )
    env = bs.load_env('manager')
    storage = env.storage
    for instance in storage.get_node_instances():
        manager_user = instance.runtime_properties.get(
            bstasks.MANAGER_USER_RUNTIME_PROPERTY
        )
        if manager_user:
            settings.set_management_user(manager_user)
            print 'Manager user set to: ' + manager_user
            break
Example #26
import sys

from cloudify_cli import utils
from cloudify_cli.bootstrap import bootstrap as bs
from cloudify_cli.bootstrap import tasks as bstasks

with utils.update_wd_settings() as settings:
    settings.set_management_key(sys.argv[1])
    print 'Manager key set to path: ' + sys.argv[1]
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    env = bs.load_env('manager')
    storage = env.storage
    for instance in storage.get_node_instances():
        manager_user = instance.runtime_properties.get(
            bstasks.MANAGER_USER_RUNTIME_PROPERTY)
        if manager_user:
            settings.set_management_user(manager_user)
            print 'Manager user set to: ' + manager_user
            break
Example #27
    def _set_management_creds(self, user, key):
        with self.workdir, update_wd_settings() as ws_settings:
            ws_settings.set_management_user(user)
            ws_settings.set_management_key(key)
Example #28
def provider_bootstrap(config_file_path,
                       keep_up,
                       validate_only, skip_validations):
    logger = get_logger()

    provider_deprecation_notice()
    provider_name = utils.get_provider()
    provider = utils.get_provider_module(provider_name)
    try:
        provider_dir = provider.__path__[0]
    except AttributeError:
        provider_dir = os.path.dirname(provider.__file__)
    provider_config = utils.read_config(config_file_path,
                                        provider_dir)
    logger.info("Prefix for all resources: '{0}'"
                .format(provider_config.resources_prefix))
    pm = provider.ProviderManager(provider_config, cli.get_global_verbosity())
    pm.keep_up_on_failure = keep_up

    if skip_validations and validate_only:
        raise exceptions.CloudifyCliError(
            'Please choose one of skip-validations or '
            'validate-only flags, not both.')
    logger.info('Bootstrapping using {0}'.format(provider_name))
    if skip_validations:
        pm.update_names_in_config()  # Prefixes
    else:
        logger.info('Validating provider resources and configuration')
        pm.augment_schema_with_common()
        if pm.validate_schema():
            raise exceptions.CloudifyValidationError('Provider schema '
                                                     'validations failed!')
        pm.update_names_in_config()  # Prefixes
        if pm.validate():
            raise exceptions.CloudifyValidationError(
                'Provider validations failed!')
        logger.info('Provider validations completed successfully')

    if validate_only:
        return
    with utils.protected_provider_call():
        logger.info('Provisioning resources for management server...')
        params = pm.provision()

    installed = False
    provider_context = {}

    def keep_up_or_teardown():
        if keep_up:
            logger.info('topology will remain up')
        else:
            logger.info('tearing down topology'
                        ' due to bootstrap failure')
            pm.teardown(provider_context)

    if params:
        mgmt_ip, private_ip, ssh_key, ssh_user, provider_context = params
        logger.info('provisioning complete')
        logger.info('ensuring connectivity with the management server...')
        if pm.ensure_connectivity_with_management_server(
                mgmt_ip, ssh_key, ssh_user):
            logger.info('connected with the management server successfully')
            logger.info('bootstrapping the management server...')
            try:
                installed = pm.bootstrap(mgmt_ip, private_ip, ssh_key,
                                         ssh_user)
            except BaseException:
                logger.error('bootstrapping failed!')
                keep_up_or_teardown()
                raise
            if installed:
                logger.info('bootstrapping complete')
            else:
                logger.error('bootstrapping failed!')
        else:
            logger.error('failed connecting to the management server!')
    else:
        logger.error('provisioning failed!')

    if installed:
        _update_provider_context(provider_config,
                                 provider_context)

        mgmt_ip = mgmt_ip.encode('utf-8')

        with utils.update_wd_settings() as wd_settings:
            wd_settings.set_management_server(mgmt_ip)
            wd_settings.set_management_key(ssh_key)
            wd_settings.set_management_user(ssh_user)
            wd_settings.set_provider_context(provider_context)

        # storing provider context on management server
        utils.get_rest_client(mgmt_ip).manager.create_context(provider_name,
                                                              provider_context)

        logger.info('management server is up at {0} '
                    '(is now set as the default management server)'
                    .format(mgmt_ip))
    else:
        keep_up_or_teardown()
        raise exceptions.CloudifyBootstrapError()
Example #29
def bootstrap(
    keep_up,
    validate_only,
    skip_validations,
    blueprint_path,
    inputs,
    install_plugins,
    task_retries,
    task_retry_interval,
    task_thread_pool_size,
):
    logger = get_logger()
    env_name = "manager"

    # Verify directory is initialized
    utils.get_context_path()

    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command'
        )

    if not skip_validations:
        logger.info("Executing bootstrap validation...")
        bs.bootstrap_validation(
            blueprint_path,
            name=env_name,
            inputs=inputs,
            task_retries=task_retries,
            task_retry_interval=task_retry_interval,
            task_thread_pool_size=task_thread_pool_size,
            install_plugins=install_plugins,
            resolver=utils.get_import_resolver(),
        )
        logger.info("Bootstrap validation completed successfully")
    elif inputs:
        # The user expects that `--skip-validations` will also ignore
        # bootstrap validations and not only creation_validations
        inputs = common.add_ignore_bootstrap_validations_input(inputs)

    if not validate_only:
        try:
            logger.info("Executing manager bootstrap...")
            details = bs.bootstrap(
                blueprint_path,
                name=env_name,
                inputs=inputs,
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size,
                install_plugins=install_plugins,
            )

            manager_ip = details["manager_ip"]

            provider_context = details["provider_context"]
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details["manager_key_path"])
                ws_settings.set_management_user(details["manager_user"])
                ws_settings.set_management_port(details["manager_port"])
                ws_settings.set_provider_context(provider_context)
                ws_settings.set_rest_port(details["rest_port"])
                ws_settings.set_rest_protocol(details["rest_protocol"])

            logger.info("Bootstrap complete")
            logger.info("Manager is up at {0}".format(manager_ip))
        except Exception as ex:
            tpe, value, traceback = sys.exc_info()
            logger.error("Bootstrap failed! ({0})".format(str(ex)))
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before environment was
                    # even initialized - nothing to teardown.
                    pass
                else:
                    logger.info("Executing teardown due to failed bootstrap...")
                    bs.teardown(
                        name=env_name,
                        task_retries=5,
                        task_retry_interval=30,
                        task_thread_pool_size=1,
                    )
            raise tpe, value, traceback
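
The trailing raise tpe, value, traceback (here in Example #29, and again in Examples #31 through #33), like the bare print statements in the scripts above, is Python 2 only syntax; it re-raises the original exception with its traceback after the teardown cleanup. A sketch of the Python 3 spelling of the same idiom:

import sys


def risky_bootstrap():
    raise RuntimeError('bootstrap failed')


try:
    risky_bootstrap()
except Exception:
    tpe, value, tb = sys.exc_info()
    print('executing teardown due to failed bootstrap')  # cleanup goes here
    raise value.with_traceback(tb)  # Python 3 re-raise with original traceback
    # Code that must run on both versions can use six.reraise(tpe, value, tb).
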
Example #30
def provider_bootstrap(config_file_path, keep_up, validate_only,
                       skip_validations):
    logger = get_logger()

    provider_deprecation_notice()
    provider_name = utils.get_provider()
    provider = utils.get_provider_module(provider_name)
    try:
        provider_dir = provider.__path__[0]
    except AttributeError:
        provider_dir = os.path.dirname(provider.__file__)
    provider_config = utils.read_config(config_file_path, provider_dir)
    logger.info("Prefix for all resources: '{0}'".format(
        provider_config.resources_prefix))
    pm = provider.ProviderManager(provider_config, cli.get_global_verbosity())
    pm.keep_up_on_failure = keep_up

    if skip_validations and validate_only:
        raise exceptions.CloudifyCliError(
            'Please choose one of skip-validations or '
            'validate-only flags, not both.')
    logger.info('Bootstrapping using {0}'.format(provider_name))
    if skip_validations:
        pm.update_names_in_config()  # Prefixes
    else:
        logger.info('Validating provider resources and configuration')
        pm.augment_schema_with_common()
        if pm.validate_schema():
            raise exceptions.CloudifyValidationError('Provider schema '
                                                     'validations failed!')
        pm.update_names_in_config()  # Prefixes
        if pm.validate():
            raise exceptions.CloudifyValidationError(
                'Provider validations failed!')
        logger.info('Provider validations completed successfully')

    if validate_only:
        return
    with utils.protected_provider_call():
        logger.info('Provisioning resources for management server...')
        params = pm.provision()

    installed = False
    provider_context = {}

    def keep_up_or_teardown():
        if keep_up:
            logger.info('topology will remain up')
        else:
            logger.info('tearing down topology due to bootstrap failure')
            pm.teardown(provider_context)

    if params:
        mgmt_ip, private_ip, ssh_key, ssh_user, provider_context = params
        logger.info('provisioning complete')
        logger.info('ensuring connectivity with the management server...')
        if pm.ensure_connectivity_with_management_server(
                mgmt_ip, ssh_key, ssh_user):
            logger.info('connected with the management server successfully')
            logger.info('bootstrapping the management server...')
            try:
                installed = pm.bootstrap(mgmt_ip, private_ip, ssh_key,
                                         ssh_user)
            except BaseException:
                logger.error('bootstrapping failed!')
                keep_up_or_teardown()
                raise
            if installed:
                logger.info('bootstrapping complete')
            else:
                logger.error('bootstrapping failed!')
        else:
            logger.error('failed connecting to the management server!')
    else:
        logger.error('provisioning failed!')

    if installed:
        _update_provider_context(provider_config, provider_context)

        mgmt_ip = mgmt_ip.encode('utf-8')

        with utils.update_wd_settings() as wd_settings:
            wd_settings.set_management_server(mgmt_ip)
            wd_settings.set_management_key(ssh_key)
            wd_settings.set_management_user(ssh_user)
            wd_settings.set_provider_context(provider_context)

        # storing provider context on management server
        utils.get_rest_client(mgmt_ip).manager.create_context(
            provider_name, provider_context)

        logger.info(
            'management server is up at {0} '
            '(is now set as the default management server)'.format(mgmt_ip))
    else:
        keep_up_or_teardown()
        raise exceptions.CloudifyBootstrapError()
Example #31
def bootstrap(keep_up, validate_only, skip_validations, blueprint_path, inputs,
              install_plugins, task_retries, task_retry_interval,
              task_thread_pool_size):
    logger = get_logger()
    env_name = 'manager'

    # Verify directory is initialized
    utils.get_context_path()

    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command')

    if not skip_validations:
        logger.info('executing bootstrap validation')
        bs.bootstrap_validation(blueprint_path,
                                name=env_name,
                                inputs=inputs,
                                task_retries=task_retries,
                                task_retry_interval=task_retry_interval,
                                task_thread_pool_size=task_thread_pool_size,
                                install_plugins=install_plugins,
                                resolver=utils.get_import_resolver())
        logger.info('bootstrap validation completed successfully')

    if not validate_only:
        try:
            logger.info('executing bootstrap')
            details = bs.bootstrap(blueprint_path,
                                   name=env_name,
                                   inputs=inputs,
                                   task_retries=task_retries,
                                   task_retry_interval=task_retry_interval,
                                   task_thread_pool_size=task_thread_pool_size,
                                   install_plugins=install_plugins)

            manager_ip = details['manager_ip']
            provider_context = details['provider_context']
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details['manager_key_path'])
                ws_settings.set_management_user(details['manager_user'])
                ws_settings.set_provider_context(provider_context)
                ws_settings.set_rest_port(details['rest_port'])
                ws_settings.set_protocol(details['protocol'])

            logger.info('bootstrapping complete')
            logger.info('management server is up at {0}'.format(manager_ip))
        except Exception as ex:
            tpe, value, traceback = sys.exc_info()
            logger.error('bootstrap failed! ({0})'.format(str(ex)))
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before environment was
                    # even initialized - nothing to teardown.
                    pass
                else:
                    logger.info('executing teardown due to failed bootstrap')
                    bs.teardown(name=env_name,
                                task_retries=5,
                                task_retry_interval=30,
                                task_thread_pool_size=1)
            raise tpe, value, traceback
Example #32
def bootstrap(keep_up,
              validate_only,
              skip_validations,
              blueprint_path,
              inputs,
              install_plugins,
              task_retries,
              task_retry_interval,
              task_thread_pool_size):
    logger = get_logger()
    env_name = 'manager'

    # Verify directory is initialized
    utils.get_context_path()

    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command')

    if not skip_validations:
        logger.info('Executing bootstrap validation...')
        bs.bootstrap_validation(
            blueprint_path,
            name=env_name,
            inputs=inputs,
            task_retries=task_retries,
            task_retry_interval=task_retry_interval,
            task_thread_pool_size=task_thread_pool_size,
            install_plugins=install_plugins,
            resolver=utils.get_import_resolver())
        logger.info('Bootstrap validation completed successfully')

    if not validate_only:
        try:
            logger.info('Executing manager bootstrap...')
            details = bs.bootstrap(
                blueprint_path,
                name=env_name,
                inputs=inputs,
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size,
                install_plugins=install_plugins)

            manager_ip = details['manager_ip']
            provider_context = details['provider_context']
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details['manager_key_path'])
                ws_settings.set_management_user(details['manager_user'])
                ws_settings.set_provider_context(provider_context)
                ws_settings.set_rest_port(details['rest_port'])
                ws_settings.set_protocol(details['protocol'])

            logger.info('Bootstrap complete')
            logger.info('Manager is up at {0}'.format(manager_ip))
        except Exception as ex:
            tpe, value, traceback = sys.exc_info()
            logger.error('Bootstrap failed! ({0})'.format(str(ex)))
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before environment was
                    # even initialized - nothing to teardown.
                    pass
                else:
                    logger.info(
                        'Executing teardown due to failed bootstrap...')
                    bs.teardown(name=env_name,
                                task_retries=5,
                                task_retry_interval=30,
                                task_thread_pool_size=1)
            raise tpe, value, traceback
Example #33
def bootstrap(config_file_path, keep_up, validate_only, skip_validations,
              blueprint_path, inputs, install_plugins, task_retries,
              task_retry_interval, task_thread_pool_size):
    logger = get_logger()
    settings = utils.load_cloudify_working_dir_settings()
    if settings.get_is_provider_config():
        if blueprint_path or inputs:
            raise ValueError(
                'the "blueprint_path" and "inputs" parameters '
                'are not to be used with the deprecated provider API')
        return provider_common.provider_bootstrap(config_file_path, keep_up,
                                                  validate_only,
                                                  skip_validations)

    env_name = 'manager'

    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command')

    if not skip_validations:
        logger.info('executing bootstrap validation')
        bs.bootstrap_validation(blueprint_path,
                                name=env_name,
                                inputs=inputs,
                                task_retries=task_retries,
                                task_retry_interval=task_retry_interval,
                                task_thread_pool_size=task_thread_pool_size,
                                install_plugins=install_plugins)
        logger.info('bootstrap validation completed successfully')

    if not validate_only:
        try:
            logger.info('executing bootstrap')
            details = bs.bootstrap(blueprint_path,
                                   name=env_name,
                                   inputs=inputs,
                                   task_retries=task_retries,
                                   task_retry_interval=task_retry_interval,
                                   task_thread_pool_size=task_thread_pool_size,
                                   install_plugins=install_plugins)

            manager_ip = details['manager_ip']
            provider_name = details['provider_name']
            provider_context = details['provider_context']
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details['manager_key_path'])
                ws_settings.set_management_user(details['manager_user'])
                ws_settings.set_provider(provider_name)
                ws_settings.set_provider_context(provider_context)

            logger.info('bootstrapping complete')
            logger.info('management server is up at {0}'.format(manager_ip))
        except Exception:
            tpe, value, traceback = sys.exc_info()
            logger.error('bootstrap failed!')
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before environment was
                    # even initialized - nothing to teardown.
                    pass
                else:
                    logger.info('executing teardown due to failed bootstrap')
                    bs.teardown(name=env_name,
                                task_retries=5,
                                task_retry_interval=30,
                                task_thread_pool_size=1)
            raise tpe, value, traceback