Code example #1
def provider_init(provider, reset_config):
    logger = get_logger()

    provider_deprecation_notice()
    if os.path.exists(
            os.path.join(utils.get_cwd(),
                         constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME,
                         constants.CLOUDIFY_WD_SETTINGS_FILE_NAME)):
        if not reset_config:
            msg = ('Current directory is already initialized. '
                   'Use the "-r" flag to force '
                   'reinitialization (might overwrite '
                   'provider configuration files if they exist).')
            raise exceptions.CloudifyCliError(msg)
        else:
            # resetting provider configuration
            logger.debug('resetting configuration...')
            _provider_init(provider, reset_config)
            logger.info("Configuration reset complete")
            return

    logger.info("Initializing Cloudify")
    provider_module_name = _provider_init(provider, reset_config)
    settings = utils.CloudifyWorkingDirectorySettings()
    settings.set_provider(provider_module_name)
    settings.set_is_provider_config(True)

    utils.dump_cloudify_working_dir_settings(settings)
    utils.dump_configuration_file()

    logger.info("Initialization complete")
Code example #2
def install_blueprint_plugins(blueprint_path):

    requirements = create_requirements(
        blueprint_path=blueprint_path
    )

    # validate we are inside a virtual env
    if not utils.is_virtual_env():
        raise exceptions.CloudifyCliError(
            'You must be running inside a '
            'virtualenv to install blueprint plugins')

    runner = LocalCommandRunner(get_logger())

    # dump the requirements to a file
    # and let pip install it.
    # this will utilize pip's mechanism
    # of cleanup in case an installation fails.
    output = tempfile.NamedTemporaryFile(mode='w',
                                         delete=True,
                                         suffix='.txt',
                                         prefix='requirements_')
    utils.dump_to_file(collection=requirements,
                       file_path=output.name)
    runner.run(command='pip install -r {0}'.format(output.name),
               stdout_pipe=False)
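
The NamedTemporaryFile approach above has a subtle property worth noting: with delete=True, the file disappears as soon as the handle is closed, so the handle must stay open until pip has finished reading it (and on Windows the file cannot be reopened by name while it is held open). A minimal standalone sketch of the same pattern, assuming only the standard library and a hypothetical requirements list:

import subprocess
import sys
import tempfile

requirements = ['requests>=2.0', 'pyyaml']  # hypothetical plugin requirements

# keep the handle open until pip has read the file; delete=True (the
# default) removes it when the 'with' block exits
with tempfile.NamedTemporaryFile(mode='w',
                                 suffix='.txt',
                                 prefix='requirements_') as output:
    output.write('\n'.join(requirements) + '\n')
    output.flush()  # make sure pip sees the full contents
    subprocess.check_call(
        [sys.executable, '-m', 'pip', 'install', '-r', output.name])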
Code example #3
File: init.py  Project: pkdevboxy/cloudify-cli
def init(reset_config, skip_logging=False):
    if os.path.exists(
            os.path.join(utils.get_cwd(),
                         constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME,
                         constants.CLOUDIFY_WD_SETTINGS_FILE_NAME)):
        if not reset_config:
            msg = 'Current directory is already initialized'
            error = exceptions.CloudifyCliError(msg)
            error.possible_solutions = [
                "Run 'cfy init -r' to force re-initialization "
                "(might overwrite existing "
                "configuration files if exist) "
            ]
            raise error
        else:
            shutil.rmtree(
                os.path.join(utils.get_cwd(),
                             constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME))

    settings = utils.CloudifyWorkingDirectorySettings()
    utils.dump_cloudify_working_dir_settings(settings)
    utils.dump_configuration_file()
    configure_loggers()
    if not skip_logging:
        get_logger().info('Initialization completed successfully')
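
The detail of interest here is the possible_solutions attribute attached to the error before it is raised. A minimal sketch of how a CLI layer might consume such an error; the CloudifyCliError below is a stand-in class, not the real one:

class CloudifyCliError(Exception):
    """Stand-in for cloudify_cli.exceptions.CloudifyCliError."""

def report(error):
    print('Error: {0}'.format(error))
    for solution in getattr(error, 'possible_solutions', []):
        print('Possible solution: {0}'.format(solution))

try:
    error = CloudifyCliError('Current directory is already initialized')
    error.possible_solutions = [
        "Run 'cfy init -r' to force re-initialization"
    ]
    raise error
except CloudifyCliError as e:
    report(e)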
Code example #4
File: teardown.py  Project: pkdevboxy/cloudify-cli
def _validate_deployments(ignore_deployments, management_ip):
    if ignore_deployments:
        return
    if _get_number_of_deployments(management_ip) > 0:
        raise exceptions.CloudifyCliError(
            "Manager server {0} has existing deployments. Delete "
            "all deployments first or add the "
            "'--ignore-deployments' flag to your command to ignore "
            "these deployments and execute teardown.".format(management_ip))
Code example #5
def recover(force, task_retries, task_retry_interval, task_thread_pool_size):
    logger = get_logger()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    if CLOUDIFY_MANAGER_PK_PATH_ENVAR in os.environ:
        # user defined the key file path inside an env variable.
        # validate the existence of the keyfile because it will later be
        # used in a fabric task to ssh to the manager
        key_path = os.path.expanduser(
            os.environ[CLOUDIFY_MANAGER_PK_PATH_ENVAR])
        if not os.path.isfile(key_path):
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager private key file "
                "defined in {0} environment variable does not "
                "exist: {1}".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR, key_path))
    else:
        # try retrieving the key file from the local context
        try:
            key_path = os.path.expanduser(utils.get_management_key())
            if not os.path.isfile(key_path):
                # manager key file path exists in context but does not exist
                # in the file system. fail now.
                raise exceptions.CloudifyValidationError(
                    "Cannot perform recovery. manager key file does not "
                    "exist: {0}. Set the manager private key path via the {1} "
                    "environment variable".format(
                        key_path, CLOUDIFY_MANAGER_PK_PATH_ENVAR))
            # in this case, the recovery is executed from the same directory
            # that the bootstrap was executed from. we should not have
            # problems
        except exceptions.CloudifyCliError:
            # manager key file path does not exist in the context. this
            # means the recovery is executed from a different directory than
            # the bootstrap one. in this case the user must set the
            # environment variable to continue.
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager key file not found. Set "
                "the manager private key path via the {0} environment "
                "variable".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR))

    logger.info('Recovering manager deployment')
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.recover(task_retries=task_retries,
               task_retry_interval=task_retry_interval,
               task_thread_pool_size=task_thread_pool_size)
    logger.info('Successfully recovered manager deployment')
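
The key-resolution logic above follows a common pattern: an environment variable overrides the value stored in the local context, and the resolved path is validated before any SSH work begins. A condensed sketch, with the environment variable name and the context loader treated as assumptions:

import os

# assumed value; the real one lives behind CLOUDIFY_MANAGER_PK_PATH_ENVAR
KEY_PATH_ENVAR = 'CLOUDIFY_MANAGER_PRIVATE_KEY_PATH'

def resolve_key_path(load_key_from_context):
    """load_key_from_context is a hypothetical stand-in for
    utils.get_management_key()."""
    if KEY_PATH_ENVAR in os.environ:
        key_path = os.path.expanduser(os.environ[KEY_PATH_ENVAR])
    else:
        key_path = os.path.expanduser(load_key_from_context())
    if not os.path.isfile(key_path):
        raise ValueError('key file does not exist: {0}'.format(key_path))
    return key_path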
Code example #6
def _load_env():
    if not os.path.isdir(_storage_dir()):
        error = exceptions.CloudifyCliError(
            '{0} has not been initialized with a blueprint.'.format(
                utils.get_cwd()))

        # init was probably not executed.
        # suggest solution.

        error.possible_solutions = ["Run 'cfy local init' in this directory"]
        raise error
    return local.load_env(name=_NAME, storage=_storage())
Code example #7
def instances(node_id):
    logger = get_logger()
    env = _load_env()
    node_instances = env.storage.get_node_instances()
    if node_id:
        node_instances = [
            instance for instance in node_instances
            if instance.node_id == node_id
        ]
        if not node_instances:
            raise exceptions.CloudifyCliError(
                'No node with id: {0}'.format(node_id))
    logger.info(json.dumps(node_instances, sort_keys=True, indent=2))
Code example #8
File: teardown.py  Project: pkdevboxy/cloudify-cli
def _get_number_of_deployments(management_ip):
    client = utils.get_rest_client(management_ip)
    try:
        return len(client.deployments.list())
    except CloudifyClientError:
        raise exceptions.CloudifyCliError(
            "Failed querying manager server {0} about existing "
            "deployments; The Manager server may be down. If you wish to "
            "skip this check, you may use the "
            '--ignore-deployments'
            " "
            "flag, in which case teardown will occur regardless of "
            "the deployments status.".format(management_ip))
Code example #9
File: teardown.py  Project: iconoeugen/cloudify-cli
def teardown(force, ignore_deployments, config_file_path, ignore_validation):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    client = utils.get_rest_client(management_ip)
    if not ignore_deployments and len(client.deployments.list()) > 0:
        msg = ("Management server {0} has active deployments. Add the "
               "'--ignore-deployments' flag to your command to ignore "
               "these deployments and execute topology teardown.".format(
                   management_ip))
        raise exceptions.CloudifyCliError(msg)

    settings = utils.load_cloudify_working_dir_settings()
    if settings.get_is_provider_config():
        provider_common.provider_teardown(config_file_path, ignore_validation)
    else:
        logger.info("tearing down {0}".format(management_ip))
        provider_context = settings.get_provider_context()
        bs.read_manager_deployment_dump_if_needed(
            provider_context.get('cloudify', {}).get('manager_deployment'))
        bs.teardown(name='manager',
                    task_retries=0,
                    task_retry_interval=0,
                    task_thread_pool_size=1)

    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()

    logger.info("teardown complete")
Code example #10
def create_requirements(blueprint_path, output):
    logger = get_logger()
    if output and os.path.exists(output):
        raise exceptions.CloudifyCliError(
            'output path already exists: {0}'.format(output))

    requirements = common.create_requirements(blueprint_path=blueprint_path)

    if output:
        utils.dump_to_file(requirements, output)
        logger.info('Requirements created successfully --> {0}'.format(output))
    else:
        # don't rely on the logger alone here: print() keeps stdout
        # prefix-free, which makes it possible to pipe the
        # output directly to pip, while the logger still
        # records each requirement.
        for requirement in requirements:
            print(requirement)
            logger.info(requirement)
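
The comment in the else branch is the point of interest: print() keeps stdout free of logger prefixes so the requirement list can be piped straight into pip, while the logger still records each line. A minimal sketch of that separation, assuming the logger is routed to stderr:

import logging
import sys

logging.basicConfig(stream=sys.stderr,
                    format='%(asctime)s [%(levelname)s] %(message)s',
                    level=logging.INFO)
logger = logging.getLogger('cfy')

requirements = ['requests>=2.0', 'pyyaml']  # hypothetical output

for requirement in requirements:
    print(requirement)        # prefix-free stdout, pipeable to pip
    logger.info(requirement)  # prefixed log line on stderr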
Code example #11
File: teardown.py  Project: pkdevboxy/cloudify-cli
def teardown(force, ignore_deployments):

    _validate_force(force)

    try:
        management_ip = utils.get_management_server_ip()
    except exceptions.CloudifyCliError:
        # management ip does not exist in the local context
        # this can mean one of two things:
        # 1. bootstrap was unsuccessful
        # 2. we are in the wrong directory
        try:
            bs.load_env()
            # this means we are probably in the right directory,
            # which means the bootstrap was unsuccessful; try to tear
            # down anyway
        except BaseException:
            # this means we are in the wrong directory. have the user
            # execute the 'use' command to retrieve the manager deployment,
            # because otherwise we cannot tear down from here. if the
            # manager is down, the user must return to the original
            # directory in order to tear down
            raise exceptions.CloudifyCliError(
                "You are attempting to execute 'teardown' from an "
                "invalid directory. Please execute 'cfy use' before "
                "running this command. If the management server is "
                "unavailable, you must execute this command from the "
                "directory you initially bootstrapped from, or from the last "
                "directory a 'cfy use' command was executed on this manager.")
        else:
            _do_teardown()
    else:
        # make sure we don't teardown the manager if there are running
        # deployments, unless the user explicitly specified it.
        _validate_deployments(ignore_deployments, management_ip)

        # update local provider context since the server id might have
        # changed in case it has gone through a recovery process.
        _update_local_provider_context(management_ip)

        # execute teardown
        _do_teardown()
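
The nested try/except/else above carries the whole decision tree, so it is worth spelling out in isolation. A schematic sketch with stand-in callables; none of these names are from the real codebase:

def teardown_flow(load_ip, load_env, do_teardown):
    """Stand-in callables: load_ip raises LookupError when no local
    context exists; load_env raises when run outside the bootstrap
    directory."""
    try:
        management_ip = load_ip()
    except LookupError:
        try:
            load_env()
        except Exception:
            # wrong directory: nothing to tear down from here
            raise RuntimeError("run 'cfy use' first")
        else:
            # right directory, failed bootstrap: tear down anyway
            do_teardown(None)
    else:
        # normal path: a manager IP is known
        do_teardown(management_ip)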
Code example #12
File: common.py  Project: Cloudxtreme/cloudify-cli
def install_blueprint_plugins(blueprint_path):

    requirements = create_requirements(blueprint_path=blueprint_path)

    if requirements:
        # validate we are inside a virtual env
        if not utils.is_virtual_env():
            raise exceptions.CloudifyCliError(
                'You must be running inside a '
                'virtualenv to install blueprint plugins')

        runner = LocalCommandRunner(get_logger())
        # dump the requirements to a file
        # and let pip install it.
        # this will utilize pip's mechanism
        # of cleanup in case an installation fails.
        tmp_path = tempfile.mkstemp(suffix='.txt', prefix='requirements_')[1]
        utils.dump_to_file(collection=requirements, file_path=tmp_path)
        runner.run(command='pip install -r {0}'.format(tmp_path),
                   stdout_pipe=False)
    else:
        get_logger().debug('There are no plugins to install.')
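
Unlike the NamedTemporaryFile variant in example #2, mkstemp returns a raw OS-level file descriptor together with the path, and nothing is cleaned up automatically; as written above, the descriptor leaks and the file is left behind. A sketch of tidier handling, with the pip invocation purely illustrative:

import os
import subprocess
import sys
import tempfile

fd, tmp_path = tempfile.mkstemp(suffix='.txt', prefix='requirements_')
try:
    with os.fdopen(fd, 'w') as f:          # closes the descriptor
        f.write('requests>=2.0\n')         # hypothetical requirement
    subprocess.check_call(
        [sys.executable, '-m', 'pip', 'install', '-r', tmp_path])
finally:
    os.remove(tmp_path)                    # mkstemp never deletes for you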
Code example #13
def _provider_init(provider, reset_config):
    """
    initializes a provider by copying its config files to the cwd.
    First, will look for a module named cloudify_#provider#.
    If not found, will look for #provider#.
    If install is True, will install the supplied provider and perform
    the search again.

    :param string provider: the provider's name
    :param bool reset_config: if True, overrides the current config.
    :rtype: `string` representing the provider's module name
    """

    logger = get_logger()

    provider_module_name, provider = get_provider_by_name(provider)

    target_file = os.path.join(utils.get_cwd(), constants.CONFIG_FILE_NAME)
    if not reset_config and os.path.exists(target_file):
        msg = ('Target directory {0} already contains a '
               'provider configuration file; '
               'use the "-r" flag to '
               'reset it back to its default values.'.format(
                   os.path.dirname(target_file)))
        raise exceptions.CloudifyCliError(msg)
    else:
        # if the provider is a package, take its directory
        try:
            provider_dir = provider.__path__[0]
        # otherwise it is a plain module; use its containing directory
        except AttributeError:
            provider_dir = os.path.dirname(provider.__file__)
        files_path = os.path.join(provider_dir, constants.CONFIG_FILE_NAME)
        logger.debug('Copying provider files from {0} to {1}'.format(
            files_path, utils.get_cwd()))
        shutil.copy(files_path, utils.get_cwd())

    return provider_module_name
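
The __path__/__file__ fallback above is a general trick for locating a module's directory on disk regardless of whether it was imported as a package or as a single file. In isolation:

import json
import os

def module_dir(module):
    try:
        return module.__path__[0]                 # package: a list of dirs
    except AttributeError:
        return os.path.dirname(module.__file__)  # plain module

print(module_dir(json))  # json is a package
print(module_dir(os))    # os is a plain module (os.py)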
Code example #14
File: teardown.py  Project: pkdevboxy/cloudify-cli
def _validate_force(force):
    if not force:
        raise exceptions.CloudifyCliError(
            "This action requires additional confirmation. Add the '-f' or "
            "'--force' flags to your command if you are certain this command "
            "should be executed.")
Code example #15
def provider_bootstrap(config_file_path, keep_up, validate_only,
                       skip_validations):
    logger = get_logger()

    provider_deprecation_notice()
    provider_name = utils.get_provider()
    provider = utils.get_provider_module(provider_name)
    try:
        provider_dir = provider.__path__[0]
    except AttributeError:
        provider_dir = os.path.dirname(provider.__file__)
    provider_config = utils.read_config(config_file_path, provider_dir)
    logger.info("Prefix for all resources: '{0}'".format(
        provider_config.resources_prefix))
    pm = provider.ProviderManager(provider_config, cli.get_global_verbosity())
    pm.keep_up_on_failure = keep_up

    if skip_validations and validate_only:
        raise exceptions.CloudifyCliError(
            'Please choose either the skip-validations or '
            'the validate-only flag, not both.')
    logger.info('Bootstrapping using {0}'.format(provider_name))
    if skip_validations:
        pm.update_names_in_config()  # Prefixes
    else:
        logger.info('Validating provider resources and configuration')
        pm.augment_schema_with_common()
        if pm.validate_schema():
            raise exceptions.CloudifyValidationError('Provider schema '
                                                     'validations failed!')
        pm.update_names_in_config()  # Prefixes
        if pm.validate():
            raise exceptions.CloudifyValidationError(
                'Provider validations failed!')
        logger.info('Provider validations completed successfully')

    if validate_only:
        return
    with utils.protected_provider_call():
        logger.info('Provisioning resources for management server...')
        params = pm.provision()

    installed = False
    provider_context = {}

    def keep_up_or_teardown():
        if keep_up:
            logger.info('topology will remain up')
        else:
            logger.info('tearing down topology due to bootstrap failure')
            pm.teardown(provider_context)

    if params:
        mgmt_ip, private_ip, ssh_key, ssh_user, provider_context = params
        logger.info('provisioning complete')
        logger.info('ensuring connectivity with the management server...')
        if pm.ensure_connectivity_with_management_server(
                mgmt_ip, ssh_key, ssh_user):
            logger.info('connected with the management server successfully')
            logger.info('bootstrapping the management server...')
            try:
                installed = pm.bootstrap(mgmt_ip, private_ip, ssh_key,
                                         ssh_user)
            except BaseException:
                logger.error('bootstrapping failed!')
                keep_up_or_teardown()
                raise
            if installed:
                logger.info('bootstrapping complete')
            else:
                logger.error('bootstrapping failed!')
        else:
            logger.error('failed connecting to the management server!')
    else:
        logger.error('provisioning failed!')

    if installed:
        _update_provider_context(provider_config, provider_context)

        mgmt_ip = mgmt_ip.encode('utf-8')

        with utils.update_wd_settings() as wd_settings:
            wd_settings.set_management_server(mgmt_ip)
            wd_settings.set_management_key(ssh_key)
            wd_settings.set_management_user(ssh_user)
            wd_settings.set_provider_context(provider_context)

        # storing provider context on management server
        utils.get_rest_client(mgmt_ip).manager.create_context(
            provider_name, provider_context)

        logger.info(
            'management server is up at {0} '
            '(is now set as the default management server)'.format(mgmt_ip))
    else:
        keep_up_or_teardown()
        raise exceptions.CloudifyBootstrapError()
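
One structural detail worth isolating: keep_up_or_teardown is a closure over the keep_up flag and the mutable provider_context, so the failure paths further down can invoke it without re-passing state. Schematically, with all names illustrative:

def bootstrap_with_cleanup(keep_up, provision, teardown):
    provider_context = {}

    def keep_up_or_teardown():
        if keep_up:
            print('topology will remain up')
        else:
            print('tearing down topology due to bootstrap failure')
            teardown(provider_context)

    try:
        provider_context.update(provision())
    except Exception:
        keep_up_or_teardown()
        raise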