Code example #1
File: test_utils.py Project: funkyHat/cloudify-cli
    def test_dump_cosmo_working_dir_settings_create(self):

        directory_settings = CloudifyWorkingDirectorySettings()
        utils.dump_cloudify_working_dir_settings(
            cosmo_wd_settings=directory_settings,
            update=False)

        utils.load_cloudify_working_dir_settings()
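The test above round-trips the working directory settings through the module-level dump and load helpers. Below is a minimal standalone sketch of the same round trip; it assumes the `utils` module referenced in these snippets is `cloudify_cli.utils`, that `CloudifyWorkingDirectorySettings` is defined in that module, and that the current directory has already been initialized as a Cloudify working directory (none of which is shown in the excerpt).

# Hedged sketch of the dump/load round trip exercised by the test above.
# Assumptions (not shown in the excerpt): `utils` is cloudify_cli.utils,
# CloudifyWorkingDirectorySettings is defined there, and the current
# directory is an initialized Cloudify working directory.
from cloudify_cli import utils
from cloudify_cli.utils import CloudifyWorkingDirectorySettings


def roundtrip_settings():
    settings = CloudifyWorkingDirectorySettings()
    # update=False mirrors the test: create a fresh settings file rather
    # than updating an existing one.
    utils.dump_cloudify_working_dir_settings(
        cosmo_wd_settings=settings, update=False)
    # Once the dump has been written, loading should return the settings.
    return utils.load_cloudify_working_dir_settings()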
Code example #2
def _get_provider_name_and_context(mgmt_ip):
    logger = get_logger()

    # trying to retrieve provider context from server
    try:
        response = utils.get_rest_client(mgmt_ip).manager.get_context()
        return response['name'], response['context']
    except rest_exception.CloudifyClientError as e:
        logger.warn('Failed to get provider context from server: {0}'.format(
            str(e)))

    # using the local provider context instead (if it's relevant for the
    # target server)
    cosmo_wd_settings = utils.load_cloudify_working_dir_settings()
    if cosmo_wd_settings.get_provider_context():
        default_mgmt_server_ip = cosmo_wd_settings.get_management_server()
        if default_mgmt_server_ip == mgmt_ip:
            provider_name = utils.get_provider()
            return provider_name, cosmo_wd_settings.get_provider_context()
        else:
            # the local provider context data is for a different server
            msg = "Failed to get provider context from target server"
    else:
        msg = "Provider context is not set in working directory settings (" \
              "The provider is used during the bootstrap and teardown " \
              "process. This probably means that the manager was started " \
              "manually, without the bootstrap command therefore calling " \
              "teardown is not supported)."
    raise RuntimeError(msg)
Code example #3
File: commands.py Project: AviaE/claw
def bootstrap(configuration,
              inputs_override=None,
              manager_blueprint_override=None,
              reset=False):
    """Bootstrap a configuration based environment."""
    conf = Configuration(configuration)
    if not conf.exists() or reset:
        generate(configuration=configuration,
                 inputs_override=inputs_override,
                 manager_blueprint_override=manager_blueprint_override,
                 reset=reset)
    with conf.dir:
        if conf.cli_config_path.exists():
            raise ALREADY_INITIALIZED
        cfy.init().wait()
        with conf.patch.cli_config as patch:
            patch.obj['colors'] = True
        cfy.bootstrap(blueprint_path=conf.manager_blueprint_path,
                      inputs=conf.inputs_path).wait()
        cli_settings = load_cloudify_working_dir_settings()
        with conf.patch.handler_configuration as patch:
            patch.obj.update({
                'manager_ip': cli_settings.get_management_server(),
                'manager_key': cli_settings.get_management_key(),
                'manager_user': cli_settings.get_management_user()
            })
Code example #4
File: recover.py Project: geokala/cloudify-cli
def recover(force,
            task_retries,
            task_retry_interval,
            task_thread_pool_size):
    logger = get_logger()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    if CLOUDIFY_MANAGER_PK_PATH_ENVAR not in os.environ:
        if not os.path.isfile(os.path.expanduser(utils.get_management_key())):
            raise RuntimeError("Can't find manager private key file. Set the "
                               "path to it using the {0} environment variable"
                               .format(CLOUDIFY_MANAGER_PK_PATH_ENVAR))

    logger.info('Recovering manager deployment')
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.recover(task_retries=task_retries,
               task_retry_interval=task_retry_interval,
               task_thread_pool_size=task_thread_pool_size)
    logger.info('Successfully recovered manager deployment')
Code example #5
def recover(force,
            task_retries,
            task_retry_interval,
            task_thread_pool_size):
    logger = get_logger()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    if CLOUDIFY_MANAGER_PK_PATH_ENVAR in os.environ:
        # user defined the key file path inside an env variable.
        # validate the existence of the keyfile because it will later be
        # used in a fabric task to ssh to the manager
        key_path = os.path.expanduser(os.environ[
            CLOUDIFY_MANAGER_PK_PATH_ENVAR])
        if not os.path.isfile(key_path):
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager private key file "
                "defined in {0} environment variable does not "
                "exist: {1}".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR, key_path)
            )
    else:
        # try retrieving the key file from the local context
        try:
            key_path = os.path.expanduser(utils.get_management_key())
            if not os.path.isfile(key_path):
                # manager key file path exists in context but does not exist
                # in the file system. fail now.
                raise exceptions.CloudifyValidationError(
                    "Cannot perform recovery. manager key file does not "
                    "exist: {0}. Set the manager private key path via the {1} "
                    "environment variable"
                    .format(key_path, CLOUDIFY_MANAGER_PK_PATH_ENVAR)
                )
            # in this case, the recovery is executed from the same directory
            # that the bootstrap was executed from. we should not have
            # problems
        except exceptions.CloudifyCliError:
            # manager key file path does not exist in the context. this
            # means the recovery is executed from a different directory than
            # the bootstrap one. is this case the user must set the
            # environment variable to continue.
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager key file not found. Set "
                "the manager private key path via the {0} environment "
                "variable".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR)
            )

    logger.info('Recovering manager deployment')
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.recover(task_retries=task_retries,
               task_retry_interval=task_retry_interval,
               task_thread_pool_size=task_thread_pool_size)
    logger.info('Successfully recovered manager deployment')
Code example #6
File: cfy.py Project: dankilman/claw
def get_manager_key():
    if NEW_CLI:
        profile = get_profile_context()
        return profile.manager_key
    else:
        cli_settings = load_cloudify_working_dir_settings()
        return cli_settings.get_management_key()
Code example #7
def teardown(force, ignore_deployments):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    client = utils.get_rest_client(management_ip)
    try:
        if not ignore_deployments and len(client.deployments.list()) > 0:
            msg = \
                ("Manager server {0} has existing deployments. Delete all "
                 "deployments first or add the '--ignore-deployments' flag to "
                 "your command to ignore these deployments and execute "
                 "teardown.".format(management_ip))
            raise exceptions.CloudifyCliError(msg)
    except IOError:
        msg = \
            "Failed querying manager server {0} about existing " \
            "deployments; The Manager server may be down. If you wish to " \
            'skip this check, you may use the "--ignore-deployments" flag, ' \
            'in which case teardown will occur regardless of the Manager ' \
            "server's status.".format(management_ip)
        raise exceptions.CloudifyCliError(msg)

    logger.info("tearing down {0}".format(management_ip))

    # runtime properties might have changed since the last time we
    # executed 'use', because of recovery. so we need to retrieve
    # the provider context again
    try:
        logger.info('Retrieving provider context')
        management_ip = utils.get_management_server_ip()
        use(management_ip, utils.get_rest_port())
    except BaseException as e:
        logger.warning('Failed retrieving provider context: {0}. This '
                       'may cause a leaking management server '
                       'in case it has gone through a '
                       'recovery process'.format(str(e)))

    # reload settings since the provider context maybe changed
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.teardown()

    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()

    logger.info("teardown complete")
Code example #8
File: teardown.py Project: pkdevboxy/cloudify-cli
def _do_teardown():
    # reload settings since the provider context maybe changed
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get("cloudify", {}).get("manager_deployment"))
    bs.teardown()
    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()
Code example #9
    def _objects_args_completer(prefix, **kwargs):
        cosmo_wd_settings = utils.load_cloudify_working_dir_settings(
            suppress_error=True)
        if not cosmo_wd_settings:
            return []

        mgmt_ip = cosmo_wd_settings.get_management_server()
        rest_client = utils.get_rest_client(mgmt_ip)
        objs_ids_list = getattr(rest_client, objects_type).list(
            _include=['id'])
        return (obj.id for obj in objs_ids_list if obj.id.startswith(prefix))
Code example #10
    def _get_manager_version_data(self):
        dir_settings = load_cloudify_working_dir_settings(suppress_error=True)
        if not (dir_settings and dir_settings.get_management_server()):
            return None
        management_ip = dir_settings.get_management_server()
        if not self._connected_to_manager(management_ip):
            return None
        client = get_rest_client(management_ip)
        try:
            version_data = client.manager.get_version()
        except CloudifyClientError:
            return None
        version_data['ip'] = management_ip
        return version_data
Code example #11
def workflow_id_completer(prefix, parsed_args, **kwargs):
    # TODO: refactor this into '_objects_args_completer_maker' method once
    #       workflows get their own module in rest-client
    if not parsed_args.deployment_id:
        return []

    cosmo_wd_settings = utils.load_cloudify_working_dir_settings(
        suppress_error=True)
    if not cosmo_wd_settings:
        return []

    mgmt_ip = cosmo_wd_settings.get_management_server()
    rest_client = utils.get_rest_client(mgmt_ip)

    deployment_id = parsed_args.deployment_id
    workflows = rest_client.deployments.get(
        deployment_id, _include=['workflows']).workflows
    return (wf.id for wf in workflows if wf.id.startswith(prefix))
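The completer examples and the manager-version probe shown earlier share the same defensive pattern: load the settings with suppress_error=True, bail out when nothing is stored, and only then build a REST client against the saved management server. The helper below is hypothetical (not part of cloudify-cli) and distills that pattern, under the same assumption that `utils` is `cloudify_cli.utils`.

# Hypothetical helper distilling the defensive pattern used by the
# completers above. Assumes `utils` is cloudify_cli.utils.
from cloudify_cli import utils


def _settings_and_rest_client():
    # With suppress_error=True the load returns a falsy value instead of
    # raising when no working directory settings exist, which is what the
    # completer examples rely on before contacting the manager.
    settings = utils.load_cloudify_working_dir_settings(suppress_error=True)
    if not settings:
        return None, None
    mgmt_ip = settings.get_management_server()
    return settings, utils.get_rest_client(mgmt_ip)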
Code example #12
File: teardown.py Project: iconoeugen/cloudify-cli
def teardown(force, ignore_deployments, config_file_path, ignore_validation):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    client = utils.get_rest_client(management_ip)
    if not ignore_deployments and len(client.deployments.list()) > 0:
        msg = ("Management server {0} has active deployments. Add the "
               "'--ignore-deployments' flag to your command to ignore "
               "these deployments and execute topology teardown.".format(
                   management_ip))
        raise exceptions.CloudifyCliError(msg)

    settings = utils.load_cloudify_working_dir_settings()
    if settings.get_is_provider_config():
        provider_common.provider_teardown(config_file_path, ignore_validation)
    else:
        logger.info("tearing down {0}".format(management_ip))
        provider_context = settings.get_provider_context()
        bs.read_manager_deployment_dump_if_needed(
            provider_context.get('cloudify', {}).get('manager_deployment'))
        bs.teardown(name='manager',
                    task_retries=0,
                    task_retry_interval=0,
                    task_thread_pool_size=1)

    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()

    logger.info("teardown complete")
Code example #13
def bootstrap(configuration,
              inputs_override=None,
              manager_blueprint_override=None,
              reset=False):
    """Bootstrap a configuration based environment."""
    conf = Configuration(configuration)
    if not conf.exists() or reset:
        generate(configuration=configuration,
                 inputs_override=inputs_override,
                 manager_blueprint_override=manager_blueprint_override,
                 reset=reset)
    with conf.dir:
        cfy.init().wait()
        with conf.patch.cli_config as patch:
            patch.obj['colors'] = True
        cfy.bootstrap(blueprint_path=conf.manager_blueprint_path,
                      inputs=conf.inputs_path).wait()
        cli_settings = load_cloudify_working_dir_settings()
        with conf.patch.handler_configuration as patch:
            patch.obj.update({
                'manager_ip': cli_settings.get_management_server(),
                'manager_key': cli_settings.get_management_key(),
                'manager_user': cli_settings.get_management_user()
            })
Code example #14
    def get_provider_context(self):
        with self.workdir:
            settings = load_cloudify_working_dir_settings()
            return settings.get_provider_context()
Code example #15
    def _read_cosmo_wd_settings(self):
        return utils.load_cloudify_working_dir_settings()
Code example #16
    def get_management_ip(self):
        with self.workdir:
            settings = load_cloudify_working_dir_settings()
            return settings.get_management_server()
Code example #17
File: bootstrap.py Project: iconoeugen/cloudify-cli
def bootstrap(config_file_path, keep_up, validate_only, skip_validations,
              blueprint_path, inputs, install_plugins, task_retries,
              task_retry_interval, task_thread_pool_size):
    logger = get_logger()
    settings = utils.load_cloudify_working_dir_settings()
    if settings.get_is_provider_config():
        if blueprint_path or inputs:
            raise ValueError(
                'the "blueprint_path" and "inputs" parameters '
                'are not to be used with the deprecated provider API')
        return provider_common.provider_bootstrap(config_file_path, keep_up,
                                                  validate_only,
                                                  skip_validations)

    env_name = 'manager'

    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command')

    if not skip_validations:
        logger.info('executing bootstrap validation')
        bs.bootstrap_validation(blueprint_path,
                                name=env_name,
                                inputs=inputs,
                                task_retries=task_retries,
                                task_retry_interval=task_retry_interval,
                                task_thread_pool_size=task_thread_pool_size,
                                install_plugins=install_plugins)
        logger.info('bootstrap validation completed successfully')

    if not validate_only:
        try:
            logger.info('executing bootstrap')
            details = bs.bootstrap(blueprint_path,
                                   name=env_name,
                                   inputs=inputs,
                                   task_retries=task_retries,
                                   task_retry_interval=task_retry_interval,
                                   task_thread_pool_size=task_thread_pool_size,
                                   install_plugins=install_plugins)

            manager_ip = details['manager_ip']
            provider_name = details['provider_name']
            provider_context = details['provider_context']
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details['manager_key_path'])
                ws_settings.set_management_user(details['manager_user'])
                ws_settings.set_provider(provider_name)
                ws_settings.set_provider_context(provider_context)

            logger.info('bootstrapping complete')
            logger.info('management server is up at {0}'.format(manager_ip))
        except Exception:
            tpe, value, traceback = sys.exc_info()
            logger.error('bootstrap failed!')
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before environment was
                    # even initialized - nothing to teardown.
                    pass
                else:
                    logger.info('executing teardown due to failed bootstrap')
                    bs.teardown(name=env_name,
                                task_retries=5,
                                task_retry_interval=30,
                                task_thread_pool_size=1)
            # Python 2 three-expression raise form: re-raise the original
            # exception with its traceback preserved.
            raise tpe, value, traceback
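Taken together, the bootstrap, teardown, and recover examples read and write the same small set of fields on the working directory settings. Below is a minimal sketch of that write path, again assuming `utils` is `cloudify_cli.utils`; the parameter values are placeholders.

# Hedged sketch of persisting and clearing manager details the way the
# bootstrap and teardown examples do, via the update_wd_settings()
# context manager. Assumes `utils` is cloudify_cli.utils.
from cloudify_cli import utils


def record_manager(manager_ip, key_path, user):
    # Mirrors the settings written at the end of a successful bootstrap.
    with utils.update_wd_settings() as wd_settings:
        wd_settings.set_management_server(manager_ip)
        wd_settings.set_management_key(key_path)
        wd_settings.set_management_user(user)


def forget_manager():
    # Mirrors the cleanup step at the end of the teardown examples.
    with utils.update_wd_settings() as wd_settings:
        wd_settings.remove_management_server_context()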