Example #1
def test_manager_deployment_dump(self, remove_deployment=True):
    manager1_original_dir = os.path.join(
        os.path.dirname(__file__),
        'resources', 'storage', 'manager1')
    if not os.path.exists(self.manager_dir):
        shutil.copytree(manager1_original_dir, self.manager_dir)
    result = bootstrap.dump_manager_deployment()
    if remove_deployment:
        shutil.rmtree(self.manager_dir)
        self.assertTrue(
            bootstrap.read_manager_deployment_dump_if_needed(result))
    else:
        self.assertFalse(
            bootstrap.read_manager_deployment_dump_if_needed(result))
    comparison = filecmp.dircmp(manager1_original_dir,
                                self.manager_dir)
    self.assertIn('dir1', comparison.common)
    self.assertIn('dir2', comparison.common)
    self.assertIn('file1', comparison.common)
    self.assertIn('file2', comparison.common)
    self.assertEqual(comparison.common_funny, [])
    self.assertEqual(comparison.diff_files, [])
    self.assertEqual(comparison.funny_files, [])
    self.assertEqual(comparison.left_only, [])
    self.assertEqual(comparison.right_only, [])
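Taken together, these assertions pin down the contract of the dump/restore pair: dump_manager_deployment() serializes the manager directory, and read_manager_deployment_dump_if_needed(dump) restores it only when the directory is missing, returning True when it actually restored something and False otherwise. Below is a minimal sketch of that contract, assuming a tar-based serialization (the real format is not shown in these examples; function names are hypothetical stand-ins):

import io
import os
import tarfile


def dump_dir(path):
    # serialize a directory tree to bytes (hypothetical stand-in for
    # dump_manager_deployment); the real format may differ
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode='w:gz') as tar:
        tar.add(path, arcname='.')
    return buf.getvalue()


def read_dir_dump_if_needed(dump, path):
    # restore only when there is something to restore and the target
    # directory does not already exist; this mirrors the True/False
    # behavior the tests above assert on
    if not dump or os.path.exists(path):
        return False
    os.makedirs(path)
    with tarfile.open(fileobj=io.BytesIO(dump), mode='r:gz') as tar:
        tar.extractall(path)
    return True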
Example #2
def recover(force,
            task_retries,
            task_retry_interval,
            task_thread_pool_size):
    logger = get_logger()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    if CLOUDIFY_MANAGER_PK_PATH_ENVAR in os.environ:
        # user defined the key file path inside an env variable.
        # validate the existence of the keyfile because it will later be
        # used in a fabric task to ssh to the manager
        key_path = os.path.expanduser(os.environ[
            CLOUDIFY_MANAGER_PK_PATH_ENVAR])
        if not os.path.isfile(key_path):
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager private key file "
                "defined in {0} environment variable does not "
                "exist: {1}".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR, key_path)
            )
    else:
        # try retrieving the key file from the local context
        try:
            key_path = os.path.expanduser(utils.get_management_key())
            if not os.path.isfile(key_path):
                # manager key file path exists in context but does not exist
                # in the file system. fail now.
                raise exceptions.CloudifyValidationError(
                    "Cannot perform recovery. manager key file does not "
                    "exist: {0}. Set the manager private key path via the {1} "
                    "environment variable"
                    .format(key_path, CLOUDIFY_MANAGER_PK_PATH_ENVAR)
                )
            # in this case, the recovery is executed from the same directory
            # that the bootstrap was executed from. we should not have
            # problems
        except exceptions.CloudifyCliError:
            # manager key file path does not exist in the context. this
            # means the recovery is executed from a different directory than
            # the bootstrap one. in this case the user must set the
            # environment variable to continue.
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager key file not found. Set "
                "the manager private key path via the {0} environment "
                "variable".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR)
            )

    logger.info('Recovering manager deployment')
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.recover(task_retries=task_retries,
               task_retry_interval=task_retry_interval,
               task_thread_pool_size=task_thread_pool_size)
    logger.info('Successfully recovered manager deployment')
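recover resolves the key path in a fixed order: the CLOUDIFY_MANAGER_PK_PATH_ENVAR environment variable wins, and the local working-directory context is only a fallback. A hedged sketch of driving the first branch (the key path and task values below are hypothetical):

import os

# CLOUDIFY_MANAGER_PK_PATH_ENVAR and recover are assumed to be imported
# from the modules shown above; the key path is a placeholder
os.environ[CLOUDIFY_MANAGER_PK_PATH_ENVAR] = '~/.ssh/manager_key.pem'
recover(force=True,
        task_retries=5,
        task_retry_interval=30,
        task_thread_pool_size=1)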
Example #3
def recover(force,
            task_retries,
            task_retry_interval,
            task_thread_pool_size):
    logger = get_logger()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    if CLOUDIFY_MANAGER_PK_PATH_ENVAR not in os.environ:
        if not os.path.isfile(os.path.expanduser(utils.get_management_key())):
            raise RuntimeError("Can't find manager private key file. Set the "
                               "path to it using the {0} environment variable"
                               .format(CLOUDIFY_MANAGER_PK_PATH_ENVAR))

    logger.info('Recovering manager deployment')
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.recover(task_retries=task_retries,
               task_retry_interval=task_retry_interval,
               task_thread_pool_size=task_thread_pool_size)
    logger.info('Successfully recovered manager deployment')
Example #4
def test_manager_deployment_dump(self, remove_deployment=True):
    manager1_original_dir = self._copy_manager1_dir_to_manager_dir()
    result = bootstrap.dump_manager_deployment()
    if remove_deployment:
        shutil.rmtree(self.manager_dir)
        self.assertTrue(
            bootstrap.read_manager_deployment_dump_if_needed(result))
    else:
        # simulate an already-read manager deployment dump: the .git
        # folder shouldn't appear there, so remove it alone
        shutil.rmtree(os.path.join(self.manager_dir, '.git'))
        self.assertFalse(
            bootstrap.read_manager_deployment_dump_if_needed(result))
    comparison = filecmp.dircmp(manager1_original_dir,
                                self.manager_dir)
    self.assertIn('dir1', comparison.common)
    self.assertIn('dir2', comparison.common)
    self.assertIn('file1', comparison.common)
    self.assertIn('file2', comparison.common)
    self.assertEqual([], comparison.common_funny)
    self.assertEqual([], comparison.diff_files)
    self.assertEqual([], comparison.funny_files)
    self.assertEqual([], comparison.right_only)
    # the .git folder is ignored when archiving the manager deployment, so
    # it should appear only in the original dir, not in the new manager
    # dir; since in the original dir it is named "dotgit" rather than
    # ".git", "dotgit" is what we expect to find in left_only
    self.assertEqual(['dotgit'], comparison.left_only)
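The left_only assertion above documents that the .git folder is skipped when the manager deployment is archived. If the dump is tar-based (the same assumption as in the sketch after Example #1), the exclusion could be expressed as a tarfile filter:

import tarfile


def _skip_git(tarinfo):
    # drop any entry with a .git path component; archive everything else
    # unchanged (hypothetical helper, not part of the bootstrap module)
    if '.git' in tarinfo.name.split('/'):
        return None
    return tarinfo

# usage inside a dump routine would then be:
#     tar.add(path, arcname='.', filter=_skip_git)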
Example #5
def teardown(force, ignore_deployments):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    client = utils.get_rest_client(management_ip)
    try:
        if not ignore_deployments and len(client.deployments.list()) > 0:
            msg = \
                ("Manager server {0} has existing deployments. Delete all "
                 "deployments first or add the '--ignore-deployments' flag to "
                 "your command to ignore these deployments and execute "
                 "teardown.".format(management_ip))
            raise exceptions.CloudifyCliError(msg)
    except IOError:
        msg = \
            "Failed querying manager server {0} about existing " \
            "deployments; The Manager server may be down. If you wish to " \
            'skip this check, you may use the "--ignore-deployments" flag, ' \
            'in which case teardown will occur regardless of the Manager ' \
            "server's status.".format(management_ip)
        raise exceptions.CloudifyCliError(msg)

    logger.info("tearing down {0}".format(management_ip))

    # runtime properties might have changed since the last time we
    # executed 'use', because of recovery. so we need to retrieve
    # the provider context again
    try:
        logger.info('Retrieving provider context')
        management_ip = utils.get_management_server_ip()
        use(management_ip, utils.get_rest_port())
    except BaseException as e:
        logger.warning('Failed retrieving provider context: {0}. This '
                       'may cause a leaking management server '
                       'in case it has gone through a '
                       'recovery process'.format(str(e)))

    # reload settings since the provider context may have changed
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.teardown()

    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()

    logger.info("teardown complete")
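recover and teardown open with an identical force-confirmation guard. One way to factor it out, as a sketch (the helper name is hypothetical; exceptions is the module already used in these examples):

def _validate_force(force):
    # shared guard for destructive commands such as recover and teardown
    if not force:
        raise exceptions.CloudifyCliError(
            "This action requires additional confirmation. Add the '-f' "
            "or '--force' flags to your command if you are certain "
            "this command should be executed.")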
Example #6
def _do_teardown():
    # reload settings since the provider context may have changed
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.teardown()
    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()
Example #7
def recover(force, task_retries, task_retry_interval, task_thread_pool_size):
    logger = get_logger()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    if CLOUDIFY_MANAGER_PK_PATH_ENVAR in os.environ:
        # user defined the key file path inside an env variable.
        # validate the existence of the keyfile because it will later be
        # used in a fabric task to ssh to the manager
        key_path = os.path.expanduser(
            os.environ[CLOUDIFY_MANAGER_PK_PATH_ENVAR])
        if not os.path.isfile(key_path):
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager private key file "
                "defined in {0} environment variable does not "
                "exist: {1}".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR, key_path))
    else:
        # try retrieving the key file from the local context
        try:
            key_path = os.path.expanduser(utils.get_management_key())
            if not os.path.isfile(key_path):
                # manager key file path exists in context but does not exist
                # in the file system. fail now.
                raise exceptions.CloudifyValidationError(
                    "Cannot perform recovery. manager key file does not "
                    "exist: {0}. Set the manager private key path via the {1} "
                    "environment variable".format(
                        key_path, CLOUDIFY_MANAGER_PK_PATH_ENVAR))
            # in this case, the recovery is executed from the same directory
            # that the bootstrap was executed from. we should not have
            # problems
        except exceptions.CloudifyCliError:
            # manager key file path does not exist in the context. this
            # means the recovery is executed from a different directory than
            # the bootstrap one. in this case the user must set the
            # environment variable to continue.
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager key file not found. Set "
                "the manager private key path via the {0} environment "
                "variable".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR))

    logger.info('Recovering manager deployment')
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.recover(task_retries=task_retries,
               task_retry_interval=task_retry_interval,
               task_thread_pool_size=task_thread_pool_size)
    logger.info('Successfully recovered manager deployment')
Example #8
def _do_teardown():
    # reload settings since the provider context may have changed
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.teardown()
    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()
Example #9
def teardown(force, ignore_deployments, config_file_path, ignore_validation):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    client = utils.get_rest_client(management_ip)
    if not ignore_deployments and len(client.deployments.list()) > 0:
        msg = ("Management server {0} has active deployments. Add the "
               "'--ignore-deployments' flag to your command to ignore "
               "these deployments and execute topology teardown."
               .format(management_ip))
        raise exceptions.CloudifyCliError(msg)

    settings = utils.load_cloudify_working_dir_settings()
    if settings.get_is_provider_config():
        provider_common.provider_teardown(config_file_path, ignore_validation)
    else:
        logger.info("tearing down {0}".format(management_ip))
        provider_context = settings.get_provider_context()
        bs.read_manager_deployment_dump_if_needed(
            provider_context.get('cloudify', {}).get('manager_deployment'))
        bs.teardown(name='manager',
                    task_retries=0,
                    task_retry_interval=0,
                    task_thread_pool_size=1)

    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()

    logger.info("teardown complete")
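The chained lookup provider_context.get('cloudify', {}).get('manager_deployment') is deliberate: a missing key at either level yields None instead of a KeyError, and a falsy dump is exactly what read_manager_deployment_dump_if_needed treats as "nothing to restore" (see the empty-dump tests in Examples #12 and #13). A tiny self-contained illustration:

# missing keys fall through to None rather than raising
provider_context = {}
dump = provider_context.get('cloudify', {}).get('manager_deployment')
assert dump is None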
Example #10
def teardown(force, ignore_deployments, config_file_path, ignore_validation):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    client = utils.get_rest_client(management_ip)
    if not ignore_deployments and len(client.deployments.list()) > 0:
        msg = ("Management server {0} has active deployments. Add the "
               "'--ignore-deployments' flag to your command to ignore "
               "these deployments and execute topology teardown.".format(
                   management_ip))
        raise exceptions.CloudifyCliError(msg)

    settings = utils.load_cloudify_working_dir_settings()
    if settings.get_is_provider_config():
        provider_common.provider_teardown(config_file_path, ignore_validation)
    else:
        logger.info("tearing down {0}".format(management_ip))
        provider_context = settings.get_provider_context()
        bs.read_manager_deployment_dump_if_needed(
            provider_context.get('cloudify', {}).get('manager_deployment'))
        bs.teardown(name='manager',
                    task_retries=0,
                    task_retry_interval=0,
                    task_thread_pool_size=1)

    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()

    logger.info("teardown complete")
Example #11
def test_manager_deployment_dump(self, remove_deployment=True):
    manager1_original_dir = os.path.join(os.path.dirname(__file__),
                                         'resources', 'storage',
                                         'manager1')
    if not os.path.exists(self.manager_dir):
        shutil.copytree(manager1_original_dir, self.manager_dir)
    result = bootstrap.dump_manager_deployment()
    if remove_deployment:
        shutil.rmtree(self.manager_dir)
        self.assertTrue(
            bootstrap.read_manager_deployment_dump_if_needed(result))
    else:
        self.assertFalse(
            bootstrap.read_manager_deployment_dump_if_needed(result))
    comparison = filecmp.dircmp(manager1_original_dir, self.manager_dir)
    self.assertIn('dir1', comparison.common)
    self.assertIn('dir2', comparison.common)
    self.assertIn('file1', comparison.common)
    self.assertIn('file2', comparison.common)
    self.assertEqual(comparison.common_funny, [])
    self.assertEqual(comparison.diff_files, [])
    self.assertEqual(comparison.funny_files, [])
    self.assertEqual(comparison.left_only, [])
    self.assertEqual(comparison.right_only, [])
Example #12
def test_manager_deployment_dump_read_empty(self):
    self.assertFalse(bootstrap.read_manager_deployment_dump_if_needed(''))
    self.assertFalse(os.path.exists(self.manager_dir))
Example #13
def test_manager_deployment_dump_read_empty(self):
    self.assertFalse(bootstrap.read_manager_deployment_dump_if_needed(""))
    self.assertFalse(os.path.exists(self.manager_dir))
Example #14
import sys

from cloudify_cli import utils
from cloudify_cli.bootstrap import bootstrap as bs
from cloudify_cli.bootstrap import tasks as bstasks


with utils.update_wd_settings() as settings:
    settings.set_management_key(sys.argv[1])
    print('Manager key set to path: ' + sys.argv[1])
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment')
    )
    env = bs.load_env('manager')
    storage = env.storage
    for instance in storage.get_node_instances():
        manager_user = instance.runtime_properties.get(
            bstasks.MANAGER_USER_RUNTIME_PROPERTY
        )
        if manager_user:
            settings.set_management_user(manager_user)
            print('Manager user set to: ' + manager_user)
            break
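The script reads sys.argv[1] without checking that an argument was passed; a minimal guard, placed before the with block, would fail fast instead of raising an opaque IndexError (a sketch, not part of the original script):

import sys

if len(sys.argv) != 2:
    sys.exit('usage: python <script> <path-to-manager-private-key>')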
Example #15
import sys

from cloudify_cli import utils
from cloudify_cli.bootstrap import bootstrap as bs
from cloudify_cli.bootstrap import tasks as bstasks

with utils.update_wd_settings() as settings:
    settings.set_management_key(sys.argv[1])
    print('Manager key set to path: ' + sys.argv[1])
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    env = bs.load_env('manager')
    storage = env.storage
    for instance in storage.get_node_instances():
        manager_user = instance.runtime_properties.get(
            bstasks.MANAGER_USER_RUNTIME_PROPERTY)
        if manager_user:
            settings.set_management_user(manager_user)
            print('Manager user set to: ' + manager_user)
            break