def teardown(self):
     if self._global_cleanup_context is None:
         return
     self.setup()
     cfy = CfyHelper(cfy_workdir=self._workdir)
     try:
         cfy.use(self.management_ip)
         cfy.teardown(verbose=True)
     finally:
         self._global_cleanup_context.cleanup()
         self.handler.after_teardown()
         if os.path.exists(self._workdir):
             shutil.rmtree(self._workdir)
Example #2
 def setUp(self):
     global test_environment
     self.env = test_environment.setup()
     self.logger = logging.getLogger(self._testMethodName)
     self.logger.setLevel(logging.INFO)
     self.logger.info('Starting test setUp')
     self.workdir = tempfile.mkdtemp(prefix='cosmo-test-')
     self.cfy = CfyHelper(cfy_workdir=self.workdir,
                          management_ip=self.env.management_ip)
     self.client = self.env.rest_client
     self.test_id = 'system-test-{0}'.format(time.strftime("%Y%m%d-%H%M"))
     self.blueprint_yaml = None
     self._test_cleanup_context = self.env.handler.CleanupContext(
         self._testMethodName, self.env)
     # register cleanup
     self.addCleanup(self._cleanup)
Example #3
    def bootstrap(self):
        if self._management_running:
            return

        self._global_cleanup_context = self.handler.CleanupContext(
            'testenv', self)

        cfy = CfyHelper(cfy_workdir=self._workdir)

        self.handler.before_bootstrap()
        if self.is_provider_bootstrap:
            cfy.bootstrap_with_providers(
                self.cloudify_config_path,
                self.handler.provider,
                keep_up_on_failure=False,
                verbose=True,
                dev_mode=False)
        else:

            install_plugins = self._get_boolean_env_var(
                INSTALL_MANAGER_BLUEPRINT_DEPENDENCIES, True)

            cfy.bootstrap(
                self._manager_blueprint_path,
                inputs_file=self.cloudify_config_path,
                install_plugins=install_plugins,
                keep_up_on_failure=False,
                verbose=True)
        self._running_env_setup(cfy.get_management_ip())
        self.handler.after_bootstrap(cfy.get_provider_context())
    def bootstrap(self, task_retries=5):
        if self._management_running:
            return

        self._global_cleanup_context = self.handler.CleanupContext(
            'testenv', self)
        cfy = CfyHelper(cfy_workdir=self._workdir)

        self.handler.before_bootstrap()
        cfy.bootstrap(
            self._manager_blueprint_path,
            inputs_file=self.cloudify_config_path,
            install_plugins=self.install_plugins,
            keep_up_on_failure=False,
            task_retries=task_retries,
            verbose=True)
        self._running_env_setup(cfy.get_management_ip())
        self.handler.after_bootstrap(cfy.get_provider_context())
    def setUp(self):
        super(TwoManagersTest, self).setUp()
        self.workdir2 = tempfile.mkdtemp(prefix='cloudify-testenv-')

        self.cfy2 = CfyHelper(self.workdir2, testcase=self)
        second_manager_blueprint_path = '{}_existing'.format(
            self.env._manager_blueprint_path)

        shutil.copy2(self.env._manager_blueprint_path,
                     second_manager_blueprint_path)

        external_resources = [
            'node_templates.management_network.properties',
            'node_templates.management_subnet.properties',
            'node_templates.router.properties',
            'node_templates.agents_security_group.properties',
            'node_templates.management_security_group.properties',
        ]

        with YamlPatcher(second_manager_blueprint_path) as patch:
            for prop in external_resources:
                patch.merge_obj(prop, {'use_external_resource': True})

        second_cloudify_config_path = '{}_existing'.format(
            self.env.cloudify_config_path)

        shutil.copy2(self.env.cloudify_config_path,
                     second_cloudify_config_path)

        new_resources = ['manager_server_name', 'manager_port_name']

        with YamlPatcher(second_cloudify_config_path) as patch:
            for prop in new_resources:
                patch.append_value(prop, '2')

        self.cfy2.bootstrap(blueprint_path=second_manager_blueprint_path,
                            inputs_file=second_cloudify_config_path,
                            install_plugins=self.env.install_plugins,
                            keep_up_on_failure=False,
                            task_retries=5,
                            verbose=False)

        self.client2 = create_rest_client(self.cfy2.get_management_ip())
Example #6
 def setUp(self):
     global test_environment
     self.env = test_environment.setup()
     self.logger = logging.getLogger(self._testMethodName)
     self.logger.setLevel(logging.INFO)
     self.workdir = tempfile.mkdtemp(prefix='cosmo-test-')
     self.cfy = CfyHelper(cfy_workdir=self.workdir,
                          management_ip=self.env.management_ip)
     self.client = self.env.rest_client
     self.test_id = 'system-test-{0}'.format(time.strftime("%Y%m%d-%H%M"))
     self.blueprint_yaml = None
     self._test_cleanup_context = self.env.handler.CleanupContext(
         self._testMethodName, self.env)
     # register cleanup
     self.addCleanup(self._cleanup)
    def prepare_manager(self):
        # note that we're using a separate manager checkout, so we need to
        # create our own utils like cfy and the rest client, rather than use
        # the testenv ones
        self.cfy_workdir = tempfile.mkdtemp(prefix='manager-upgrade-')
        self.addCleanup(shutil.rmtree, self.cfy_workdir)
        self.manager_cfy = CfyHelper(cfy_workdir=self.cfy_workdir)

        self.manager_inputs = self._get_bootstrap_inputs()

        self.bootstrap_manager()

        self.rest_client = create_rest_client(self.upgrade_manager_ip)

        self.bootstrap_manager_version = LooseVersion(
            self.rest_client.manager.get_version()['version'])
    def setUp(self):
        super(TwoManagersTest, self).setUp()
        self.workdir2 = tempfile.mkdtemp(prefix='cloudify-testenv-')

        self.cfy2 = CfyHelper(self.workdir2, testcase=self)
        second_manager_blueprint_path = '{}_existing'.format(
            self.env._manager_blueprint_path)

        shutil.copy2(self.env._manager_blueprint_path,
                     second_manager_blueprint_path)

        external_resources = [
            'node_templates.management_network.properties',
            'node_templates.management_subnet.properties',
            'node_templates.router.properties',
            'node_templates.agents_security_group.properties',
            'node_templates.management_security_group.properties',
        ]

        with YamlPatcher(second_manager_blueprint_path) as patch:
            for prop in external_resources:
                patch.merge_obj(prop, {'use_external_resource': True})

        second_cloudify_config_path = '{}_existing'.format(
            self.env.cloudify_config_path)

        shutil.copy2(self.env.cloudify_config_path,
                     second_cloudify_config_path)

        new_resources = ['manager_server_name', 'manager_port_name']

        with YamlPatcher(second_cloudify_config_path) as patch:
            for prop in new_resources:
                patch.append_value(prop, '2')

        self.cfy2.bootstrap(
            blueprint_path=second_manager_blueprint_path,
            inputs_file=second_cloudify_config_path,
            install_plugins=self.env.install_plugins,
            keep_up_on_failure=False,
            task_retries=5,
            verbose=False
        )

        self.client2 = create_rest_client(self.cfy2.get_management_ip())
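The prepare_manager method above stores the bootstrap manager's version as a distutils LooseVersion so that later upgrade assertions can compare versions numerically rather than lexically. A brief standalone illustration of that comparison (the version strings here are made up for the example):

from distutils.version import LooseVersion

# LooseVersion splits the string into numeric components, so '3.10.0'
# correctly compares as newer than '3.4.1'; a plain string comparison
# would rank '3.10.0' below '3.4.1' because '1' < '4'.
bootstrap_version = LooseVersion('3.4.1')
upgraded_version = LooseVersion('3.10.0')

assert upgraded_version > bootstrap_version
assert str(upgraded_version) == '3.10.0'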
Example #10
 def teardown(self):
     if self._global_cleanup_context is None:
         return
     self.setup()
     cfy = CfyHelper(cfy_workdir=self._workdir)
     try:
         cfy.use(self.management_ip, provider=self.is_provider_bootstrap)
         if self.is_provider_bootstrap:
             cfy.teardown_with_providers(
                 self.cloudify_config_path,
                 verbose=True)
         else:
             cfy.teardown(verbose=True)
     finally:
         self._global_cleanup_context.cleanup()
         self.handler.after_teardown()
         if os.path.exists(self._workdir):
             shutil.rmtree(self._workdir)
 def setUp(self):
     global test_environment
     self.env = test_environment.setup()
     self.logger = logging.getLogger(self._testMethodName)
     self.logger.setLevel(logging.INFO)
     self.logger.info('Starting test setUp')
     self.workdir = tempfile.mkdtemp(prefix='cosmo-test-')
     management_user = getattr(self.env, 'management_user_name', None)
     management_key_path = getattr(self.env, 'management_key_path', None)
     self.cfy = CfyHelper(cfy_workdir=self.workdir,
                          management_ip=self.env.management_ip,
                          management_user=management_user,
                          management_key=management_key_path)
     self.client = self.env.rest_client
     self.test_id = 'system-test-{0}-{1}'.format(
         self._testMethodName,
         time.strftime("%Y%m%d-%H%M"))
     self.blueprint_yaml = None
     self._test_cleanup_context = self.env.handler.CleanupContext(
         self._testMethodName, self.env)
     # register cleanup
     self.addCleanup(self._cleanup)
     self.maxDiff = 1024 * 1024 * 10
Example #12
    def bootstrap(self, task_retries=5):
        if self._management_running:
            return

        self._global_cleanup_context = self.handler.CleanupContext(
            'testenv', self)

        cfy = CfyHelper(cfy_workdir=self._workdir)

        self.handler.before_bootstrap()
        cfy.bootstrap(
            self._manager_blueprint_path,
            inputs_file=self.cloudify_config_path,
            install_plugins=self.install_plugins,
            keep_up_on_failure=False,
            task_retries=task_retries,
            verbose=True)
        self._running_env_setup(cfy.get_management_ip())
        self.handler.after_bootstrap(cfy.get_provider_context())
Example #13
class TestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        global test_environment
        self.env = test_environment.setup()
        self.logger = logging.getLogger(self._testMethodName)
        self.logger.setLevel(logging.INFO)
        self.workdir = tempfile.mkdtemp(prefix='cosmo-test-')
        self.cfy = CfyHelper(cfy_workdir=self.workdir,
                             management_ip=self.env.management_ip)
        self.client = self.env.rest_client
        self.test_id = 'system-test-{0}'.format(time.strftime("%Y%m%d-%H%M"))
        self.blueprint_yaml = None
        self._test_cleanup_context = self.env.handler.CleanupContext(
            self._testMethodName, self.env)
        # register cleanup
        self.addCleanup(self._cleanup)

    def _cleanup(self):
        self._test_cleanup_context.cleanup()
        shutil.rmtree(self.workdir)

    def tearDown(self):
        # note that the cleanup function is registered in setUp
        # because it is called regardless of whether setUp succeeded or failed
        # unlike tearDown which is not called when setUp fails (which might
        # happen when tests override setUp)
        if self.env.management_ip:
            try:
                self.logger.info('Running ps aux on Cloudify manager...')
                output = StringIO()
                with fabric_api.settings(
                        user=self.env.management_user_name,
                        host_string=self.env.management_ip,
                        key_filename=get_actual_keypath(
                            self.env,
                            self.env.management_key_path),
                        disable_known_hosts=True):
                    fabric_api.run('ps aux --sort -rss', stdout=output)
                    self.logger.info(
                        'Cloudify manager ps aux output:\n{0}'.format(
                            output.getvalue()))
            except Exception as e:
                self.logger.info(
                    'Error running ps aux on Cloudify manager: {0}'.format(
                        str(e)))

    def get_manager_state(self):
        self.logger.info('Fetching manager current state')
        blueprints = {}
        for blueprint in self.client.blueprints.list():
            blueprints[blueprint.id] = blueprint
        deployments = {}
        for deployment in self.client.deployments.list():
            deployments[deployment.id] = deployment
        nodes = {}
        for deployment_id in deployments.keys():
            for node in self.client.node_instances.list(deployment_id):
                nodes[node.id] = node
        deployment_nodes = {}
        node_state = {}
        for deployment_id in deployments.keys():
            deployment_nodes[deployment_id] = self.client.node_instances.list(
                deployment_id)
            node_state[deployment_id] = {}
            for node in deployment_nodes[deployment_id]:
                node_state[deployment_id][node.id] = node

        return {
            'blueprints': blueprints,
            'deployments': deployments,
            'nodes': nodes,
            'node_state': node_state,
            'deployment_nodes': deployment_nodes
        }

    def get_manager_state_delta(self, before, after):
        after = copy.deepcopy(after)
        for blueprint_id in before['blueprints'].keys():
            del after['blueprints'][blueprint_id]
        for deployment_id in before['deployments'].keys():
            del after['deployments'][deployment_id]
            del after['deployment_nodes'][deployment_id]
            del after['node_state'][deployment_id]
        for node_id in before['nodes'].keys():
            del after['nodes'][node_id]
        return after

    def execute_install(self,
                        deployment_id=None,
                        fetch_state=True):
        return self._make_operation_with_before_after_states(
            self.cfy.execute_install,
            fetch_state,
            deployment_id=deployment_id)

    def upload_deploy_and_execute_install(self, blueprint_id=None,
                                          deployment_id=None,
                                          fetch_state=True,
                                          inputs=None):

        return self._make_operation_with_before_after_states(
            self.cfy.upload_deploy_and_execute_install,
            fetch_state,
            str(self.blueprint_yaml),
            blueprint_id=blueprint_id or self.test_id,
            deployment_id=deployment_id or self.test_id,
            inputs=inputs)

    def _make_operation_with_before_after_states(self, operation, fetch_state,
                                                 *args, **kwargs):
        before_state = None
        after_state = None
        if fetch_state:
            before_state = self.get_manager_state()
        operation(*args, **kwargs)
        if fetch_state:
            after_state = self.get_manager_state()
        return before_state, after_state

    def execute_uninstall(self, deployment_id=None):
        self.cfy.execute_uninstall(deployment_id=deployment_id or self.test_id)

    def copy_blueprint(self, blueprint_dir_name):
        blueprint_path = path(self.workdir) / blueprint_dir_name
        shutil.copytree(get_blueprint_path(blueprint_dir_name),
                        str(blueprint_path))
        return blueprint_path

    def wait_for_execution(self, execution, timeout):
        end = time.time() + timeout
        while time.time() < end:
            status = self.client.executions.get(execution.id).status
            if status == 'failed':
                raise AssertionError('Execution "{}" failed'.format(
                    execution.id))
            if status == 'terminated':
                return
            time.sleep(1)
        events, _ = self.client.events.get(execution.id,
                                           batch_size=1000,
                                           include_logs=True)
        self.logger.info('Deployment creation events & logs:')
        for event in events:
            self.logger.info(json.dumps(event))
        raise AssertionError('Execution "{}" timed out'.format(execution.id))

    def repetitive(self, func, timeout=10, exception_class=Exception,
                   **kwargs):
        deadline = time.time() + timeout
        while True:
            try:
                func(**kwargs)
                break
            except exception_class:
                if time.time() > deadline:
                    raise
                time.sleep(1)
class TestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def wait_until_deployment_ready_and_execute_install(self,
                                                        deployment_id,
                                                        inputs):
        self.wait_until_all_deployment_executions_end(deployment_id)
        return self.execute_install(deployment_id=deployment_id)

    def wait_until_all_deployment_executions_end(
            self,
            deployment_id=None,
            include_system_workflows=False,
            verify_no_failed_execution=False,
            end_status_list=Execution.END_STATES):
        if deployment_id:
            msg = "Waiting for executions on " \
                  "deployment {0} to finish".format(deployment_id)
        else:
            msg = "Waiting for system wide executions to finish"
        if include_system_workflows:
            msg = "{0}, including system workflows.".format(msg)
        self.logger.info(msg)

        start_time = time.time()
        while len([execution for execution in self.client.executions.list(
                deployment_id=deployment_id,
                include_system_workflows=include_system_workflows)
                if execution["status"] not in end_status_list]) > 0:
            time.sleep(1)
            if time.time() - start_time > DEFAULT_EXECUTE_TIMEOUT:
                if deployment_id:
                    timeout_msg = "Timeout while waiting for " \
                                  "executions to end " \
                                  "on deployment {0}.".format(deployment_id)
                else:
                    timeout_msg = "Timeout while waiting for " \
                                  "system wide executions to end."
                raise Exception(timeout_msg)

        if verify_no_failed_execution:
            executions = [e for e in self.client.executions.list(
                          deployment_id=deployment_id,
                          include_system_workflows=include_system_workflows)
                          if e["status"] == Execution.FAILED]
            if executions:
                self.logger.error('Failed executions found.')
            for e in executions:
                self.logger.error('Execution {0} logs:'.format(e.id))
                self.logger.error(self.cfy.list_events(
                    execution_id=e.id, verbosity='-vv'))

        return

    def assert_outputs(self, expected_outputs, deployment_id=None):
        if deployment_id is None:
            deployment_id = self.test_id
        outputs = self.client.deployments.outputs.get(deployment_id)
        outputs = outputs['outputs']
        self.assertEqual(expected_outputs, outputs)

    def setUp(self):
        global test_environment
        self.env = test_environment.setup()
        self.logger = logging.getLogger(self._testMethodName)
        self.logger.setLevel(logging.INFO)
        self.logger.info('Starting test setUp')
        self.workdir = tempfile.mkdtemp(prefix='cosmo-test-')
        management_user = getattr(self.env, 'management_user_name', None)
        management_key_path = getattr(self.env, 'management_key_path', None)
        self.cfy = CfyHelper(cfy_workdir=self.workdir,
                             management_ip=self.env.management_ip,
                             management_user=management_user,
                             management_key=management_key_path)
        self.client = self.env.rest_client
        self.test_id = 'system-test-{0}-{1}'.format(
            self._testMethodName,
            time.strftime("%Y%m%d-%H%M"))
        self.blueprint_yaml = None
        self._test_cleanup_context = self.env.handler.CleanupContext(
            self._testMethodName, self.env)
        # register cleanup
        self.addCleanup(self._cleanup)
        self.maxDiff = 1024 * 1024 * 10

    def _cleanup(self):
        self.env.setup()
        self._test_cleanup_context.cleanup()
        shutil.rmtree(self.workdir)

    def tearDown(self):
        self.logger.info('Starting test tearDown')
        # note that the cleanup function is registered in setUp
        # because it is called regardless of whether setUp succeeded or failed
        # unlike tearDown which is not called when setUp fails (which might
        # happen when tests override setUp)

    def get_manager_state(self):
        self.logger.info('Fetching manager current state')
        blueprints = {}
        for blueprint in self.client.blueprints.list():
            blueprints[blueprint.id] = blueprint
        deployments = {}
        for deployment in self.client.deployments.list():
            deployments[deployment.id] = deployment
        nodes = {}
        for deployment_id in deployments.keys():
            for node in self.client.node_instances.list(deployment_id):
                nodes[node.id] = node
        deployment_nodes = {}
        node_state = {}
        for deployment_id in deployments.keys():
            deployment_nodes[deployment_id] = self.client.node_instances.list(
                deployment_id).items
            node_state[deployment_id] = {}
            for node in deployment_nodes[deployment_id]:
                node_state[deployment_id][node.id] = node

        return {
            'blueprints': blueprints,
            'deployments': deployments,
            'nodes': nodes,
            'node_state': node_state,
            'deployment_nodes': deployment_nodes
        }

    def get_manager_state_delta(self, before, after):
        after = copy.deepcopy(after)
        for blueprint_id in before['blueprints'].keys():
            del after['blueprints'][blueprint_id]
        for deployment_id in before['deployments'].keys():
            del after['deployments'][deployment_id]
            del after['deployment_nodes'][deployment_id]
            del after['node_state'][deployment_id]
        for node_id in before['nodes'].keys():
            del after['nodes'][node_id]
        return after

    def execute_install(self,
                        deployment_id=None,
                        fetch_state=True):
        self.logger.info("attempting to execute install on deployment {0}"
                         .format(deployment_id))
        return self._make_operation_with_before_after_states(
            self.cfy.execute_install,
            fetch_state,
            deployment_id=deployment_id)

    def install(
            self,
            blueprint_id=None,
            deployment_id=None,
            fetch_state=True,
            execute_timeout=DEFAULT_EXECUTE_TIMEOUT,
            inputs=None):

        return self._make_operation_with_before_after_states(
            self.cfy.install,
            fetch_state,
            str(self.blueprint_yaml),
            blueprint_id=blueprint_id or self.test_id,
            deployment_id=deployment_id or self.test_id,
            execute_timeout=execute_timeout,
            inputs=inputs)

    upload_deploy_and_execute_install = install

    def upload_blueprint(
            self,
            blueprint_id):
        self.logger.info("attempting to upload blueprint {0}"
                         .format(blueprint_id))
        return self.cfy.upload_blueprint(
            blueprint_id=blueprint_id,
            blueprint_path=str(self.blueprint_yaml))

    def create_deployment(
            self,
            blueprint_id,
            deployment_id,
            inputs):
        self.logger.info("attempting to create_deployment deployment {0}"
                         .format(deployment_id))
        return self.cfy.create_deployment(
            blueprint_id=blueprint_id,
            deployment_id=deployment_id,
            inputs=inputs)

    def _make_operation_with_before_after_states(self, operation, fetch_state,
                                                 *args, **kwargs):
        before_state = None
        after_state = None
        if fetch_state:
            before_state = self.get_manager_state()
        operation(*args, **kwargs)
        if fetch_state:
            after_state = self.get_manager_state()
        return before_state, after_state

    def execute_uninstall(self, deployment_id=None, cfy=None,
                          delete_deployment_and_blueprint=False):
        cfy = cfy or self.cfy
        if delete_deployment_and_blueprint:
            cfy.uninstall(deployment_id=deployment_id or self.test_id,
                          workflow_id='uninstall',
                          parameters=None,
                          allow_custom_parameters=False,
                          timeout=DEFAULT_EXECUTE_TIMEOUT,
                          include_logs=True)
        else:
            cfy.execute_uninstall(deployment_id=deployment_id or self.test_id)

    def copy_blueprint(self, blueprint_dir_name, blueprints_dir=None):
        blueprint_path = path(self.workdir) / blueprint_dir_name
        shutil.copytree(get_blueprint_path(blueprint_dir_name, blueprints_dir),
                        str(blueprint_path))
        return blueprint_path

    def wait_for_execution(self, execution, timeout, client=None,
                           assert_success=True):
        def dump_events(_client, _execution):
            events, _ = _client.events.get(_execution.id,
                                           batch_size=1000,
                                           include_logs=True)
            self.logger.info('Deployment creation events & logs:')
            for event in events:
                self.logger.info(json.dumps(event))
        client = client or self.client
        end = time.time() + timeout
        while time.time() < end:
            status = client.executions.get(execution.id).status
            if status == 'failed':
                if assert_success:
                    dump_events(client, execution)
                    raise AssertionError('Execution "{}" failed'.format(
                        execution.id))
                else:
                    return
            if status == 'terminated':
                return
            time.sleep(1)
        dump_events(client, execution)
        raise AssertionError('Execution "{}" timed out'.format(execution.id))

    def repetitive(self, func, timeout=10, exception_class=Exception,
                   args=None, kwargs=None):
        args = args or []
        kwargs = kwargs or {}
        deadline = time.time() + timeout
        while True:
            try:
                return func(*args, **kwargs)
            except exception_class:
                if time.time() > deadline:
                    raise
                time.sleep(1)

    @contextmanager
    def manager_env_fabric(self, **kwargs):
        with fabric.context_managers.settings(
                host_string=self.cfy.get_management_ip(),
                user=self.env.management_user_name,
                key_filename=self.env.management_key_path,
                **kwargs):
            yield fabric.api

    def run_commands_on_agent_host(self,
                                   user,
                                   commands,
                                   deployment_id=None,
                                   compute_node_id=None,
                                   compute_node_instance_id=None):
        if not (compute_node_id or compute_node_instance_id):
            self.fail('no node_id or node_instance_id')
        deployment_id = deployment_id or self.test_id
        filters = {'deployment_id': deployment_id}
        if compute_node_id:
            filters['node_id'] = compute_node_id
        if compute_node_instance_id:
            filters['id'] = compute_node_instance_id
        computes = self.client.node_instances.list(**filters).items
        if not computes:
            self.fail('No compute nodes were found')
        if len(computes) > 1:
            self.fail('More than one instance found, please refine your query:'
                      ' {0}'.format(computes))
        compute = computes[0]
        private_ip = compute.runtime_properties['ip']
        with self.manager_env_fabric() as api:
            api.sudo('ssh '
                     '-o UserKnownHostsFile=/dev/null '
                     '-o StrictHostKeyChecking=no '
                     '-t -i /root/.ssh/agent_key.pem {0}@{1} "{2}"'
                     .format(user, private_ip, ' && '.join(commands)))

    def wait_for_resource(self, predicate_func, timeout_sec=60):
        timeout = time.time() + timeout_sec
        while True:
            if time.time() > timeout:
                raise RuntimeError('Failed waiting for resource')
            try:
                result = predicate_func()
                if result:
                    break
            except Exception as e:
                logger.info('predicate function raised an error; {error}'
                            .format(error=e))
            time.sleep(1)
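The tearDown comments in the examples above rely on a unittest guarantee: callbacks registered with addCleanup still run when setUp raises after the registration, whereas tearDown is skipped entirely in that case. A minimal standalone sketch of that behaviour (the FlakyTest class and the simulated failure are illustrative, not part of the framework above):

import shutil
import tempfile
import unittest


class FlakyTest(unittest.TestCase):
    def setUp(self):
        # acquire a resource early in setUp
        self.workdir = tempfile.mkdtemp(prefix='flaky-test-')
        # register the cleanup immediately; it runs even if the rest of
        # setUp fails, unlike tearDown which would be skipped entirely
        self.addCleanup(shutil.rmtree, self.workdir)
        raise RuntimeError('simulated failure later in setUp')

    def tearDown(self):
        # never reached when setUp raises
        pass

    def test_anything(self):
        # never reached either; the cleanup above still removes workdir
        pass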
Example #15
class TestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def assert_outputs(self, expected_outputs, deployment_id=None):
        if deployment_id is None:
            deployment_id = self.test_id
        outputs = self.client.deployments.outputs.get(deployment_id)
        outputs = outputs['outputs']
        self.assertEqual(expected_outputs, outputs)

    def setUp(self):
        global test_environment
        self.env = test_environment.setup()
        self.logger = logging.getLogger(self._testMethodName)
        self.logger.setLevel(logging.INFO)
        self.logger.info('Starting test setUp')
        self.workdir = tempfile.mkdtemp(prefix='cosmo-test-')
        self.cfy = CfyHelper(cfy_workdir=self.workdir,
                             management_ip=self.env.management_ip)
        self.client = self.env.rest_client
        self.test_id = 'system-test-{0}'.format(time.strftime("%Y%m%d-%H%M"))
        self.blueprint_yaml = None
        self._test_cleanup_context = self.env.handler.CleanupContext(
            self._testMethodName, self.env)
        # register cleanup
        self.addCleanup(self._cleanup)

    def _cleanup(self):
        self._test_cleanup_context.cleanup()
        shutil.rmtree(self.workdir)

    def tearDown(self):
        self.logger.info('Starting test tearDown')
        # note that the cleanup function is registered in setUp
        # because it is called regardless of whether setUp succeeded or failed
        # unlike tearDown which is not called when setUp fails (which might
        # happen when tests override setUp)

    def get_manager_state(self):
        self.logger.info('Fetching manager current state')
        blueprints = {}
        for blueprint in self.client.blueprints.list():
            blueprints[blueprint.id] = blueprint
        deployments = {}
        for deployment in self.client.deployments.list():
            deployments[deployment.id] = deployment
        nodes = {}
        for deployment_id in deployments.keys():
            for node in self.client.node_instances.list(deployment_id):
                nodes[node.id] = node
        deployment_nodes = {}
        node_state = {}
        for deployment_id in deployments.keys():
            deployment_nodes[deployment_id] = self.client.node_instances.list(
                deployment_id)
            node_state[deployment_id] = {}
            for node in deployment_nodes[deployment_id]:
                node_state[deployment_id][node.id] = node

        return {
            'blueprints': blueprints,
            'deployments': deployments,
            'nodes': nodes,
            'node_state': node_state,
            'deployment_nodes': deployment_nodes
        }

    def get_manager_state_delta(self, before, after):
        after = copy.deepcopy(after)
        for blueprint_id in before['blueprints'].keys():
            del after['blueprints'][blueprint_id]
        for deployment_id in before['deployments'].keys():
            del after['deployments'][deployment_id]
            del after['deployment_nodes'][deployment_id]
            del after['node_state'][deployment_id]
        for node_id in before['nodes'].keys():
            del after['nodes'][node_id]
        return after

    def execute_install(self,
                        deployment_id=None,
                        fetch_state=True):
        return self._make_operation_with_before_after_states(
            self.cfy.execute_install,
            fetch_state,
            deployment_id=deployment_id)

    def upload_deploy_and_execute_install(
            self,
            blueprint_id=None,
            deployment_id=None,
            fetch_state=True,
            execute_timeout=DEFAULT_EXECUTE_TIMEOUT,
            inputs=None):

        return self._make_operation_with_before_after_states(
            self.cfy.upload_deploy_and_execute_install,
            fetch_state,
            str(self.blueprint_yaml),
            blueprint_id=blueprint_id or self.test_id,
            deployment_id=deployment_id or self.test_id,
            execute_timeout=execute_timeout,
            inputs=inputs)

    def _make_operation_with_before_after_states(self, operation, fetch_state,
                                                 *args, **kwargs):
        before_state = None
        after_state = None
        if fetch_state:
            before_state = self.get_manager_state()
        operation(*args, **kwargs)
        if fetch_state:
            after_state = self.get_manager_state()
        return before_state, after_state

    def execute_uninstall(self, deployment_id=None):
        self.cfy.execute_uninstall(deployment_id=deployment_id or self.test_id)

    def copy_blueprint(self, blueprint_dir_name, blueprints_dir=None):
        blueprint_path = path(self.workdir) / blueprint_dir_name
        shutil.copytree(get_blueprint_path(blueprint_dir_name, blueprints_dir),
                        str(blueprint_path))
        return blueprint_path

    def wait_for_execution(self, execution, timeout):
        end = time.time() + timeout
        while time.time() < end:
            status = self.client.executions.get(execution.id).status
            if status == 'failed':
                raise AssertionError('Execution "{}" failed'.format(
                    execution.id))
            if status == 'terminated':
                return
            time.sleep(1)
        events, _ = self.client.events.get(execution.id,
                                           batch_size=1000,
                                           include_logs=True)
        self.logger.info('Deployment creation events & logs:')
        for event in events:
            self.logger.info(json.dumps(event))
        raise AssertionError('Execution "{}" timed out'.format(execution.id))

    def repetitive(self, func, timeout=10, exception_class=Exception,
                   args=None, kwargs=None):
        args = args or []
        kwargs = kwargs or {}
        deadline = time.time() + timeout
        while True:
            try:
                return func(*args, **kwargs)
            except exception_class:
                if time.time() > deadline:
                    raise
                time.sleep(1)

    @contextmanager
    def manager_env_fabric(self, **kwargs):
        with fabric.context_managers.settings(
                host_string=self.cfy.get_management_ip(),
                user=self.env.management_user_name,
                key_filename=self.env.management_key_path,
                **kwargs):
            yield fabric.api
class TwoManagersTest(OpenStackNodeCellarTest):
    """
    This test bootstraps managers, installs nodecellar using the first manager,
    checks whether it has been installed correctly, creates a snapshot,
    downloads it, uploads it to the second manager, uninstalls nodecellar using
    the second manager, checks whether nodecellar is not running indeed and
    tears down those managers.
    """

    def setUp(self):
        super(TwoManagersTest, self).setUp()
        self.workdir2 = tempfile.mkdtemp(prefix='cloudify-testenv-')

        self.cfy2 = CfyHelper(self.workdir2, testcase=self)
        second_manager_blueprint_path = '{}_existing'.format(
            self.env._manager_blueprint_path)

        shutil.copy2(self.env._manager_blueprint_path,
                     second_manager_blueprint_path)

        external_resources = [
            'node_templates.management_network.properties',
            'node_templates.management_subnet.properties',
            'node_templates.router.properties',
            'node_templates.agents_security_group.properties',
            'node_templates.management_security_group.properties',
        ]

        with YamlPatcher(second_manager_blueprint_path) as patch:
            for prop in external_resources:
                patch.merge_obj(prop, {'use_external_resource': True})

        second_cloudify_config_path = '{}_existing'.format(
            self.env.cloudify_config_path)

        shutil.copy2(self.env.cloudify_config_path,
                     second_cloudify_config_path)

        new_resources = ['manager_server_name', 'manager_port_name']

        with YamlPatcher(second_cloudify_config_path) as patch:
            for prop in new_resources:
                patch.append_value(prop, '2')

        self.cfy2.bootstrap(
            blueprint_path=second_manager_blueprint_path,
            inputs_file=second_cloudify_config_path,
            install_plugins=self.env.install_plugins,
            keep_up_on_failure=False,
            task_retries=5,
            verbose=False
        )

        self.client2 = create_rest_client(self.cfy2.get_management_ip())

    def _start_execution_and_wait(self, client, deployment, workflow_id):
        execution = client.executions.start(deployment, workflow_id)
        self.wait_for_execution(execution, self.default_timeout, client)

    def _create_snapshot(self, client, name):
        self.wait_for_stop_dep_env_execution_to_end(self.test_id)
        execution = client.snapshots.create(name, False, False)
        self.wait_for_execution(execution, self.default_timeout, client)

    def _restore_snapshot(self, client, name):
        execution = client.snapshots.restore(name, True)
        self.wait_for_execution(execution, self.default_timeout, client)

    def on_nodecellar_installed(self):
        self.logger.info('Creating snapshot...')
        self._create_snapshot(self.client, self.test_id)
        try:
            self.client.snapshots.get(self.test_id)
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot created.')

        self.logger.info('Downloading snapshot...')
        snapshot_file_name = ''.join(random.choice(string.ascii_letters)
                                     for _ in xrange(10))
        snapshot_file_path = os.path.join('/tmp', snapshot_file_name)
        self.client.snapshots.download(self.test_id, snapshot_file_path)
        self.logger.info('Snapshot downloaded.')

        self.logger.info('Uploading snapshot to the second manager...')
        self.client2.snapshots.upload(snapshot_file_path, self.test_id)

        try:
            uploaded_snapshot = self.client2.snapshots.get(
                self.test_id)
            self.assertEqual(
                uploaded_snapshot.status,
                'uploaded',
                "Snapshot {} has a wrong status: '{}' instead of 'uploaded'."
                .format(self.test_id, uploaded_snapshot.status)
            )
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot uploaded.')

        self.logger.info('Removing snapshot file...')
        if os.path.isfile(snapshot_file_path):
            os.remove(snapshot_file_path)
        self.logger.info('Snapshot file removed.')

        self.logger.info('Restoring snapshot...')
        self._restore_snapshot(self.client2, self.test_id)
        try:
            self.client2.deployments.get(self.test_id)
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot restored.')

        self.logger.info('Installing new agents...')
        self._start_execution_and_wait(self.client2, self.test_id,
                                       'install_new_agents')
        self.logger.info('Installed new agents.')
        self.wait_for_stop_dep_env_execution_to_end(self.test_id,
                                                    client=self.client2)

    def execute_uninstall(self, deployment_id=None, cfy=None):
        super(TwoManagersTest, self).execute_uninstall(
            cfy=self.cfy2)

    def post_uninstall_assertions(self, client=None):
        super(TwoManagersTest, self).post_uninstall_assertions(
            self.client2)

    @property
    def default_timeout(self):
        return 1000
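A condensed sketch of the snapshot transfer flow that the TwoManagersTest docstring describes, using only the rest-client calls that already appear in on_nodecellar_installed above; the transfer_snapshot helper and its parameters are illustrative, and the create/restore calls keep the same positional arguments as in the test:

def transfer_snapshot(source_client, target_client, snapshot_id, local_path):
    # create the snapshot on the first manager (positional flags as in
    # on_nodecellar_installed above) and wait for the execution to end
    execution = source_client.snapshots.create(snapshot_id, False, False)
    # ... wait for `execution` via wait_for_execution(...) ...

    # move the snapshot file from the first manager to the second
    source_client.snapshots.download(snapshot_id, local_path)
    target_client.snapshots.upload(local_path, snapshot_id)

    # restore it on the second manager and return the restore execution
    return target_client.snapshots.restore(snapshot_id, True)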
Example #17
class TestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def wait_until_deployment_ready_and_execute_install(self,
                                                        deployment_id,
                                                        inputs):
        self.wait_until_all_deployment_executions_end(deployment_id)
        return self.execute_install(deployment_id=deployment_id)

    def wait_until_all_deployment_executions_end(
            self,
            deployment_id=None,
            include_system_workflows=False):
        if deployment_id:
            msg = "Waiting for executions on " \
                  "deployment {0} to finish".format(deployment_id)
        else:
            msg = "Waiting for system wide executions to finish"
        if include_system_workflows:
            msg = "{0}, including system workflows.".format(msg)
        self.logger.info(msg)

        start_time = time.time()
        while len([execution for execution in self.client.executions.list(
                deployment_id=deployment_id,
                include_system_workflows=include_system_workflows)
                if execution["status"] not in Execution.END_STATES]) > 0:
            time.sleep(1)
            if time.time() - start_time > DEFAULT_EXECUTE_TIMEOUT:
                if deployment_id:
                    timeout_msg = "Timeout while waiting for " \
                                  "executions to end " \
                                  "on deployment {0}.".format(deployment_id)
                else:
                    timeout_msg = "Timeout while waiting for " \
                                  "system wide executions to end."
                raise Exception(timeout_msg)
        return

    def assert_outputs(self, expected_outputs, deployment_id=None):
        if deployment_id is None:
            deployment_id = self.test_id
        outputs = self.client.deployments.outputs.get(deployment_id)
        outputs = outputs['outputs']
        self.assertEqual(expected_outputs, outputs)

    def setUp(self):
        global test_environment
        self.env = test_environment.setup()
        self.logger = logging.getLogger(self._testMethodName)
        self.logger.setLevel(logging.INFO)
        self.logger.info('Starting test setUp')
        self.workdir = tempfile.mkdtemp(prefix='cosmo-test-')
        self.cfy = CfyHelper(cfy_workdir=self.workdir,
                             management_ip=self.env.management_ip,
                             testcase=self)
        self.client = self.env.rest_client
        self.test_id = 'system-test-{0}'.format(time.strftime("%Y%m%d-%H%M"))
        self.blueprint_yaml = None
        self._test_cleanup_context = self.env.handler.CleanupContext(
            self._testMethodName, self.env)
        # register cleanup
        self.addCleanup(self._cleanup)

    def _cleanup(self):
        self._test_cleanup_context.cleanup()
        shutil.rmtree(self.workdir)

    def tearDown(self):
        self.logger.info('Starting test tearDown')
        # note that the cleanup function is registered in setUp
        # because it is called regardless of whether setUp succeeded or failed
        # unlike tearDown which is not called when setUp fails (which might
        # happen when tests override setUp)

    def get_manager_state(self):
        self.logger.info('Fetching manager current state')
        blueprints = {}
        for blueprint in self.client.blueprints.list():
            blueprints[blueprint.id] = blueprint
        deployments = {}
        for deployment in self.client.deployments.list():
            deployments[deployment.id] = deployment
        nodes = {}
        for deployment_id in deployments.keys():
            for node in self.client.node_instances.list(deployment_id):
                nodes[node.id] = node
        deployment_nodes = {}
        node_state = {}
        for deployment_id in deployments.keys():
            deployment_nodes[deployment_id] = self.client.node_instances.list(
                deployment_id).items
            node_state[deployment_id] = {}
            for node in deployment_nodes[deployment_id]:
                node_state[deployment_id][node.id] = node

        return {
            'blueprints': blueprints,
            'deployments': deployments,
            'nodes': nodes,
            'node_state': node_state,
            'deployment_nodes': deployment_nodes
        }

    def get_manager_state_delta(self, before, after):
        after = copy.deepcopy(after)
        for blueprint_id in before['blueprints'].keys():
            del after['blueprints'][blueprint_id]
        for deployment_id in before['deployments'].keys():
            del after['deployments'][deployment_id]
            del after['deployment_nodes'][deployment_id]
            del after['node_state'][deployment_id]
        for node_id in before['nodes'].keys():
            del after['nodes'][node_id]
        return after

    def execute_install(self,
                        deployment_id=None,
                        fetch_state=True):
        self.logger.info("attempting to execute install on deployment {0}"
                         .format(deployment_id))
        return self._make_operation_with_before_after_states(
            self.cfy.execute_install,
            fetch_state,
            deployment_id=deployment_id)

    def upload_deploy_and_execute_install(
            self,
            blueprint_id=None,
            deployment_id=None,
            fetch_state=True,
            execute_timeout=DEFAULT_EXECUTE_TIMEOUT,
            inputs=None):

        return self._make_operation_with_before_after_states(
            self.cfy.upload_deploy_and_execute_install,
            fetch_state,
            str(self.blueprint_yaml),
            blueprint_id=blueprint_id or self.test_id,
            deployment_id=deployment_id or self.test_id,
            execute_timeout=execute_timeout,
            inputs=inputs)

    def upload_blueprint(
            self,
            blueprint_id):
        self.logger.info("attempting to upload blueprint {0}"
                         .format(blueprint_id))
        return self.cfy.upload_blueprint(
            blueprint_id=blueprint_id,
            blueprint_path=str(self.blueprint_yaml))

    def create_deployment(
            self,
            blueprint_id,
            deployment_id,
            inputs):
        self.logger.info("attempting to create_deployment deployment {0}"
                         .format(deployment_id))
        return self.cfy.create_deployment(
            blueprint_id=blueprint_id,
            deployment_id=deployment_id,
            inputs=inputs)

    def _make_operation_with_before_after_states(self, operation, fetch_state,
                                                 *args, **kwargs):
        before_state = None
        after_state = None
        if fetch_state:
            before_state = self.get_manager_state()
        operation(*args, **kwargs)
        if fetch_state:
            after_state = self.get_manager_state()
        return before_state, after_state

    def execute_uninstall(self, deployment_id=None, cfy=None):
        cfy = cfy or self.cfy
        cfy.execute_uninstall(deployment_id=deployment_id or self.test_id)

    def copy_blueprint(self, blueprint_dir_name, blueprints_dir=None):
        blueprint_path = path(self.workdir) / blueprint_dir_name
        shutil.copytree(get_blueprint_path(blueprint_dir_name, blueprints_dir),
                        str(blueprint_path))
        return blueprint_path

    def wait_for_execution(self, execution, timeout, client=None,
                           assert_success=True):
        client = client or self.client
        end = time.time() + timeout
        while time.time() < end:
            status = client.executions.get(execution.id).status
            if status == 'failed':
                if assert_success:
                    raise AssertionError('Execution "{}" failed'.format(
                        execution.id))
                else:
                    return
            if status == 'terminated':
                return
            time.sleep(1)
        events, _ = client.events.get(execution.id,
                                      batch_size=1000,
                                      include_logs=True)
        self.logger.info('Deployment creation events & logs:')
        for event in events:
            self.logger.info(json.dumps(event))
        raise AssertionError('Execution "{}" timed out'.format(execution.id))

    def wait_for_stop_dep_env_execution_to_end(self, deployment_id,
                                               timeout_seconds=240,
                                               client=None):
        client = client or self.client

        executions = client.executions.list(
            deployment_id=deployment_id, include_system_workflows=True)
        running_stop_executions = [e for e in executions if e.workflow_id ==
                                   '_stop_deployment_environment' and
                                   e.status not in Execution.END_STATES]

        if not running_stop_executions:
            return

        if len(running_stop_executions) > 1:
            raise RuntimeError('There is more than one running '
                               '"_stop_deployment_environment" execution: {0}'
                               .format(running_stop_executions))

        execution = running_stop_executions[0]
        return self.wait_for_execution(execution, timeout_seconds, client)

    def repetitive(self, func, timeout=10, exception_class=Exception,
                   args=None, kwargs=None):
        args = args or []
        kwargs = kwargs or {}
        deadline = time.time() + timeout
        while True:
            try:
                return func(*args, **kwargs)
            except exception_class:
                if time.time() > deadline:
                    raise
                time.sleep(1)

    @contextmanager
    def manager_env_fabric(self, **kwargs):
        with fabric.context_managers.settings(
                host_string=self.cfy.get_management_ip(),
                user=self.env.management_user_name,
                key_filename=self.env.management_key_path,
                **kwargs):
            yield fabric.api
Example #18
class TestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def wait_until_deployment_ready_and_execute_install(self,
                                                        deployment_id,
                                                        inputs):
        self.wait_until_all_deployment_executions_end(deployment_id)
        return self.execute_install(deployment_id=deployment_id)

    def wait_until_all_deployment_executions_end(
            self,
            deployment_id=None,
            include_system_workflows=False):
        if deployment_id:
            msg = "Waiting for executions on " \
                  "deployment {0} to finish".format(deployment_id)
        else:
            msg = "Waiting for system wide executions to finish"
        if include_system_workflows:
            msg = "{0}, including system workflows.".format(msg)
        self.logger.info(msg)

        start_time = time.time()
        while len([execution for execution in self.client.executions.list(
                deployment_id=deployment_id,
                include_system_workflows=include_system_workflows)
                if execution["status"] not in Execution.END_STATES]) > 0:
            time.sleep(1)
            if time.time() - start_time > DEFAULT_EXECUTE_TIMEOUT:
                if deployment_id:
                    timeout_msg = "Timeout while waiting for " \
                                  "executions to end " \
                                  "on deployment {0}.".format(deployment_id)
                else:
                    timeout_msg = "Timeout while waiting for " \
                                  "system wide executions to end."
                raise Exception(timeout_msg)
        return

    def assert_outputs(self, expected_outputs, deployment_id=None):
        if deployment_id is None:
            deployment_id = self.test_id
        outputs = self.client.deployments.outputs.get(deployment_id)
        outputs = outputs['outputs']
        self.assertEqual(expected_outputs, outputs)

    def setUp(self):
        global test_environment
        self.env = test_environment.setup()
        self.logger = logging.getLogger(self._testMethodName)
        self.logger.setLevel(logging.INFO)
        self.logger.info('Starting test setUp')
        self.workdir = tempfile.mkdtemp(prefix='cosmo-test-')
        self.cfy = CfyHelper(cfy_workdir=self.workdir,
                             management_ip=self.env.management_ip,
                             testcase=self)
        self.client = self.env.rest_client
        self.test_id = 'system-test-{0}'.format(time.strftime("%Y%m%d-%H%M"))
        self.blueprint_yaml = None
        self._test_cleanup_context = self.env.handler.CleanupContext(
            self._testMethodName, self.env)
        # register cleanup
        self.addCleanup(self._cleanup)
        self.maxDiff = 1024 * 1024 * 10

    def _cleanup(self):
        self._test_cleanup_context.cleanup()
        shutil.rmtree(self.workdir)

    def tearDown(self):
        self.logger.info('Starting test tearDown')
        # note that the cleanup function is registered in setUp
        # because it is called regardless of whether setUp succeeded or failed
        # unlike tearDown which is not called when setUp fails (which might
        # happen when tests override setUp)

    def get_manager_state(self):
        self.logger.info('Fetching manager current state')
        blueprints = {}
        for blueprint in self.client.blueprints.list():
            blueprints[blueprint.id] = blueprint
        deployments = {}
        for deployment in self.client.deployments.list():
            deployments[deployment.id] = deployment
        nodes = {}
        for deployment_id in deployments.keys():
            for node in self.client.node_instances.list(deployment_id):
                nodes[node.id] = node
        deployment_nodes = {}
        node_state = {}
        for deployment_id in deployments.keys():
            deployment_nodes[deployment_id] = self.client.node_instances.list(
                deployment_id).items
            node_state[deployment_id] = {}
            for node in deployment_nodes[deployment_id]:
                node_state[deployment_id][node.id] = node

        return {
            'blueprints': blueprints,
            'deployments': deployments,
            'nodes': nodes,
            'node_state': node_state,
            'deployment_nodes': deployment_nodes
        }

    def get_manager_state_delta(self, before, after):
        after = copy.deepcopy(after)
        for blueprint_id in before['blueprints'].keys():
            del after['blueprints'][blueprint_id]
        for deployment_id in before['deployments'].keys():
            del after['deployments'][deployment_id]
            del after['deployment_nodes'][deployment_id]
            del after['node_state'][deployment_id]
        for node_id in before['nodes'].keys():
            del after['nodes'][node_id]
        return after
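    # A hypothetical usage sketch: the state/delta helpers are typically
    # combined to assert that an install only added the expected resources
    # (the exact assertions below are illustrative, not from this suite):
    #
    #     before = self.get_manager_state()
    #     self.upload_deploy_and_execute_install()
    #     after = self.get_manager_state()
    #     delta = self.get_manager_state_delta(before, after)
    #     self.assertEqual(1, len(delta['blueprints']))
    #     self.assertEqual(1, len(delta['deployments']))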

    def execute_install(self,
                        deployment_id=None,
                        fetch_state=True):
        self.logger.info("attempting to execute install on deployment {0}"
                         .format(deployment_id))
        return self._make_operation_with_before_after_states(
            self.cfy.execute_install,
            fetch_state,
            deployment_id=deployment_id)

    def upload_deploy_and_execute_install(
            self,
            blueprint_id=None,
            deployment_id=None,
            fetch_state=True,
            execute_timeout=DEFAULT_EXECUTE_TIMEOUT,
            inputs=None):

        return self._make_operation_with_before_after_states(
            self.cfy.upload_deploy_and_execute_install,
            fetch_state,
            str(self.blueprint_yaml),
            blueprint_id=blueprint_id or self.test_id,
            deployment_id=deployment_id or self.test_id,
            execute_timeout=execute_timeout,
            inputs=inputs)

    def upload_blueprint(
            self,
            blueprint_id):
        self.logger.info("attempting to upload blueprint {0}"
                         .format(blueprint_id))
        return self.cfy.upload_blueprint(
            blueprint_id=blueprint_id,
            blueprint_path=str(self.blueprint_yaml))

    def create_deployment(
            self,
            blueprint_id,
            deployment_id,
            inputs):
        self.logger.info("attempting to create_deployment deployment {0}"
                         .format(deployment_id))
        return self.cfy.create_deployment(
            blueprint_id=blueprint_id,
            deployment_id=deployment_id,
            inputs=inputs)

    def _make_operation_with_before_after_states(self, operation, fetch_state,
                                                 *args, **kwargs):
        before_state = None
        after_state = None
        if fetch_state:
            before_state = self.get_manager_state()
        operation(*args, **kwargs)
        if fetch_state:
            after_state = self.get_manager_state()
        return before_state, after_state

    def execute_uninstall(self, deployment_id=None, cfy=None):
        cfy = cfy or self.cfy
        cfy.execute_uninstall(deployment_id=deployment_id or self.test_id)

    def copy_blueprint(self, blueprint_dir_name, blueprints_dir=None):
        blueprint_path = path(self.workdir) / blueprint_dir_name
        shutil.copytree(get_blueprint_path(blueprint_dir_name, blueprints_dir),
                        str(blueprint_path))
        return blueprint_path

    def wait_for_execution(self, execution, timeout, client=None,
                           assert_success=True):
        client = client or self.client
        end = time.time() + timeout
        while time.time() < end:
            status = client.executions.get(execution.id).status
            if status == 'failed':
                if assert_success:
                    raise AssertionError('Execution "{}" failed'.format(
                        execution.id))
                else:
                    return
            if status == 'terminated':
                return
            time.sleep(1)
        events, _ = client.events.get(execution.id,
                                      batch_size=1000,
                                      include_logs=True)
        self.logger.info('Execution events & logs:')
        for event in events:
            self.logger.info(json.dumps(event))
        raise AssertionError('Execution "{}" timed out'.format(execution.id))

    def wait_for_stop_dep_env_execution_to_end(self, deployment_id,
                                               timeout_seconds=240,
                                               client=None):
        client = client or self.client

        executions = client.executions.list(
            deployment_id=deployment_id, include_system_workflows=True)
        running_stop_executions = [e for e in executions if e.workflow_id ==
                                   '_stop_deployment_environment' and
                                   e.status not in Execution.END_STATES]

        if not running_stop_executions:
            return

        if len(running_stop_executions) > 1:
            raise RuntimeError('There is more than one running '
                               '"_stop_deployment_environment" execution: {0}'
                               .format(running_stop_executions))

        execution = running_stop_executions[0]
        return self.wait_for_execution(execution, timeout_seconds, client)

    def repetitive(self, func, timeout=10, exception_class=Exception,
                   args=None, kwargs=None):
        args = args or []
        kwargs = kwargs or {}
        deadline = time.time() + timeout
        while True:
            try:
                return func(*args, **kwargs)
            except exception_class:
                if time.time() > deadline:
                    raise
                time.sleep(1)
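    # A hypothetical usage sketch: poll a REST call for up to 30 seconds,
    # retrying while it raises (the call and timeout are illustrative):
    #
    #     self.repetitive(self.client.deployments.get,
    #                     timeout=30,
    #                     args=[deployment_id])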

    @contextmanager
    def manager_env_fabric(self, **kwargs):
        with fabric.context_managers.settings(
                host_string=self.cfy.get_management_ip(),
                user=self.env.management_user_name,
                key_filename=self.env.management_key_path,
                **kwargs):
            yield fabric.api
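    # A hypothetical usage sketch: run a command on the manager host over the
    # fabric SSH context (the command itself is illustrative):
    #
    #     with self.manager_env_fabric() as api:
    #         api.run('uptime')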

    def run_commands_on_agent_host(self,
                                   user,
                                   commands,
                                   deployment_id=None,
                                   compute_node_id=None,
                                   compute_node_instance_id=None):
        if not (compute_node_id or compute_node_instance_id):
            self.fail('no compute_node_id or compute_node_instance_id given')
        deployment_id = deployment_id or self.test_id
        filters = {'deployment_id': deployment_id}
        if compute_node_id:
            filters['node_id'] = compute_node_id
        if compute_node_instance_id:
            filters['id'] = compute_node_instance_id
        computes = self.client.node_instances.list(**filters).items
        if not computes:
            self.fail('No compute nodes were found')
        if len(computes) > 1:
            self.fail('More than one instance found, please refine your query:'
                      ' {0}'.format(computes))
        compute = computes[0]
        private_ip = compute.runtime_properties['ip']
        with self.manager_env_fabric() as api:
            api.sudo('ssh '
                     '-o UserKnownHostsFile=/dev/null '
                     '-o StrictHostKeyChecking=no '
                     '-t -i /root/.ssh/agent_key.pem {0}@{1} "{2}"'
                     .format(user, private_ip, ' && '.join(commands)))
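    # A hypothetical usage sketch, assuming the blueprint defines a compute
    # node template named 'vm' (node name, user and commands are
    # illustrative):
    #
    #     self.run_commands_on_agent_host(
    #         user='ubuntu',
    #         commands=['uname -a', 'whoami'],
    #         compute_node_id='vm')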


class TwoManagersTest(HelloWorldBashTest):
    """
    This test bootstraps the managers, installs helloworld using the first
    manager, verifies that it was installed correctly, creates a snapshot,
    downloads it, uploads it to the second manager, uninstalls helloworld
    using the second manager, verifies that helloworld is indeed no longer
    running, and tears down both managers.
    """
    def setUp(self):
        super(TwoManagersTest, self).setUp()
        self.workdir2 = tempfile.mkdtemp(prefix='cloudify-testenv-')

        self.cfy2 = CfyHelper(self.workdir2, testcase=self)
        second_manager_blueprint_path = '{}_existing'.format(
            self.env._manager_blueprint_path)

        shutil.copy2(self.env._manager_blueprint_path,
                     second_manager_blueprint_path)

        external_resources = [
            'node_templates.management_network.properties',
            'node_templates.management_subnet.properties',
            'node_templates.router.properties',
            'node_templates.agents_security_group.properties',
            'node_templates.management_security_group.properties',
        ]

        with YamlPatcher(second_manager_blueprint_path) as patch:
            for prop in external_resources:
                patch.merge_obj(prop, {'use_external_resource': True})

        second_cloudify_config_path = '{}_existing'.format(
            self.env.cloudify_config_path)

        shutil.copy2(self.env.cloudify_config_path,
                     second_cloudify_config_path)

        new_resources = ['manager_server_name', 'manager_port_name']

        with YamlPatcher(second_cloudify_config_path) as patch:
            for prop in new_resources:
                patch.append_value(prop, '2')

        self.cfy2.bootstrap(blueprint_path=second_manager_blueprint_path,
                            inputs_file=second_cloudify_config_path,
                            install_plugins=self.env.install_plugins,
                            keep_up_on_failure=False,
                            task_retries=5,
                            verbose=False)

        self.client2 = create_rest_client(self.cfy2.get_management_ip())

    def _start_execution_and_wait(self, client, deployment, workflow_id):
        execution = client.executions.start(deployment, workflow_id)
        self.wait_for_execution(execution, self.default_timeout, client)

    def _create_snapshot(self, client, name):
        self.wait_for_stop_dep_env_execution_to_end(self.test_id)
        execution = client.snapshots.create(name, False, False)
        self.wait_for_execution(execution, self.default_timeout, client)

    def _restore_snapshot(self, client, name):
        execution = client.snapshots.restore(name, True)
        self.wait_for_execution(execution, self.default_timeout, client)

    def _do_post_install_assertions(self):
        context = super(TwoManagersTest, self)._do_post_install_assertions()
        self.logger.info('Creating snapshot...')
        self._create_snapshot(self.client, self.test_id)
        try:
            self.client.snapshots.get(self.test_id)
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot created.')

        self.logger.info('Downloading snapshot...')
        snapshot_file_name = ''.join(
            random.choice(string.ascii_letters) for _ in xrange(10))
        snapshot_file_path = os.path.join('/tmp', snapshot_file_name)
        self.client.snapshots.download(self.test_id, snapshot_file_path)
        self.logger.info('Snapshot downloaded.')

        self.logger.info('Uploading snapshot to the second manager...')
        self.client2.snapshots.upload(snapshot_file_path, self.test_id)

        try:
            uploaded_snapshot = self.client2.snapshots.get(self.test_id)
            self.assertEqual(
                uploaded_snapshot.status, 'uploaded',
                "Snapshot {} has a wrong status: '{}' instead of 'uploaded'.".
                format(self.test_id, uploaded_snapshot.status))
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot uploaded.')

        self.logger.info('Removing snapshot file...')
        if os.path.isfile(snapshot_file_path):
            os.remove(snapshot_file_path)
        self.logger.info('Snapshot file removed.')

        self.logger.info('Restoring snapshot...')
        self._restore_snapshot(self.client2, self.test_id)
        try:
            self.client2.deployments.get(self.test_id)
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot restored.')

        self.logger.info('Installing new agents...')
        self._start_execution_and_wait(self.client2, self.test_id,
                                       'install_new_agents')
        self.logger.info('Installed new agents.')
        self.wait_for_stop_dep_env_execution_to_end(self.test_id,
                                                    client=self.client2)
        return context

    def execute_uninstall(self, deployment_id=None, cfy=None):
        super(TwoManagersTest, self).execute_uninstall(cfy=self.cfy2)

    def _assert_nodes_deleted(self):
        super(TwoManagersTest, self)._assert_nodes_deleted(self.client2)

    @property
    def default_timeout(self):
        return 1000


class ManagerUpgradeTest(TestCase):

    def test_manager_upgrade(self):
        """Bootstrap a manager, upgrade it, rollback it, examine the results.

        To test the manager in-place upgrade procedure:
            - bootstrap a manager (this is part of the system under test,
              does destructive changes to the manager, and need a known manager
              version: so, can't use the testenv manager)
            - deploy the hello world app
            - upgrade the manager, changing some inputs (eg. the port that
              Elasticsearch uses)
            - check that everything still works (the previous deployment still
              reports metrics; we can install another deployment)
            - rollback the manager
            - post-rollback checks: the changed inputs are now the original
              values again, the installed app still reports metrics
        """
        self.prepare_manager()

        self.preupgrade_deployment_id = self.deploy_hello_world('pre-')

        self.upgrade_manager()
        self.post_upgrade_checks()

        self.rollback_manager()
        self.post_rollback_checks()

        self.teardown_manager()

    @contextmanager
    def _manager_fabric_env(self):
        inputs = self.manager_inputs
        with fabric.context_managers.settings(
                host_string=self.upgrade_manager_ip,
                user=inputs['ssh_user'],
                key_filename=inputs['ssh_key_filename']):
            yield fabric.api

    def _bootstrap_local_env(self, workdir):
        storage = local.FileStorage(
            os.path.join(workdir, '.cloudify', 'bootstrap'))
        return local.load_env('manager', storage=storage)

    def _blueprint_rpm_versions(self, blueprint_path, inputs):
        """RPM filenames that should be installed on the manager.
        """
        env = local.init_env(
            blueprint_path,
            inputs=inputs,
            ignored_modules=cli_constants.IGNORED_LOCAL_WORKFLOW_MODULES)

        storage = env.storage

        amqp_influx_rpm = storage.get_node('amqp_influx')['properties'][
            'amqpinflux_rpm_source_url']
        restservice_rpm = storage.get_node('rest_service')['properties'][
            'rest_service_rpm_source_url']
        mgmtworker_rpm = storage.get_node('mgmt_worker')['properties'][
            'management_worker_rpm_source_url']
        return {
            'cloudify-amqp-influx': amqp_influx_rpm,
            'cloudify-rest-service': restservice_rpm,
            'cloudify-management-worker': mgmtworker_rpm
        }

    def _cloudify_rpm_versions(self):
        with self._manager_fabric_env() as fabric:
            return fabric.sudo('rpm -qa | grep cloudify')

    def check_rpm_versions(self, blueprint_path, inputs):
        blueprint_rpms = self._blueprint_rpm_versions(blueprint_path, inputs)
        installed_rpms = self._cloudify_rpm_versions()
        for service_name, rpm_filename in blueprint_rpms.items():
            for line in installed_rpms.split('\n'):
                line = line.strip()
                if line.startswith(service_name):
                    self.assertIn(line.strip(), rpm_filename)

    def prepare_manager(self):
        # note that we're using a separate manager checkout, so we need to
        # create our own utils like cfy and the rest client, rather than use
        # the testenv ones
        self.cfy_workdir = tempfile.mkdtemp(prefix='manager-upgrade-')
        self.addCleanup(shutil.rmtree, self.cfy_workdir)
        self.manager_cfy = CfyHelper(cfy_workdir=self.cfy_workdir)

        self.manager_inputs = self._get_bootstrap_inputs()

        self.bootstrap_manager()

        self.rest_client = create_rest_client(self.upgrade_manager_ip)

        self.bootstrap_manager_version = LooseVersion(
            self.rest_client.manager.get_version()['version'])

    def _get_bootstrap_inputs(self):
        prefix = self.test_id

        ssh_key_filename = os.path.join(self.workdir, 'manager.key')
        self.addCleanup(self.env.handler.remove_keypair,
                        prefix + '-manager-key')

        agent_key_path = os.path.join(self.workdir, 'agents.key')
        self.addCleanup(self.env.handler.remove_keypair,
                        prefix + '-agents-key')

        return {
            'keystone_username': self.env.keystone_username,
            'keystone_password': self.env.keystone_password,
            'keystone_tenant_name': self.env.keystone_tenant_name,
            'keystone_url': self.env.keystone_url,
            'region': self.env.region,
            'flavor_id': self.env.medium_flavor_id,
            'image_id': self.env.centos_7_image_id,

            'ssh_user': self.env.centos_7_image_user,
            'external_network_name': self.env.external_network_name,
            'resources_prefix': 'test-upgrade-',

            'manager_server_name': prefix + '-manager',

            # shared settings
            'manager_public_key_name': prefix + '-manager-key',
            'agent_public_key_name': prefix + '-agents-key',
            'ssh_key_filename': ssh_key_filename,
            'agent_private_key_path': agent_key_path,

            'management_network_name': prefix + '-network',
            'management_subnet_name': prefix + '-subnet',
            'management_router': prefix + '-router',

            'agents_user': '',

            # private settings
            'manager_security_group_name': prefix + '-m-sg',
            'agents_security_group_name': prefix + '-a-sg',
            'manager_port_name': prefix + '-port',
            'management_subnet_dns_nameservers': ['8.8.8.8', '8.8.4.4']
        }

    def get_bootstrap_blueprint(self):
        manager_repo_dir = tempfile.mkdtemp(prefix='manager-upgrade-')
        self.addCleanup(shutil.rmtree, manager_repo_dir)
        manager_repo = clone(BOOTSTRAP_REPO_URL,
                             manager_repo_dir,
                             branch=BOOTSTRAP_BRANCH)
        yaml_path = manager_repo / 'openstack-manager-blueprint.yaml'

        # allow the ports that we're going to connect to from the tests,
        # when doing checks
        for port in [8086, 9200, 9900]:
            secgroup_cfg = [{
                'port_range_min': port,
                'port_range_max': port,
                'remote_ip_prefix': '0.0.0.0/0'
            }]
            secgroup_cfg_path = 'node_templates.management_security_group'\
                '.properties.rules'
            with YamlPatcher(yaml_path) as patch:
                patch.append_value(secgroup_cfg_path, secgroup_cfg)

        return yaml_path

    def _load_private_ip_from_env(self, workdir):
        env = self._bootstrap_local_env(workdir)
        return env.outputs()['private_ip']

    def bootstrap_manager(self):
        self.bootstrap_blueprint = self.get_bootstrap_blueprint()
        inputs_path = self.manager_cfy._get_inputs_in_temp_file(
            self.manager_inputs, self._testMethodName)

        self.manager_cfy.bootstrap(self.bootstrap_blueprint,
                                   inputs_file=inputs_path)

        self.upgrade_manager_ip = self.manager_cfy.get_management_ip()
        self.manager_private_ip = self._load_private_ip_from_env(
            self.cfy_workdir)

        # TODO: why is this needed?
        self.manager_cfy.use(management_ip=self.upgrade_manager_ip)

    def deploy_hello_world(self, prefix=''):
        """Install the hello world app."""
        blueprint_id = prefix + self.test_id
        deployment_id = prefix + self.test_id
        hello_repo_dir = tempfile.mkdtemp(prefix='manager-upgrade-')
        hello_repo_path = clone(
            'https://github.com/cloudify-cosmo/'
            'cloudify-hello-world-example.git',
            hello_repo_dir
        )
        self.addCleanup(shutil.rmtree, hello_repo_dir)
        hello_blueprint_path = hello_repo_path / 'blueprint.yaml'
        self.manager_cfy.upload_blueprint(blueprint_id, hello_blueprint_path)

        inputs = {
            'agent_user': self.env.ubuntu_image_user,
            'image': self.env.ubuntu_trusty_image_name,
            'flavor': self.env.flavor_name
        }
        self.manager_cfy.create_deployment(blueprint_id, deployment_id,
                                           inputs=inputs)

        self.manager_cfy.execute_install(deployment_id=deployment_id)
        return deployment_id

    def get_upgrade_blueprint(self):
        repo_dir = tempfile.mkdtemp(prefix='manager-upgrade-')
        self.addCleanup(shutil.rmtree, repo_dir)
        upgrade_blueprint_path = clone(UPGRADE_REPO_URL,
                                       repo_dir,
                                       branch=UPGRADE_BRANCH)

        return upgrade_blueprint_path / 'simple-manager-blueprint.yaml'

    def upgrade_manager(self):
        self.upgrade_blueprint = self.get_upgrade_blueprint()

        # we're changing one of the ES inputs - make sure we also re-install ES
        with YamlPatcher(self.upgrade_blueprint) as patch:
            patch.set_value(
                ('node_templates.elasticsearch.properties'
                 '.use_existing_on_upgrade'),
                False)

        self.upgrade_inputs = {
            'private_ip': self.manager_private_ip,
            'public_ip': self.upgrade_manager_ip,
            'ssh_key_filename': self.manager_inputs['ssh_key_filename'],
            'ssh_user': self.manager_inputs['ssh_user'],
            'elasticsearch_endpoint_port': 9900

        }
        upgrade_inputs_file = self.manager_cfy._get_inputs_in_temp_file(
            self.upgrade_inputs, self._testMethodName)

        with self.manager_cfy.maintenance_mode():
            self.manager_cfy.upgrade_manager(
                blueprint_path=self.upgrade_blueprint,
                inputs_file=upgrade_inputs_file)

    def post_upgrade_checks(self):
        """To check if the upgrade succeeded:
            - fire a request to the REST service
            - check that elasticsearch is listening on the changed port
            - check that the pre-existing deployment still reports to influxdb
            - install a new deployment, check that it reports to influxdb,
              and uninstall it: to check that the manager still allows
              creating, installing and uninstalling deployments correctly
        """
        upgrade_manager_version = LooseVersion(
            self.rest_client.manager.get_version()['version'])
        self.assertGreaterEqual(upgrade_manager_version,
                                self.bootstrap_manager_version)
        self.check_rpm_versions(self.upgrade_blueprint, self.upgrade_inputs)

        self.rest_client.blueprints.list()
        self.check_elasticsearch(self.upgrade_manager_ip, 9900)
        self.check_influx(self.preupgrade_deployment_id)

        postupgrade_deployment_id = self.deploy_hello_world('post-')
        self.check_influx(postupgrade_deployment_id)
        self.uninstall_deployment(postupgrade_deployment_id)

    def check_influx(self, deployment_id):
        """Check that the deployment_id continues to report metrics.

        Look at the last 5 seconds worth of metrics. To avoid race conditions
        (running this check before the deployment even had a chance to report
        any metrics), first wait 5 seconds to allow some metrics to be
        gathered.
        """
        # TODO influx config should be pulled from props?
        time.sleep(5)
        influx_client = InfluxDBClient(self.upgrade_manager_ip, 8086,
                                       'root', 'root', 'cloudify')
        try:
            result = influx_client.query('select * from /^{0}\./i '
                                         'where time > now() - 5s'
                                         .format(deployment_id))
        except NameError as e:
            self.fail('monitoring events for deployment with ID {0} were '
                      'not found on influxDB. Error: {1}'
                      .format(deployment_id, e))

        self.assertTrue(len(result) > 0)

    def check_elasticsearch(self, host, port):
        """Check that elasticsearch is listening on the given host:port.

        This is used for checking if the ES port changed correctly during
        the upgrade.
        """
        try:
            response = urllib2.urlopen('http://{0}:{1}'.format(host, port))
            response = json.load(response)
            if response['status'] != 200:
                raise ValueError('Incorrect status {0}'.format(
                    response['status']))
        except (ValueError, urllib2.URLError):
            self.fail("elasticsearch isn't listening on {0}:{1}"
                      .format(host, port))
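    # Roughly the same check done by hand (hypothetical shell equivalent,
    # port shown is the post-upgrade one used in this test):
    #
    #     curl http://<manager-ip>:9900/
    #     # expect a JSON body containing "status": 200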

    def uninstall_deployment(self, deployment_id):
        self.manager_cfy.execute_uninstall(deployment_id)

    def rollback_manager(self):
        rollback_inputs = {
            'private_ip': self.manager_private_ip,
            'public_ip': self.upgrade_manager_ip,
            'ssh_key_filename': self.manager_inputs['ssh_key_filename'],
            'ssh_user': self.manager_inputs['ssh_user'],
        }
        rollback_inputs_file = self.manager_cfy._get_inputs_in_temp_file(
            rollback_inputs, self._testMethodName)

        with self.manager_cfy.maintenance_mode():
            self.manager_cfy.rollback_manager(
                blueprint_path=self.upgrade_blueprint,
                inputs_file=rollback_inputs_file)

    def post_rollback_checks(self):
        rollback_manager_version = LooseVersion(
            self.rest_client.manager.get_version()['version'])
        self.assertEqual(rollback_manager_version,
                         self.bootstrap_manager_version)
        self.check_rpm_versions(self.bootstrap_blueprint, self.manager_inputs)

        self.rest_client.blueprints.list()
        self.check_elasticsearch(self.upgrade_manager_ip, 9200)
        self.check_influx(self.preupgrade_deployment_id)

    def teardown_manager(self):
        self.manager_cfy.teardown(ignore_deployments=True)