def bootstrap(self):
    """Bootstrap the test-environment Cloudify manager if one is not
    already running.

    Chooses between the legacy provider-based bootstrap and the
    manager-blueprint bootstrap depending on
    ``self.is_provider_bootstrap``.  After a successful bootstrap the
    manager IP is recorded via ``_running_env_setup`` and the provider
    context is handed to the environment handler.
    """
    # Idempotent: calling again while a manager is up is a no-op.
    if self._management_running:
        return
    # Register a cleanup context so resources created during bootstrap
    # are torn down with the test environment.
    self._global_cleanup_context = self.handler.CleanupContext(
        'testenv', self)
    cfy = CfyHelper(cfy_workdir=self._workdir)

    self.handler.before_bootstrap()
    if self.is_provider_bootstrap:
        # Legacy path: bootstrap through a provider plugin.
        cfy.bootstrap_with_providers(
            self.cloudify_config_path,
            self.handler.provider,
            keep_up_on_failure=False,
            verbose=True,
            dev_mode=False)
    else:
        # Blueprint path: whether the manager blueprint's plugin
        # dependencies get installed is controlled by an env var
        # (defaults to True).
        install_plugins = self._get_boolean_env_var(
            INSTALL_MANAGER_BLUEPRINT_DEPENDENCIES, True)
        cfy.bootstrap(
            self._manager_blueprint_path,
            inputs_file=self.cloudify_config_path,
            install_plugins=install_plugins,
            keep_up_on_failure=False,
            verbose=True)
    self._running_env_setup(cfy.get_management_ip())
    self.handler.after_bootstrap(cfy.get_provider_context())
def bootstrap(self, task_retries=5):
    """Bootstrap the test-environment manager unless one is already up.

    :param task_retries: number of retries for bootstrap workflow tasks.
    """
    if self._management_running:
        # A manager is already serving this testenv -- nothing to do.
        return

    # Everything created from here on must be cleaned up with the env.
    self._global_cleanup_context = self.handler.CleanupContext(
        'testenv', self)
    helper = CfyHelper(cfy_workdir=self._workdir)

    self.handler.before_bootstrap()
    helper.bootstrap(
        self._manager_blueprint_path,
        inputs_file=self.cloudify_config_path,
        install_plugins=self.install_plugins,
        keep_up_on_failure=False,
        task_retries=task_retries,
        verbose=True)

    # Record the manager's address and provider context for later use.
    self._running_env_setup(helper.get_management_ip())
    self.handler.after_bootstrap(helper.get_provider_context())
def bootstrap(self, task_retries=5):
    """Stand up the Cloudify manager for this test environment.

    A no-op when a manager is already running.  ``task_retries`` is
    forwarded to the bootstrap workflow.
    """
    if self._management_running:
        return
    self._global_cleanup_context = self.handler.CleanupContext(
        'testenv', self)
    cli = CfyHelper(cfy_workdir=self._workdir)
    self.handler.before_bootstrap()
    bootstrap_options = dict(
        inputs_file=self.cloudify_config_path,
        install_plugins=self.install_plugins,
        keep_up_on_failure=False,
        task_retries=task_retries,
        verbose=True,
    )
    cli.bootstrap(self._manager_blueprint_path, **bootstrap_options)
    management_ip = cli.get_management_ip()
    self._running_env_setup(management_ip)
    self.handler.after_bootstrap(cli.get_provider_context())
class TwoManagersTest(HelloWorldBashTest):
    """
    This test bootstraps managers, installs helloworld using the first
    manager, checks whether it has been installed correctly, creates a
    snapshot, downloads it, uploads it to the second manager, uninstalls
    helloworld using the second manager, checks whether helloworld is not
    running indeed and tears down those managers.
    """

    def setUp(self):
        """Bootstrap the second manager alongside the testenv one."""
        super(TwoManagersTest, self).setUp()
        # Separate workdir and CLI helper for the second manager.
        self.workdir2 = tempfile.mkdtemp(prefix='cloudify-testenv-')
        self.cfy2 = CfyHelper(self.workdir2, testcase=self)
        # Clone the first manager's blueprint; the second manager reuses
        # the networking resources the first bootstrap already created,
        # so mark them as external.
        second_manager_blueprint_path = '{}_existing'.format(
            self.env._manager_blueprint_path)
        shutil.copy2(self.env._manager_blueprint_path,
                     second_manager_blueprint_path)
        external_resources = [
            'node_templates.management_network.properties',
            'node_templates.management_subnet.properties',
            'node_templates.router.properties',
            'node_templates.agents_security_group.properties',
            'node_templates.management_security_group.properties',
        ]
        with YamlPatcher(second_manager_blueprint_path) as patch:
            for prop in external_resources:
                patch.merge_obj(prop, {'use_external_resource': True})
        # Clone the inputs and suffix the names that must be unique per
        # manager (server and port) with '2'.
        second_cloudify_config_path = '{}_existing'.format(
            self.env.cloudify_config_path)
        shutil.copy2(self.env.cloudify_config_path,
                     second_cloudify_config_path)
        new_resources = ['manager_server_name', 'manager_port_name']
        with YamlPatcher(second_cloudify_config_path) as patch:
            for prop in new_resources:
                patch.append_value(prop, '2')
        self.cfy2.bootstrap(blueprint_path=second_manager_blueprint_path,
                            inputs_file=second_cloudify_config_path,
                            install_plugins=self.env.install_plugins,
                            keep_up_on_failure=False,
                            task_retries=5,
                            verbose=False)
        self.client2 = create_rest_client(self.cfy2.get_management_ip())

    def _start_execution_and_wait(self, client, deployment, workflow_id):
        # Start a workflow and block until it finishes (or times out).
        execution = client.executions.start(deployment, workflow_id)
        self.wait_for_execution(execution, self.default_timeout, client)

    def _create_snapshot(self, client, name):
        # Snapshot creation conflicts with running executions, so wait
        # for the deployment-environment-stop execution to end first.
        self.wait_for_stop_dep_env_execution_to_end(self.test_id)
        execution = client.snapshots.create(name, False, False)
        self.wait_for_execution(execution, self.default_timeout, client)

    def _restore_snapshot(self, client, name):
        execution = client.snapshots.restore(name, True)
        self.wait_for_execution(execution, self.default_timeout, client)

    def _do_post_install_assertions(self):
        """After helloworld installs on the first manager: snapshot it,
        move the snapshot to the second manager, restore it there and
        install new agents so the second manager takes over the app."""
        context = super(TwoManagersTest, self)._do_post_install_assertions()

        self.logger.info('Creating snapshot...')
        self._create_snapshot(self.client, self.test_id)
        try:
            self.client.snapshots.get(self.test_id)
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot created.')

        self.logger.info('Downloading snapshot...')
        # Random file name to avoid collisions in /tmp.
        snapshot_file_name = ''.join(
            random.choice(string.ascii_letters) for _ in xrange(10))
        snapshot_file_path = os.path.join('/tmp', snapshot_file_name)
        self.client.snapshots.download(self.test_id, snapshot_file_path)
        self.logger.info('Snapshot downloaded.')

        self.logger.info('Uploading snapshot to the second manager...')
        self.client2.snapshots.upload(snapshot_file_path, self.test_id)
        try:
            uploaded_snapshot = self.client2.snapshots.get(self.test_id)
            self.assertEqual(
                uploaded_snapshot.status,
                'uploaded',
                "Snapshot {} has a wrong status: '{}' instead of 'uploaded'.".
                format(self.test_id, uploaded_snapshot.status))
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot uploaded.')

        self.logger.info('Removing snapshot file...')
        if os.path.isfile(snapshot_file_path):
            os.remove(snapshot_file_path)
        self.logger.info('Snapshot file removed.')

        self.logger.info('Restoring snapshot...')
        self._restore_snapshot(self.client2, self.test_id)
        try:
            self.client2.deployments.get(self.test_id)
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot restored.')

        self.logger.info('Installing new agents...')
        self._start_execution_and_wait(self.client2, self.test_id,
                                       'install_new_agents')
        self.logger.info('Installed new agents.')
        self.wait_for_stop_dep_env_execution_to_end(self.test_id,
                                                    client=self.client2)
        return context

    def execute_uninstall(self, deployment_id=None, cfy=None):
        # Uninstall is always driven through the second manager's CLI;
        # the parameters exist only to keep the parent signature.
        super(TwoManagersTest, self).execute_uninstall(cfy=self.cfy2)

    def _assert_nodes_deleted(self):
        # Node deletion is verified against the second manager.
        super(TwoManagersTest, self)._assert_nodes_deleted(self.client2)

    @property
    def default_timeout(self):
        # Generous timeout (seconds) -- snapshot/restore and agent
        # installation executions in this test are slow.
        return 1000
class TwoManagersTest(OpenStackNodeCellarTest):
    """
    This test bootstraps managers, installs nodecellar using the first
    manager, checks whether it has been installed correctly, creates a
    snapshot, downloads it, uploads it to the second manager, uninstalls
    nodecellar using the second manager, checks whether nodecellar is not
    running indeed and tears down those managers.
    """

    def setUp(self):
        """Bootstrap the second manager alongside the testenv one."""
        super(TwoManagersTest, self).setUp()
        # Separate workdir and CLI helper for the second manager.
        self.workdir2 = tempfile.mkdtemp(prefix='cloudify-testenv-')
        self.cfy2 = CfyHelper(self.workdir2, testcase=self)
        # Clone the first manager's blueprint; the second manager reuses
        # the networking resources the first bootstrap already created,
        # so mark them as external.
        second_manager_blueprint_path = '{}_existing'.format(
            self.env._manager_blueprint_path)
        shutil.copy2(self.env._manager_blueprint_path,
                     second_manager_blueprint_path)
        external_resources = [
            'node_templates.management_network.properties',
            'node_templates.management_subnet.properties',
            'node_templates.router.properties',
            'node_templates.agents_security_group.properties',
            'node_templates.management_security_group.properties',
        ]
        with YamlPatcher(second_manager_blueprint_path) as patch:
            for prop in external_resources:
                patch.merge_obj(prop, {'use_external_resource': True})
        # Clone the inputs and suffix the names that must be unique per
        # manager (server and port) with '2'.
        second_cloudify_config_path = '{}_existing'.format(
            self.env.cloudify_config_path)
        shutil.copy2(self.env.cloudify_config_path,
                     second_cloudify_config_path)
        new_resources = ['manager_server_name', 'manager_port_name']
        with YamlPatcher(second_cloudify_config_path) as patch:
            for prop in new_resources:
                patch.append_value(prop, '2')
        self.cfy2.bootstrap(
            blueprint_path=second_manager_blueprint_path,
            inputs_file=second_cloudify_config_path,
            install_plugins=self.env.install_plugins,
            keep_up_on_failure=False,
            task_retries=5,
            verbose=False
        )
        self.client2 = create_rest_client(self.cfy2.get_management_ip())

    def _start_execution_and_wait(self, client, deployment, workflow_id):
        # Start a workflow and block until it finishes (or times out).
        execution = client.executions.start(deployment, workflow_id)
        self.wait_for_execution(execution, self.default_timeout, client)

    def _create_snapshot(self, client, name):
        # Snapshot creation conflicts with running executions, so wait
        # for the deployment-environment-stop execution to end first.
        self.wait_for_stop_dep_env_execution_to_end(self.test_id)
        execution = client.snapshots.create(name, False, False)
        self.wait_for_execution(execution, self.default_timeout, client)

    def _restore_snapshot(self, client, name):
        execution = client.snapshots.restore(name, True)
        self.wait_for_execution(execution, self.default_timeout, client)

    def on_nodecellar_installed(self):
        """After nodecellar installs on the first manager: snapshot it,
        move the snapshot to the second manager, restore it there and
        install new agents so the second manager takes over the app."""
        self.logger.info('Creating snapshot...')
        self._create_snapshot(self.client, self.test_id)
        try:
            self.client.snapshots.get(self.test_id)
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot created.')

        self.logger.info('Downloading snapshot...')
        # Random file name to avoid collisions in /tmp.
        snapshot_file_name = ''.join(random.choice(string.ascii_letters)
                                     for _ in xrange(10))
        snapshot_file_path = os.path.join('/tmp', snapshot_file_name)
        self.client.snapshots.download(self.test_id, snapshot_file_path)
        self.logger.info('Snapshot downloaded.')

        self.logger.info('Uploading snapshot to the second manager...')
        self.client2.snapshots.upload(snapshot_file_path, self.test_id)
        try:
            uploaded_snapshot = self.client2.snapshots.get(
                self.test_id)
            self.assertEqual(
                uploaded_snapshot.status,
                'uploaded',
                "Snapshot {} has a wrong status: '{}' instead of 'uploaded'."
                .format(self.test_id, uploaded_snapshot.status)
            )
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot uploaded.')

        self.logger.info('Removing snapshot file...')
        if os.path.isfile(snapshot_file_path):
            os.remove(snapshot_file_path)
        self.logger.info('Snapshot file removed.')

        self.logger.info('Restoring snapshot...')
        self._restore_snapshot(self.client2, self.test_id)
        try:
            self.client2.deployments.get(self.test_id)
        except CloudifyClientError as e:
            self.fail(e.message)
        self.logger.info('Snapshot restored.')

        self.logger.info('Installing new agents...')
        self._start_execution_and_wait(self.client2, self.test_id,
                                       'install_new_agents')
        self.logger.info('Installed new agents.')
        self.wait_for_stop_dep_env_execution_to_end(self.test_id,
                                                    client=self.client2)

    def execute_uninstall(self, deployment_id=None, cfy=None):
        # Uninstall is always driven through the second manager's CLI;
        # the parameters exist only to keep the parent signature.
        super(TwoManagersTest, self).execute_uninstall(
            cfy=self.cfy2)

    def post_uninstall_assertions(self, client=None):
        # Post-uninstall state is verified against the second manager.
        super(TwoManagersTest, self).post_uninstall_assertions(
            self.client2)

    @property
    def default_timeout(self):
        # Generous timeout (seconds) -- snapshot/restore and agent
        # installation executions in this test are slow.
        return 1000
class ManagerUpgradeTest(TestCase):
    """In-place manager upgrade/rollback system test.

    Bootstraps its own manager (the shared testenv manager cannot be
    used: the procedure is destructive and needs a known manager
    version), then exercises the upgrade and rollback flows end to end.
    """

    def test_manager_upgrade(self):
        """Bootstrap a manager, upgrade it, rollback it, examine the results.

        To test the manager in-place upgrade procedure:
        - bootstrap a manager (this is part of the system under test,
          does destructive changes to the manager, and need a known
          manager version: so, can't use the testenv manager)
        - deploy the hello world app
        - upgrade the manager, changing some inputs (eg. the port that
          Elasticsearch uses)
        - check that everything still works (the previous deployment
          still reports metrics; we can install another deployment)
        - rollback the manager
        - post-rollback checks: the changed inputs are now the original
          values again, the installed app still reports metrics
        """
        self.prepare_manager()
        self.preupgrade_deployment_id = self.deploy_hello_world('pre-')

        self.upgrade_manager()
        self.post_upgrade_checks()

        self.rollback_manager()
        self.post_rollback_checks()

        self.teardown_manager()

    @contextmanager
    def _manager_fabric_env(self):
        """Yield a fabric API bound (over SSH) to the manager under test."""
        inputs = self.manager_inputs
        with fabric.context_managers.settings(
                host_string=self.upgrade_manager_ip,
                user=inputs['ssh_user'],
                key_filename=inputs['ssh_key_filename']):
            yield fabric.api

    def _bootstrap_local_env(self, workdir):
        # Load the local bootstrap environment that `cfy bootstrap`
        # persisted under the working directory.
        storage = local.FileStorage(
            os.path.join(workdir, '.cloudify', 'bootstrap'))
        return local.load_env('manager', storage=storage)

    def _blueprint_rpm_versions(self, blueprint_path, inputs):
        """RPM filenames that should be installed on the manager.

        Returns a mapping of package name -> RPM source URL, read from
        the manager blueprint's node properties.
        """
        env = local.init_env(
            blueprint_path,
            inputs=inputs,
            ignored_modules=cli_constants.IGNORED_LOCAL_WORKFLOW_MODULES)
        storage = env.storage
        amqp_influx_rpm = storage.get_node('amqp_influx')['properties'][
            'amqpinflux_rpm_source_url']
        restservice_rpm = storage.get_node('rest_service')['properties'][
            'rest_service_rpm_source_url']
        mgmtworker_rpm = storage.get_node('mgmt_worker')['properties'][
            'management_worker_rpm_source_url']
        return {
            'cloudify-amqp-influx': amqp_influx_rpm,
            'cloudify-rest-service': restservice_rpm,
            'cloudify-management-worker': mgmtworker_rpm
        }

    def _cloudify_rpm_versions(self):
        # List the cloudify RPMs actually installed on the manager host.
        with self._manager_fabric_env() as fabric:
            return fabric.sudo('rpm -qa | grep cloudify')

    def check_rpm_versions(self, blueprint_path, inputs):
        """Assert that each installed cloudify RPM matches the RPM the
        blueprint declares for that service."""
        blueprint_rpms = self._blueprint_rpm_versions(blueprint_path, inputs)
        installed_rpms = self._cloudify_rpm_versions()
        for service_name, rpm_filename in blueprint_rpms.items():
            for line in installed_rpms.split('\n'):
                line = line.strip()
                if line.startswith(service_name):
                    # The installed package name must appear in the RPM
                    # source URL declared by the blueprint.
                    self.assertIn(line.strip(), rpm_filename)

    def prepare_manager(self):
        """Bootstrap the manager under test and create a REST client."""
        # note that we're using a separate manager checkout, so we need to
        # create our own utils like cfy and the rest client, rather than use
        # the testenv ones
        self.cfy_workdir = tempfile.mkdtemp(prefix='manager-upgrade-')
        self.addCleanup(shutil.rmtree, self.cfy_workdir)
        self.manager_cfy = CfyHelper(cfy_workdir=self.cfy_workdir)

        self.manager_inputs = self._get_bootstrap_inputs()

        self.bootstrap_manager()

        self.rest_client = create_rest_client(self.upgrade_manager_ip)

        # Remember the pre-upgrade version to compare against after
        # upgrade (must not go down) and rollback (must be equal).
        self.bootstrap_manager_version = LooseVersion(
            self.rest_client.manager.get_version()['version'])

    def _get_bootstrap_inputs(self):
        """Build the bootstrap inputs dict; registers keypair cleanups."""
        prefix = self.test_id

        ssh_key_filename = os.path.join(self.workdir, 'manager.key')
        self.addCleanup(self.env.handler.remove_keypair,
                        prefix + '-manager-key')

        agent_key_path = os.path.join(self.workdir, 'agents.key')
        self.addCleanup(self.env.handler.remove_keypair,
                        prefix + '-agents-key')

        return {
            'keystone_username': self.env.keystone_username,
            'keystone_password': self.env.keystone_password,
            'keystone_tenant_name': self.env.keystone_tenant_name,
            'keystone_url': self.env.keystone_url,
            'region': self.env.region,
            'flavor_id': self.env.medium_flavor_id,
            'image_id': self.env.centos_7_image_id,

            'ssh_user': self.env.centos_7_image_user,
            'external_network_name': self.env.external_network_name,
            'resources_prefix': 'test-upgrade-',

            'manager_server_name': prefix + '-manager',

            # shared settings
            'manager_public_key_name': prefix + '-manager-key',
            'agent_public_key_name': prefix + '-agents-key',
            'ssh_key_filename': ssh_key_filename,
            'agent_private_key_path': agent_key_path,

            'management_network_name': prefix + '-network',
            'management_subnet_name': prefix + '-subnet',
            'management_router': prefix + '-router',

            'agents_user': '',

            # private settings
            'manager_security_group_name': prefix + '-m-sg',
            'agents_security_group_name': prefix + '-a-sg',
            'manager_port_name': prefix + '-port',
            'management_subnet_dns_nameservers': ['8.8.8.8', '8.8.4.4']
        }

    def get_bootstrap_blueprint(self):
        """Clone the manager blueprint and open the ports the checks use."""
        manager_repo_dir = tempfile.mkdtemp(prefix='manager-upgrade-')
        self.addCleanup(shutil.rmtree, manager_repo_dir)
        manager_repo = clone(BOOTSTRAP_REPO_URL,
                             manager_repo_dir,
                             branch=BOOTSTRAP_BRANCH)
        yaml_path = manager_repo / 'openstack-manager-blueprint.yaml'

        # allow the ports that we're going to connect to from the tests,
        # when doing checks
        for port in [8086, 9200, 9900]:
            secgroup_cfg = [{
                'port_range_min': port,
                'port_range_max': port,
                'remote_ip_prefix': '0.0.0.0/0'
            }]
            secgroup_cfg_path = 'node_templates.management_security_group'\
                                '.properties.rules'
            with YamlPatcher(yaml_path) as patch:
                patch.append_value(secgroup_cfg_path, secgroup_cfg)

        return yaml_path

    def _load_private_ip_from_env(self, workdir):
        env = self._bootstrap_local_env(workdir)
        return env.outputs()['private_ip']

    def bootstrap_manager(self):
        """Run `cfy bootstrap` and record the manager's public/private IPs."""
        self.bootstrap_blueprint = self.get_bootstrap_blueprint()
        inputs_path = self.manager_cfy._get_inputs_in_temp_file(
            self.manager_inputs, self._testMethodName)

        self.manager_cfy.bootstrap(self.bootstrap_blueprint,
                                   inputs_file=inputs_path)

        self.upgrade_manager_ip = self.manager_cfy.get_management_ip()
        self.manager_private_ip = self._load_private_ip_from_env(
            self.cfy_workdir)
        # TODO: why is this needed?
        self.manager_cfy.use(management_ip=self.upgrade_manager_ip)

    def deploy_hello_world(self, prefix=''):
        """Install the hello world app.

        :param prefix: distinguishes pre-/post-upgrade deployments.
        :return: the deployment id.
        """
        blueprint_id = prefix + self.test_id
        deployment_id = prefix + self.test_id
        hello_repo_dir = tempfile.mkdtemp(prefix='manager-upgrade-')
        hello_repo_path = clone(
            'https://github.com/cloudify-cosmo/'
            'cloudify-hello-world-example.git',
            hello_repo_dir
        )
        self.addCleanup(shutil.rmtree, hello_repo_dir)
        hello_blueprint_path = hello_repo_path / 'blueprint.yaml'
        self.manager_cfy.upload_blueprint(blueprint_id, hello_blueprint_path)

        inputs = {
            'agent_user': self.env.ubuntu_image_user,
            'image': self.env.ubuntu_trusty_image_name,
            'flavor': self.env.flavor_name
        }
        self.manager_cfy.create_deployment(blueprint_id, deployment_id,
                                           inputs=inputs)

        self.manager_cfy.execute_install(deployment_id=deployment_id)
        return deployment_id

    def get_upgrade_blueprint(self):
        """Clone the repo holding the upgrade (simple manager) blueprint."""
        repo_dir = tempfile.mkdtemp(prefix='manager-upgrade-')
        self.addCleanup(shutil.rmtree, repo_dir)
        upgrade_blueprint_path = clone(UPGRADE_REPO_URL,
                                       repo_dir,
                                       branch=UPGRADE_BRANCH)
        return upgrade_blueprint_path / 'simple-manager-blueprint.yaml'

    def upgrade_manager(self):
        """Upgrade the manager in maintenance mode, changing the ES port."""
        self.upgrade_blueprint = self.get_upgrade_blueprint()
        # we're changing one of the ES inputs - make sure we also re-install
        # ES
        with YamlPatcher(self.upgrade_blueprint) as patch:
            patch.set_value(
                ('node_templates.elasticsearch.properties'
                 '.use_existing_on_upgrade'),
                False)

        self.upgrade_inputs = {
            'private_ip': self.manager_private_ip,
            'public_ip': self.upgrade_manager_ip,
            'ssh_key_filename': self.manager_inputs['ssh_key_filename'],
            'ssh_user': self.manager_inputs['ssh_user'],
            'elasticsearch_endpoint_port': 9900
        }
        upgrade_inputs_file = self.manager_cfy._get_inputs_in_temp_file(
            self.upgrade_inputs, self._testMethodName)

        with self.manager_cfy.maintenance_mode():
            self.manager_cfy.upgrade_manager(
                blueprint_path=self.upgrade_blueprint,
                inputs_file=upgrade_inputs_file)

    def post_upgrade_checks(self):
        """To check if the upgrade succeeded:
        - fire a request to the REST service
        - check that elasticsearch is listening on the changed port
        - check that the pre-existing deployment still reports to influxdb
        - install a new deployment, check that it reports to influxdb,
          and uninstall it: to check that the manager still allows
          creating, installing and uninstalling deployments correctly
        """
        upgrade_manager_version = LooseVersion(
            self.rest_client.manager.get_version()['version'])
        self.assertGreaterEqual(upgrade_manager_version,
                                self.bootstrap_manager_version)
        self.check_rpm_versions(self.upgrade_blueprint, self.upgrade_inputs)

        self.rest_client.blueprints.list()
        self.check_elasticsearch(self.upgrade_manager_ip, 9900)
        self.check_influx(self.preupgrade_deployment_id)

        postupgrade_deployment_id = self.deploy_hello_world('post-')
        self.check_influx(postupgrade_deployment_id)
        self.uninstall_deployment(postupgrade_deployment_id)

    def check_influx(self, deployment_id):
        """Check that the deployment_id continues to report metrics.

        Look at the last 5 seconds worth of metrics. To avoid race
        conditions (running this check before the deployment even had
        a chance to report any metrics), first wait 5 seconds to allow
        some metrics to be gathered.
        """
        # TODO influx config should be pulled from props?
        time.sleep(5)
        influx_client = InfluxDBClient(self.upgrade_manager_ip, 8086,
                                       'root', 'root', 'cloudify')
        try:
            result = influx_client.query(r'select * from /^{0}\./i '
                                         'where time > now() - 5s'
                                         .format(deployment_id))
        # NOTE(review): catching NameError here looks wrong for a query
        # failure -- presumably this should be the influxdb client's
        # error type; confirm what .query() raises before changing it.
        except NameError as e:
            self.fail('monitoring events list for deployment with ID {0} were'
                      ' not found on influxDB. error is: {1}'
                      .format(deployment_id, e))

        self.assertTrue(len(result) > 0)

    def check_elasticsearch(self, host, port):
        """Check that elasticsearch is listening on the given host:port.

        This is used for checking if the ES port changed correctly during
        the upgrade.
        """
        try:
            # Fixed: previously `host` was ignored and the URL was always
            # built from self.upgrade_manager_ip (behavior-equal for the
            # existing call sites, which pass that same IP).
            response = urllib2.urlopen('http://{0}:{1}'.format(host, port))
            response = json.load(response)
            if response['status'] != 200:
                raise ValueError('Incorrect status {0}'.format(
                    response['status']))
        except (ValueError, urllib2.URLError):
            self.fail('elasticsearch isnt listening on the changed port')

    def uninstall_deployment(self, deployment_id):
        self.manager_cfy.execute_uninstall(deployment_id)

    def rollback_manager(self):
        """Roll the manager back to the pre-upgrade version."""
        rollback_inputs = {
            'private_ip': self.manager_private_ip,
            'public_ip': self.upgrade_manager_ip,
            'ssh_key_filename': self.manager_inputs['ssh_key_filename'],
            'ssh_user': self.manager_inputs['ssh_user'],
        }
        rollback_inputs_file = self.manager_cfy._get_inputs_in_temp_file(
            rollback_inputs, self._testMethodName)

        with self.manager_cfy.maintenance_mode():
            self.manager_cfy.rollback_manager(
                blueprint_path=self.upgrade_blueprint,
                inputs_file=rollback_inputs_file)

    def post_rollback_checks(self):
        """Verify the rollback restored the original version and ES port."""
        rollback_manager_version = LooseVersion(
            self.rest_client.manager.get_version()['version'])
        self.assertEqual(rollback_manager_version,
                         self.bootstrap_manager_version)
        self.check_rpm_versions(self.bootstrap_blueprint,
                                self.manager_inputs)

        self.rest_client.blueprints.list()
        # ES must be back on its original port 9200 after rollback.
        self.check_elasticsearch(self.upgrade_manager_ip, 9200)
        self.check_influx(self.preupgrade_deployment_id)

    def teardown_manager(self):
        self.manager_cfy.teardown(ignore_deployments=True)