def after_teardown(self):
    """Run the base teardown, then delete locally generated keypair files
    whose removal flags are set."""
    super(OpenstackHandler, self).after_teardown()
    # (flag, configured path) pairs for the two keypairs this handler may own.
    keypairs = (
        (self.remove_agent_keypair, self.env.agent_key_path),
        (self.remove_management_keypair, self.env.management_key_path),
    )
    for should_remove, configured_path in keypairs:
        if not should_remove:
            continue
        # raise_on_missing=False: tolerate a key file that is already gone.
        actual_path = get_actual_keypath(self.env, configured_path,
                                         raise_on_missing=False)
        if actual_path:
            os.remove(actual_path)
def after_teardown(self):
    """Run the base teardown, then clean up keypair files created locally."""
    super(OpenstackHandler, self).after_teardown()

    def _delete_key(configured_path):
        # Resolve the configured path; a missing file is not an error here.
        resolved = get_actual_keypath(self.env, configured_path,
                                      raise_on_missing=False)
        if resolved:
            os.remove(resolved)

    if self.remove_agent_keypair:
        _delete_key(self.env.agent_key_path)
    if self.remove_management_keypair:
        _delete_key(self.env.management_key_path)
def _update_fabric_env(self):
    """Point the global fabric environment at the manager host."""
    connection_settings = {
        'timeout': 30,
        'user': self.env.centos_7_image_user,
        'key_filename': get_actual_keypath(self.env,
                                           self.env.management_key_path),
        'host_string': self.env.management_ip,
    }
    fabric.api.env.update(connection_settings)
def _reboot_server(self):
    """Configure fabric for the manager host and reboot it over SSH."""
    ssh_settings = {
        'user': self.env.management_user_name,
        'key_filename': get_actual_keypath(self.env,
                                           self.env.management_key_path),
        'host_string': self.env.management_ip,
    }
    env.update(ssh_settings)
    reboot()
def _reboot_server(self):
    """Schedule a reboot of the manager host via SSH and return the
    command's output."""
    connection = {
        'timeout': 30,
        'user': self.env.centos_7_image_user,
        'key_filename': get_actual_keypath(self.env,
                                           self.env.management_key_path),
        'host_string': self.env.management_ip,
    }
    fabric.api.env.update(connection)
    # '+1' delays the reboot by a minute so the SSH session can close cleanly.
    return fabric.api.run('sudo shutdown -r +1')
def _setup_fabric_env(self):
    """Build and return fabric SSH connection settings for the manager."""
    manager_key = util.get_actual_keypath(self.env,
                                          self.env.management_key_path)
    return {
        'host_string': self.cfy.get_management_ip(),
        'port': 22,
        'user': self.env.management_user_name,
        'key_filename': manager_key,
        'connection_attempts': 5,
    }
def _setup_fabric_env(self):
    """Return a dict of SSH settings for connecting to the manager."""
    settings = {}
    settings['host_string'] = self.cfy.get_management_ip()
    settings['port'] = 22
    settings['user'] = self.env.management_user_name
    settings['key_filename'] = util.get_actual_keypath(
        self.env, self.env.management_key_path)
    # Retry the connection a few times; the manager may still be booting.
    settings['connection_attempts'] = 5
    return settings
def _check_if_private_key_is_on_manager(self):
    """Return True if the private key file exists on the manager host."""
    key_path = get_actual_keypath(self.env, self.env.management_key_path)
    fabric.api.env.update({
        'timeout': 30,
        'user': self.env.management_user_name,
        'key_filename': key_path,
        'host_string': self.env.management_ip,
    })
    return fabric.contrib.files.exists(PRIVATE_KEY_PATH)
def _render_agent_key(_runtime_blueprint_directory):
    """Render the agent private key into the runtime blueprint's keys dir.

    Reads the local agent key (``self`` comes from the enclosing scope)
    and fills it into the pem template shipped with the blueprint.
    """
    key_path = util.get_actual_keypath(self.env, self.env.agent_key_path)
    with open(key_path) as key_file:
        private_key = key_file.read()
    template = resources.get_resource(
        'host-pool-service-blueprint/keys/agent_key.pem.template')
    target = os.path.join(_runtime_blueprint_directory, 'keys',
                          'agent_key.pem')
    util.render_template_to_file(
        template_path=template,
        file_path=target,
        agent_private_key_file_content=private_key)
def _is_docker_manager(self):
    """Return True when the docker binary is available on the manager."""
    fabric.api.env.update({
        'timeout': 30,
        'user': self.env.management_user_name,
        'key_filename': get_actual_keypath(self.env,
                                           self.env.management_key_path),
        'host_string': self.env.management_ip,
    })
    try:
        # fabric aborts with SystemExit when the command exits non-zero,
        # i.e. when 'docker' is not on the PATH.
        fabric.api.sudo('which docker')
    except SystemExit:
        return False
    return True
def _check_if_private_key_is_on_manager(self):
    """Check over SSH whether the private key file is present on the
    manager host."""
    connection = {
        'timeout': 30,
        'user': self.env.management_user_name,
        'key_filename': get_actual_keypath(self.env,
                                           self.env.management_key_path),
        'host_string': self.env.management_ip,
    }
    fabric.api.env.update(connection)
    return fabric.contrib.files.exists(PRIVATE_KEY_PATH)
def _render_agent_key(_runtime_blueprint_directory):
    """Write the agent's private key into the blueprint copy, rendered
    from the pem template (``self`` comes from the enclosing scope)."""
    agent_key_path = util.get_actual_keypath(self.env,
                                             self.env.agent_key_path)
    with open(agent_key_path) as key_fh:
        agent_key_content = key_fh.read()
    util.render_template_to_file(
        template_path=resources.get_resource(
            'host-pool-service-blueprint/keys/agent_key.pem.template'),
        file_path=os.path.join(_runtime_blueprint_directory, 'keys',
                               'agent_key.pem'),
        agent_private_key_file_content=agent_key_content)
def setUp(self, *args, **kwargs):
    """Prepare a Puppet master for the tests.

    Reuses an existing server when CLOUDIFY_TEST_PUPPET_IP is set in the
    environment; otherwise deploys one from the puppet-server blueprint.
    """
    super(PuppetPluginAgentTest, self).setUp(*args, **kwargs)
    blueprint_dir = self.copy_blueprint('blueprints', get_resources_path())
    self.blueprint_dir = blueprint_dir
    if 'CLOUDIFY_TEST_PUPPET_IP' in os.environ:
        # Reuse a pre-provisioned Puppet server instead of deploying one.
        self.logger.info('Using existing Puppet server at {0}'.format(
            os.environ['CLOUDIFY_TEST_PUPPET_IP']))
        self.puppet_server_ip = os.environ['CLOUDIFY_TEST_PUPPET_IP']
        # None marks that no server deployment is owned by this test.
        self.puppet_server_id = None
        return
    self.logger.info('Setting up Puppet master')
    self.blueprint_yaml = (blueprint_dir /
                           'puppet-server-by-puppet-blueprint.yaml')
    with YamlPatcher(self.blueprint_yaml) as blueprint:
        bp_info, inputs = update_blueprint(self.env, blueprint,
                                           'puppet-master')
    self.puppet_server_hostname = bp_info['hostnames'][0]
    self.puppet_server_id = self.test_id + '-puppet-master'
    id_ = self.puppet_server_id
    before, after = self.upload_deploy_and_execute_install(
        blueprint_id=id_,
        deployment_id=id_,
        inputs=inputs)
    # Extract the floating IP assigned to the deployed server.
    fip_node = find_node_state('ip', after['node_state'][id_])
    self.puppet_server_ip = \
        fip_node['runtime_properties']['floating_ip_address']
    # Point fabric at the new server so setup_puppet_server can SSH in.
    fabric_env = fabric.api.env
    fabric_env.update({
        'timeout': 30,
        'user': bp_info['users'][0],
        'key_filename': get_actual_keypath(self.env,
                                           self.env.agent_key_path),
        'host_string': self.puppet_server_ip,
    })
    setup_puppet_server(blueprint_dir)
def setUp(self, *args, **kwargs):
    """Set up a Puppet master for the agent tests.

    If CLOUDIFY_TEST_PUPPET_IP is present in the environment, an existing
    Puppet server is reused; otherwise a new one is deployed from the
    puppet-server blueprint and configured over SSH.
    """
    super(PuppetPluginAgentTest, self).setUp(*args, **kwargs)
    blueprint_dir = self.copy_blueprint(
        'blueprints', get_resources_path())
    self.blueprint_dir = blueprint_dir
    if 'CLOUDIFY_TEST_PUPPET_IP' in os.environ:
        # Short-circuit: reuse the externally provided server.
        self.logger.info('Using existing Puppet server at {0}'.format(
            os.environ['CLOUDIFY_TEST_PUPPET_IP']))
        self.puppet_server_ip = os.environ['CLOUDIFY_TEST_PUPPET_IP']
        # No deployment to tear down in this case.
        self.puppet_server_id = None
        return
    self.logger.info('Setting up Puppet master')
    self.blueprint_yaml = (
        blueprint_dir / 'puppet-server-by-puppet-blueprint.yaml')
    with YamlPatcher(self.blueprint_yaml) as blueprint:
        bp_info, inputs = update_blueprint(self.env, blueprint,
                                           'puppet-master')
    self.puppet_server_hostname = bp_info['hostnames'][0]
    self.puppet_server_id = self.test_id + '-puppet-master'
    id_ = self.puppet_server_id
    before, after = self.upload_deploy_and_execute_install(
        blueprint_id=id_,
        deployment_id=id_,
        inputs=inputs)
    # The server's floating IP is published as a runtime property.
    fip_node = find_node_state('ip', after['node_state'][id_])
    self.puppet_server_ip = \
        fip_node['runtime_properties']['floating_ip_address']
    # Configure fabric so setup_puppet_server can reach the new host.
    fabric_env = fabric.api.env
    fabric_env.update({
        'timeout': 30,
        'user': bp_info['users'][0],
        'key_filename': get_actual_keypath(self.env,
                                           self.env.agent_key_path),
        'host_string': self.puppet_server_ip,
    })
    setup_puppet_server(blueprint_dir)
def create_keypair_and_copy_to_manager(self, nova_client, remote_key_path,
                                       key_name):
    """Create a nova keypair, save its private key locally and copy it to
    the manager host over SSH.

    :param nova_client: nova client used to create the keypair
    :param remote_key_path: destination path on the manager host
    :param key_name: name of the new keypair (also names the local file)
    """
    key_file = path(self.workdir) / '{}.pem'.format(key_name)
    keypair = nova_client.keypairs.create(key_name)
    key_file.write_text(keypair.private_key)
    # Private key must be owner-only. 0o600 replaces the old 0600 literal,
    # which is a syntax error under Python 3 (PEP 3127).
    key_file.chmod(0o600)
    management_key_path = get_actual_keypath(self.env,
                                             self.env.management_key_path)
    fabric.api.env.update({
        'timeout': 30,
        'user': self.env.cloudify_agent_user,
        'key_filename': management_key_path,
        'host_string': self.env.management_ip,
    })
    fabric.api.put(local_path=key_file, remote_path=remote_key_path)
def _assert_plugins_installed(self):
    """Upload a verification script to the manager, run it, and assert
    that both test plugins were installed."""
    key_path = util.get_actual_keypath(self.env,
                                       self.env.management_key_path)
    local_script = util.get_resource_path('scripts/test_rest_plugins.sh')
    remote_script = '/home/{0}/test_rest_plugins.sh'.format(
        self.env.management_user_name)
    ssh_settings = dict(
        timeout=30,
        user=self.env.management_user_name,
        key_filename=key_path,
        host_string=self.cfy.get_management_ip(),
        warn_only=False)
    with fabric_api.settings(**ssh_settings):
        fabric_api.put(local_script, remote_script)
        output = fabric_api.run(
            'chmod +x {0} && {0}'.format(remote_script))
    # This tells us that plugin-template was successfully installed
    self.assertIn('imported_plugin_tasks', output)
    # This tells us that mock-rest-plugin was successfully installed
    self.assertIn('mock_attribute_value', output)
def _check_if_private_key_is_on_manager(self):
    """Return whether the private key exists on the manager, accounting
    for the different path used by dockerized managers."""
    if self._is_docker_manager():
        path_to_check = '/home/{0}/neutron-test.pem'.format(
            self.env.management_user_name)
    else:
        path_to_check = PRIVATE_KEY_PATH
    connection = {
        'timeout': 30,
        'user': self.env.management_user_name,
        'key_filename': get_actual_keypath(self.env,
                                           self.env.management_key_path),
        'host_string': self.env.management_ip,
    }
    fabric.api.env.update(connection)
    return fabric.contrib.files.exists(path_to_check)
def _assert_plugins_installed(self):
    """Run the rest-plugins verification script on the manager and check
    its output for markers left by each installed plugin."""
    manager_key = util.get_actual_keypath(self.env,
                                          self.env.management_key_path)
    local_path = util.get_resource_path('scripts/test_rest_plugins.sh')
    remote_path = '/home/{0}/test_rest_plugins.sh'.format(
        self.env.management_user_name)
    with fabric_api.settings(timeout=30,
                             user=self.env.management_user_name,
                             key_filename=manager_key,
                             host_string=self.cfy.get_management_ip(),
                             warn_only=False):
        fabric_api.put(local_path, remote_path)
        script_output = fabric_api.run(
            'chmod +x {0} && {0}'.format(remote_path))
    # This tells us that plugin-template was successfully installed
    self.assertIn('imported_plugin_tasks', script_output)
    # This tells us that mock-rest-plugin was successfully installed
    self.assertIn('mock_attribute_value', script_output)
def _is_docker_manager(self):
    """Return True when docker is installed on the manager host, logging
    the probe command before running it."""
    connection = {
        'timeout': 30,
        'user': self.env.management_user_name,
        'key_filename': get_actual_keypath(self.env,
                                           self.env.management_key_path),
        'host_string': self.env.management_ip,
    }
    fabric.api.env.update(connection)
    command = 'which docker'
    self.logger.info('Executing "{0}" on host: {1}@{2}'.format(
        command, self.env.management_user_name, self.env.management_ip))
    try:
        # fabric raises SystemExit when the remote command fails.
        fabric.api.sudo(command)
    except SystemExit:
        return False
    return True
def create_keypair_and_copy_to_manager(self, nova_client, remote_key_path,
                                       key_name):
    """Create a nova keypair, save its private key locally and copy it to
    the manager host (preparing /tmp/home first on docker managers).

    :param nova_client: nova client used to create the keypair
    :param remote_key_path: destination path on the manager host
    :param key_name: name of the new keypair (also names the local file)
    """
    key_file = path(self.workdir) / '{}.pem'.format(key_name)
    keypair = nova_client.keypairs.create(key_name)
    key_file.write_text(keypair.private_key)
    # Private key must be owner-only. 0o600 replaces the old 0600 literal,
    # which is a syntax error under Python 3 (PEP 3127).
    key_file.chmod(0o600)
    management_key_path = get_actual_keypath(self.env,
                                             self.env.management_key_path)
    fabric.api.env.update({
        'timeout': 30,
        'user': self.env.cloudify_agent_user,
        'key_filename': management_key_path,
        'host_string': self.env.management_ip,
    })
    if self._is_docker_manager():
        # Docker managers mount /tmp/home; make sure it exists first.
        fabric.api.run('mkdir -p /tmp/home')
    fabric.api.put(local_path=key_file, remote_path=remote_key_path)
def test_puppet_agent(self):
    """Deploy the puppet-agent blueprint and verify the create operation
    wrote its marker file (containing the deployment id) on the host."""
    blueprint_dir = self.blueprint_dir
    self.blueprint_yaml = (blueprint_dir /
                           'puppet-agent-test-blueprint.yaml')
    with YamlPatcher(self.blueprint_yaml) as blueprint:
        bp_info, inputs = update_blueprint(
            self.env, blueprint, 'puppet-agent', {
                'puppet_server_ip': self.puppet_server_ip,
            })
    # Timestamp keeps repeated runs from colliding on the same id.
    id_ = self.test_id + '-puppet-agent-' + str(int(time.time()))
    before, after = self.upload_deploy_and_execute_install(
        blueprint_id=id_,
        deployment_id=id_,
        inputs=inputs)
    fip_node = find_node_state('ip', after['node_state'][id_])
    puppet_agent_ip = fip_node['runtime_properties']['floating_ip_address']
    fabric_env = fabric.api.env
    fabric_env.update({
        'timeout': 30,
        'user': bp_info['users'][0],
        'key_filename': get_actual_keypath(self.env,
                                           self.env.agent_key_path),
        'host_string': puppet_agent_ip,
    })
    marker = '/tmp/cloudify_operation_create'
    # Marker file must exist...
    out = fabric.api.run('[ -f {0} ]; echo $?'.format(marker))
    self.assertEqual(out, '0')
    # ...and contain the deployment id written by the create operation.
    out = fabric.api.run('cat {0}'.format(marker))
    self.assertEqual(out, id_)
    self.execute_uninstall(id_)
def test_puppet_agent(self):
    """Install the puppet-agent blueprint and assert the puppet-applied
    create operation left its marker file on the agent host."""
    blueprint_dir = self.blueprint_dir
    self.blueprint_yaml = (
        blueprint_dir / 'puppet-agent-test-blueprint.yaml')
    with YamlPatcher(self.blueprint_yaml) as blueprint:
        bp_info, inputs = update_blueprint(
            self.env, blueprint, 'puppet-agent', {
                'puppet_server_ip': self.puppet_server_ip,
            })
    # Unique per-run id so reruns do not clash with old deployments.
    id_ = self.test_id + '-puppet-agent-' + str(int(time.time()))
    before, after = self.upload_deploy_and_execute_install(
        blueprint_id=id_,
        deployment_id=id_,
        inputs=inputs)
    fip_node = find_node_state('ip', after['node_state'][id_])
    puppet_agent_ip = fip_node['runtime_properties']['floating_ip_address']
    fabric_env = fabric.api.env
    fabric_env.update({
        'timeout': 30,
        'user': bp_info['users'][0],
        'key_filename': get_actual_keypath(self.env,
                                           self.env.agent_key_path),
        'host_string': puppet_agent_ip,
    })
    f = '/tmp/cloudify_operation_create'
    # The create operation must have written the marker file...
    out = fabric.api.run('[ -f {0} ]; echo $?'.format(f))
    self.assertEqual(out, '0')
    # ...with the deployment id as its content.
    out = fabric.api.run('cat {0}'.format(f))
    self.assertEqual(out, id_)
    self.execute_uninstall(id_)
def test_chef_solo(self):
    """Install the chef-solo blueprint and verify the recipes produced
    the expected files on the provisioned host."""
    agent_key_file = get_actual_keypath(self.env, self.env.agent_key_path)
    blueprint_dir = self.blueprint_dir
    self.blueprint_yaml = blueprint_dir / 'chef-solo-test-blueprint.yaml'
    with YamlPatcher(self.blueprint_yaml) as blueprint:
        bp_info, inputs = update_blueprint(self.env, blueprint,
                                           'chef-solo')
    # Timestamp keeps repeated runs from colliding on the same id.
    id_ = self.test_id + '-chef-solo-' + str(int(time.time()))
    before, after = self.upload_deploy_and_execute_install(id_, id_,
                                                           inputs=inputs)
    fip_node = find_node_state('ip', after['node_state'][id_])
    chef_solo_ip = fip_node['runtime_properties']['floating_ip_address']
    fabric_env = fabric.api.env
    fabric_env.update({
        'timeout': 30,
        'user': bp_info['users'][0],
        'key_filename': str(agent_key_file),
        'host_string': chef_solo_ip,
    })
    # Each (file, content) pair is produced by a different part of the
    # chef-solo configuration (attributes, environments, data bags).
    expected_files_contents = (
        ('/tmp/blueprint.txt', 'Great success number #2 !'),
        ('/tmp/blueprint2.txt', '/tmp/blueprint.txt'),
        ('/tmp/chef_node_env.e1.txt', 'env:e1'),
        ('/tmp/chef_node_data_bag_user.db1.i1.txt', 'db1-i1-k1'),
    )
    for file_name, expected_content in expected_files_contents:
        actual_content = fabric.api.run('cat ' + file_name)
        msg = "File '{0}' should have content '{1}' but has '{2}'".format(
            file_name, expected_content, actual_content)
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(actual_content, expected_content, msg)
    self.execute_uninstall(id_)
def test_chef_solo(self):
    """Deploy the chef-solo blueprint and check that every recipe wrote
    its expected file with the expected content."""
    agent_key_file = get_actual_keypath(self.env, self.env.agent_key_path)
    blueprint_dir = self.blueprint_dir
    self.blueprint_yaml = blueprint_dir / 'chef-solo-test-blueprint.yaml'
    with YamlPatcher(self.blueprint_yaml) as blueprint:
        bp_info, inputs = update_blueprint(self.env, blueprint,
                                           'chef-solo')
    # Unique per-run id so reruns do not clash with old deployments.
    id_ = self.test_id + '-chef-solo-' + str(int(time.time()))
    before, after = self.upload_deploy_and_execute_install(
        id_, id_, inputs=inputs)
    fip_node = find_node_state('ip', after['node_state'][id_])
    chef_solo_ip = fip_node['runtime_properties']['floating_ip_address']
    fabric_env = fabric.api.env
    fabric_env.update({
        'timeout': 30,
        'user': bp_info['users'][0],
        'key_filename': str(agent_key_file),
        'host_string': chef_solo_ip,
    })
    # Files written by the chef-solo run: attributes, environments and
    # data bags each leave their own marker.
    expected_files_contents = (
        ('/tmp/blueprint.txt', 'Great success number #2 !'),
        ('/tmp/blueprint2.txt', '/tmp/blueprint.txt'),
        ('/tmp/chef_node_env.e1.txt', 'env:e1'),
        ('/tmp/chef_node_data_bag_user.db1.i1.txt', 'db1-i1-k1'),
    )
    for file_name, expected_content in expected_files_contents:
        actual_content = fabric.api.run('cat ' + file_name)
        msg = "File '{0}' should have content '{1}' but has '{2}'".format(
            file_name, expected_content, actual_content)
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(actual_content, expected_content, msg)
    self.execute_uninstall(id_)
def tearDown(self):
    """Log the manager's process list (best effort) on test teardown."""
    # note that the cleanup function is registered in setUp
    # because it is called regardless of whether setUp succeeded or failed
    # unlike tearDown which is not called when setUp fails (which might
    # happen when tests override setUp)
    if not self.env.management_ip:
        return
    try:
        self.logger.info('Running ps aux on Cloudify manager...')
        output = StringIO()
        ssh_settings = dict(
            user=self.env.management_user_name,
            host_string=self.env.management_ip,
            key_filename=get_actual_keypath(
                self.env, self.env.management_key_path),
            disable_known_hosts=True)
        with fabric_api.settings(**ssh_settings):
            fabric_api.run('ps aux --sort -rss', stdout=output)
        self.logger.info(
            'Cloudify manager ps aux output:\n{0}'.format(
                output.getvalue()))
    except Exception as e:
        # Best effort only: teardown diagnostics must never fail the test.
        self.logger.info(
            'Error running ps aux on Cloudify manager: {0}'.format(
                str(e)))
def setUp(self, *args, **kwargs):
    """Provision a Chef server for the client tests.

    Downloads and patches the chef-server cookbook, deploys the server
    from the chef-server-by-chef-solo blueprint, then configures it with
    the create-file test cookbook.
    """
    super(ChefPluginClientTest, self).setUp(*args, **kwargs)
    agent_key_file = get_actual_keypath(self.env,
                                        self.env.agent_key_path)
    blueprint_dir = self.copy_blueprint('chef-plugin')
    self.blueprint_yaml = (blueprint_dir /
                           'chef-server-by-chef-solo-blueprint.yaml')
    with YamlPatcher(self.blueprint_yaml) as blueprint:
        bp_info, inputs = update_blueprint(self.env, blueprint,
                                           'chef-server')
    # Underscores are not valid in hostnames; swap them for dashes.
    self.chef_server_hostname = '{0}{1}'.format(
        self.env.resources_prefix.replace('_', '-'),
        bp_info['hostnames'][0])
    cookbooks_dir = blueprint_dir / 'cookbooks'

    # Small wrapper so the shell commands below read naturally.
    def run(*args, **kwargs):
        return subprocess.check_output(*args, **kwargs)

    # 'with <path>' changes the working directory for the block.
    with cookbooks_dir:
        run([
            'wget', '-q', '-O', 'chef-server.zip',
            CHEF_SERVER_COOKBOOK_ZIP_URL,
        ])
        ZipFile('chef-server.zip').extractall()
        chef_cookbook_dir = cookbooks_dir.glob('chef-server-*')[0]
        run(['mv', chef_cookbook_dir, 'chef-server'])
        # Next line because Chef cookbooks are required
        # to declare all dependencies, even if they don't use them.
        # We don't need git, it's only used in chef-cookbook::dev recipe.
        run(['sed', '-i', "/depends 'git'/d",
             'chef-server/metadata.rb'])
    with blueprint_dir:
        run(['tar', 'czf', 'cookbooks.tar.gz', 'cookbooks'])
    self.chef_server_id = self.test_id + '-chef-server'
    id_ = self.chef_server_id
    before, after = self.upload_deploy_and_execute_install(id_, id_,
                                                           inputs=inputs)
    # Extract the floating IP assigned to the deployed chef server.
    fip_node = find_node_state('ip', after['node_state'][id_])
    self.chef_server_ip = fip_node['runtime_properties'][
        'floating_ip_address']
    # Point fabric at the new server so setup_chef_server can SSH in.
    fabric_env = fabric.api.env
    fabric_env.update({
        'timeout': 30,
        'user': bp_info['users'][0],
        'key_filename': str(agent_key_file),
        'host_string': self.chef_server_ip,
    })
    cookbook_local_path = os.path.abspath(
        os.path.join(get_blueprint_path('chef-plugin'),
                     'cookbook-create-file.tar.gz'))
    setup_chef_server(blueprint_dir, [[
        'create-file',
        cookbook_local_path,
    ]])
    self.blueprint_dir = blueprint_dir
def setUp(self, *args, **kwargs):
    """Deploy and configure a Chef server for the client tests.

    Fetches the chef-server cookbook, strips its unused git dependency,
    installs the server via the chef-server-by-chef-solo blueprint, and
    loads the create-file test cookbook onto it.
    """
    super(ChefPluginClientTest, self).setUp(*args, **kwargs)
    agent_key_file = get_actual_keypath(self.env,
                                        self.env.agent_key_path)
    blueprint_dir = self.copy_blueprint('chef-plugin')
    self.blueprint_yaml = (
        blueprint_dir / 'chef-server-by-chef-solo-blueprint.yaml')
    with YamlPatcher(self.blueprint_yaml) as blueprint:
        bp_info, inputs = update_blueprint(self.env, blueprint,
                                           'chef-server')
    # Hostnames cannot contain underscores; replace them with dashes.
    self.chef_server_hostname = '{0}{1}'.format(
        self.env.resources_prefix.replace('_', '-'),
        bp_info['hostnames'][0])
    cookbooks_dir = blueprint_dir / 'cookbooks'

    # Thin wrapper to keep the shell invocations below compact.
    def run(*args, **kwargs):
        return subprocess.check_output(*args, **kwargs)

    # 'with <path>' changes the working directory for the block.
    with cookbooks_dir:
        run([
            'wget', '-q', '-O', 'chef-server.zip',
            CHEF_SERVER_COOKBOOK_ZIP_URL,
        ])
        ZipFile('chef-server.zip').extractall()
        chef_cookbook_dir = cookbooks_dir.glob('chef-server-*')[0]
        run(['mv', chef_cookbook_dir, 'chef-server'])
        # Next line because Chef cookbooks are required
        # to declare all dependencies, even if they don't use them.
        # We don't need git, it's only used in chef-cookbook::dev recipe.
        run(['sed', '-i', "/depends 'git'/d",
             'chef-server/metadata.rb'])
    with blueprint_dir:
        run(['tar', 'czf', 'cookbooks.tar.gz', 'cookbooks'])
    self.chef_server_id = self.test_id + '-chef-server'
    id_ = self.chef_server_id
    before, after = self.upload_deploy_and_execute_install(
        id_, id_, inputs=inputs)
    # The server's floating IP is published as a runtime property.
    fip_node = find_node_state('ip', after['node_state'][id_])
    self.chef_server_ip = fip_node['runtime_properties'][
        'floating_ip_address']
    # Configure fabric so setup_chef_server can reach the new host.
    fabric_env = fabric.api.env
    fabric_env.update({
        'timeout': 30,
        'user': bp_info['users'][0],
        'key_filename': str(agent_key_file),
        'host_string': self.chef_server_ip,
    })
    cookbook_local_path = os.path.abspath(
        os.path.join(get_blueprint_path('chef-plugin'),
                     'cookbook-create-file.tar.gz'))
    setup_chef_server(blueprint_dir, [[
        'create-file',
        cookbook_local_path,
    ]])
    self.blueprint_dir = blueprint_dir