def _test_userdata_agent(cfy, manager, inputs, tenant):
    """Install a userdata-agent blueprint and verify its deployment outputs.

    Uploads the blueprint, creates a deployment, runs the install workflow,
    and asserts that the deployment outputs contain the expected environment
    variable and file content. The deployment is uninstalled even if install
    or the assertion fails.

    :param cfy: CLI fixture used to run executions.
    :param manager: manager fixture exposing a REST client.
    :param inputs: deployment inputs dict.
    :param tenant: tenant name to operate under.
    """
    # Timestamp keeps blueprint/deployment ids unique across repeated runs.
    blueprint_id = deployment_id = 'userdata{0}'.format(time.time())
    blueprint_path = util.get_resource_path(
        'agent/userdata-agent-blueprint/userdata-agent-blueprint.yaml')
    with set_client_tenant(manager, tenant):
        manager.client.blueprints.upload(blueprint_path, blueprint_id)
        manager.client.deployments.create(
            deployment_id,
            blueprint_id,
            inputs=inputs,
            skip_plugins_validation=True)
    try:
        # Install is inside the try so a failed install is still cleaned up
        # by the uninstall in the finally clause (consistent with
        # _test_agent).
        cfy.executions.start.install(['-d', deployment_id,
                                      '--tenant-name', tenant])
        with set_client_tenant(manager, tenant):
            assert {
                'MY_ENV_VAR': 'MY_ENV_VAR_VALUE',
                'file_content': EXPECTED_FILE_CONTENT
            } == manager.client.deployments.outputs.get(deployment_id).outputs
    finally:
        cfy.executions.start.uninstall(['-d', deployment_id,
                                        '--tenant-name', tenant])
def _test_agent_alive_after_reboot(cfy, manager, attributes, os_name,
                                   suffix=None):
    """Verify that an agent keeps working after its host VM is rebooted.

    Installs the reboot-test blueprint for the given OS, triggers the
    reboot operation on the host node, uninstalls, and then asserts that
    the 'application' node instance recorded the expected runtime property.
    """
    suffix = suffix or os_name
    # Both unix flavours share one blueprint; windows uses the winrm one.
    blueprints_by_os = {
        'centos_7': 'agent/reboot-vm-blueprint/reboot-unix-vm-blueprint.yaml',
        'ubuntu_14_04': (
            'agent/reboot-vm-blueprint/reboot-unix-vm-blueprint.yaml'
        ),
        'windows_2012': (
            'agent/reboot-vm-blueprint/reboot-winrm-vm-blueprint.yaml'
        ),
    }
    tenant = prepare_and_get_test_tenant(suffix, manager, cfy)
    deployment_inputs = {
        'image': attributes['{os}_image_name'.format(os=os_name)],
        'flavor': attributes['medium_flavor_name'],
        'user': attributes['{os}_username'.format(os=os_name)],
        'network_name': attributes['network_name'],
        'private_key_path': manager.remote_private_key_path,
        'keypair_name': attributes['keypair_name'],
        'value': os_name,
    }
    blueprint_path = util.get_resource_path(blueprints_by_os[os_name])
    blueprint_id = deployment_id = os_name
    with set_client_tenant(manager, tenant):
        manager.client.blueprints.upload(blueprint_path, blueprint_id)
        manager.client.deployments.create(
            deployment_id, blueprint_id, inputs=deployment_inputs,
            skip_plugins_validation=True)
    try:
        cfy.executions.start.install(
            ['-d', deployment_id, '--tenant-name', tenant])
        cfy.executions.start.execute_operation(
            deployment_id=deployment_id,
            parameters={
                'operation': 'cloudify.interfaces.reboot_test.reboot',
                'node_ids': ['host']
            },
            tenant_name=tenant)
    finally:
        cfy.executions.start.uninstall(
            ['-d', deployment_id, '--tenant-name', tenant])
    # The runtime property set before the reboot must still be visible.
    with set_client_tenant(manager, tenant):
        app = manager.client.node_instances.list(
            node_id='application',
            deployment_id=deployment_id,
        )[0]
        assert os_name == app.runtime_properties['value']
def outputs(self):
    """Fetch this deployment's outputs, log them, and return them."""
    with set_client_tenant(self.manager, self.tenant):
        deployment_outputs = self.manager.client.deployments.outputs.get(
            self.deployment_id,
        )['outputs']
        self.logger.info('Deployment outputs: %s%s',
                         os.linesep,
                         json.dumps(deployment_outputs, indent=2))
        return deployment_outputs
def _validate_secrets_created(self):
    """Check that the expected secrets exist on the Tier 1 cluster.

    Verifies the file secret's value on the current tenant, then switches
    the REST client tenant to read and verify the string secret.
    """
    self.logger.info(
        'Validating that secrets were created on Tier 1 cluster...'
    )
    all_secrets = {
        secret['key']: secret
        for secret in self.client.secrets.list(_all_tenants=True)
    }
    # During upgrade we add secrets for ssh keys, so we only require that
    # the expected keys are present, not an exact match.
    required = {constants.SECRET_FILE_KEY, constants.SECRET_STRING_KEY}
    assert required.issubset(set(all_secrets.keys()))
    file_secret = self.client.secrets.get(constants.SECRET_FILE_KEY)
    assert file_secret.value == constants.PY_SCRIPT
    string_secret_tenant = all_secrets[
        constants.SECRET_STRING_KEY]['tenant_name']
    # Temporarily point the REST client at the secret's tenant.
    with util.set_client_tenant(self, string_secret_tenant):
        string_secret_value = self.client.secrets.get(
            constants.SECRET_STRING_KEY).value
        assert string_secret_value == constants.SECRET_STRING_VALUE
    self.logger.info('Secrets validated successfully')
def wait_for_execution(manager, execution, logger, tenant=None,
                       change_manager_password=True):
    """Poll the given execution once and raise if it has not succeeded.

    Intended to be called repeatedly (e.g. under a retry decorator):
    raises ExecutionWaiting while the execution has not reached an end
    state, ExecutionFailed if it ended in any state other than terminated,
    and returns the refreshed execution object on success.

    :param manager: manager fixture exposing a REST client.
    :param execution: execution object/dict with at least an 'id' key.
    :param logger: logger for progress messages.
    :param tenant: tenant to query under, or None for the current one.
    :param change_manager_password: when True, an auth failure triggers a
        REST-client password change (restore with modified users).
    """
    _log(
        'Getting workflow execution [id={execution}]'.format(
            execution=execution['id'],
        ),
        logger,
        tenant,
    )
    try:
        with set_client_tenant(manager, tenant):
            execution = manager.client.executions.get(execution['id'])
    except UserUnauthorizedError:
        if (manager_supports_users_in_snapshot_creation(manager) and
                change_manager_password):
            # This will happen on a restore with modified users
            change_rest_client_password(manager, CHANGED_ADMIN_PASSWORD)
        # NOTE(review): when the condition above is false, the auth error
        # is swallowed and we continue with the stale `execution` object
        # passed in — confirm this fall-through is intentional.
    logger.info('- execution.status = %s', execution.status)
    if execution.status not in execution.END_STATES:
        raise ExecutionWaiting(execution.status)
    if execution.status != execution.TERMINATED:
        raise ExecutionFailed(execution.status)
    return execution
def get_plugins_list(manager, tenant=None):
    """Return (package_name, package_version, distribution) per plugin."""
    with set_client_tenant(manager, tenant):
        plugins = manager.client.plugins.list()
    return [
        (plugin['package_name'],
         plugin['package_version'],
         plugin['distribution'])
        for plugin in plugins
    ]
def _test_agent(agent_type, cfy, manager, attributes):
    """Install then uninstall the agent blueprint for the given type.

    Supported types are 'a3_2' (deliberately misspelled blueprint) and
    'ssh'. Uninstall runs even if the install fails.
    """
    blueprints_by_type = {
        'a3_2': 'agent/3-2-agent-blueprint/3-2-agent-mispelled-blprint.yaml',
        'ssh': 'agent/ssh-agent-blueprint/ssh-agent-blueprint.yaml',
    }
    blueprint_path = util.get_resource_path(blueprints_by_type[agent_type])
    tenant = prepare_and_get_test_tenant(
        'agent_{}'.format(agent_type), manager, cfy,
    )
    blueprint_id = deployment_id = agent_type
    with set_client_tenant(manager, tenant):
        manager.client.blueprints.upload(blueprint_path, blueprint_id)
        manager.client.deployments.create(
            deployment_id,
            blueprint_id,
            inputs={
                'ip_address': manager.ip_address,
                'user': attributes.default_linux_username,
                'private_key_path': manager.remote_private_key_path
            },
            skip_plugins_validation=True)
    try:
        cfy.executions.start.install(
            ['-d', deployment_id, '--tenant-name', tenant])
    finally:
        cfy.executions.start.uninstall(
            ['-d', deployment_id, '--tenant-name', tenant])
def deploy_helloworld(manager, inputs, blueprint_id, deployment_id,
                      tenant, logger):
    """Create a helloworld deployment and wait for its environment."""
    version = manager.branch_name
    _log(
        'Deploying {deployment} on {version} manager'.format(
            deployment=deployment_id,
            version=version,
        ),
        logger,
        tenant,
    )
    with set_client_tenant(manager, tenant):
        manager.client.deployments.create(
            blueprint_id,
            deployment_id,
            inputs,
            skip_plugins_validation=True,
        )
        # Look up the create-deployment-environment execution while the
        # client is still pointed at this tenant.
        creation_execution = get_deployment_environment_execution(
            manager.client, deployment_id, CREATE_DEPLOYMENT)
    logger.info('Waiting for execution environment')
    # wait_for_execution sets the client tenant itself.
    wait_for_execution(manager, creation_execution, logger, tenant)
    logger.info('Deployment environment created')
def assert_nodecellar_working(self, endpoint):
    """Verify nodecellar responds by curling localhost from its own host.

    We cannot reach the app directly:
    1) the nodecellar hostpool blueprint is not an openstack blueprint,
       so the ip is not exported in outputs;
    2) the nodejs host has no floating ip assigned;
    3) the hostpool service blueprint's security group does not open the
       nodecellar port.
    So we read the host ip from runtime properties (1), ssh via the
    manager (2), and curl localhost on the host itself (3).
    """
    port = endpoint['port']
    with util.set_client_tenant(self.manager, self.tenant):
        nodejs_node = self.manager.client.node_instances.list(
            node_name='nodejs_host')[0]
    agent = nodejs_node.runtime_properties['cloudify_agent']
    ssh_command = (
        'ssh -o StrictHostKeyChecking=no {user}@{ip} -i {key} '
        '"curl -I localhost:{port}"'.format(
            user=agent['user'],
            ip=nodejs_node.runtime_properties['ip'],
            key=agent['key'],
            port=port,
        )
    )
    with self.manager.ssh() as fabric:
        response = fabric.sudo(ssh_command)
        self.assertIn('200 OK', response)
def _assert_scale(manager, deployment_id, outputs, expected_instances,
                  tenant):
    """Assert the node-instance count matches the expected scale level.

    Also checks that haproxy load-balances across the expected number of
    backends.
    """
    with set_client_tenant(manager, tenant):
        instances = manager.client.node_instances.list(
            deployment_id=deployment_id,
            _include=['id'],
        )
    # Expected count: 9 base instances plus 3 per scaled instance
    # (per the blueprint's scaling group — TODO confirm against blueprint).
    expected_total = 9 + 3 * expected_instances
    assert len(instances) == expected_total
    _assert_haproxy_load_balancing(
        outputs, expected_number_of_backends=expected_instances)
def get_plugins_list(manager, tenant=None):
    """List installed plugins as (name, version, distribution) tuples."""
    with set_client_tenant(manager, tenant):
        return [(p['package_name'], p['package_version'], p['distribution'])
                for p in manager.client.plugins.list()]
def delete_blueprint(self, use_cfy=False):
    """Delete this example's blueprint via the CLI or the REST client.

    :param use_cfy: when True, delete through the cfy CLI (after pointing
        its profile at our tenant); otherwise use the REST client.
    """
    self.logger.info('Deleting blueprint: {0}'.format(self.blueprint_id))
    if use_cfy:
        # The CLI profile must be switched to our tenant first.
        self.cfy.profile.set(['-t', self.tenant])
        self.cfy.blueprint.delete(self.blueprint_id)
        return
    with set_client_tenant(self.manager, self.tenant):
        self.manager.client.blueprints.delete(self.blueprint_id)
def assert_deployment_events_exist(self):
    """Assert that at least one event was recorded for this deployment."""
    self.logger.info('Verifying deployment events..')
    with set_client_tenant(self.manager, self.tenant):
        executions = self.manager.client.executions.list(
            deployment_id=self.deployment_id,
        )
        events, total_events = self.manager.client.events.get(
            executions[0].id,
        )
        self.assertGreater(
            len(events), 0,
            'There are no events for deployment: {0}'.format(
                self.deployment_id))
def upload_blueprint(self):
    """Clone the example repo, patch its blueprint, and upload it."""
    self.clone_example()
    blueprint_file = self._cloned_to / self.blueprint_file
    self._patch_blueprint()
    self.logger.info('Uploading blueprint: %s [id=%s]',
                     blueprint_file,
                     self.blueprint_id)
    with set_client_tenant(self.manager, self.tenant):
        self.manager.client.blueprints.upload(blueprint_file,
                                              self.blueprint_id)
def _test_windows_common(
        cfy, manager, attributes, blueprint_path, inputs, os_name, tenant,
        deployment_id_prefix):
    """Deploy a windows blueprint, run its test operation, then uninstall.

    Builds default windows-2012 inputs, overlays any caller-supplied
    `inputs`, installs the deployment, and runs the 'test.interface.test'
    operation on the test_app node. Uninstall runs even if the operation
    fails.
    """
    user = attributes.windows_2012_username
    if not tenant:
        tenant = prepare_and_get_test_tenant(
            '{0}_{1}'.format(deployment_id_prefix, os_name), manager, cfy
        )
    effective_inputs = {
        'image': attributes.windows_2012_image_name,
        'flavor': attributes.medium_flavor_name,
        'user': user,
        'network_name': attributes.network_name,
        'private_key_path': manager.remote_private_key_path,
        'keypair_name': attributes.keypair_name,
    }
    if inputs:
        effective_inputs.update(inputs)
    # Timestamp keeps ids unique across repeated runs.
    blueprint_id = deployment_id = '{0}_{1}'.format(
        deployment_id_prefix, time.time())
    resolved_blueprint = util.get_resource_path(blueprint_path)
    with set_client_tenant(manager, tenant):
        manager.client.blueprints.upload(resolved_blueprint, blueprint_id)
        manager.client.deployments.create(
            deployment_id, blueprint_id, inputs=effective_inputs,
            skip_plugins_validation=True)
    cfy.executions.start.install(
        ['-d', deployment_id, '--tenant-name', tenant])
    try:
        cfy.executions.start.execute_operation(
            deployment_id=deployment_id,
            parameters={
                'operation': 'test.interface.test',
                'node_ids': ['test_app']
            },
            tenant_name=tenant)
    finally:
        cfy.executions.start.uninstall(
            ['-d', deployment_id, '--tenant-name', tenant])
def create_deployment(self):
    """Create this example's deployment from its uploaded blueprint."""
    self.logger.info(
        'Creating deployment [id=%s] with the following inputs:%s%s',
        self.deployment_id,
        os.linesep,
        json.dumps(self.inputs, indent=2))
    with set_client_tenant(self.manager, self.tenant):
        self.manager.client.deployments.create(
            deployment_id=self.deployment_id,
            blueprint_id=self.blueprint_id,
            inputs=self.inputs,
            skip_plugins_validation=self.skip_plugins_validation)
    # Also list deployments through the CLI for this tenant.
    self.cfy.deployments.list(tenant_name=self.tenant)
def _wait_for_autoheal(manager, deployment_id, logger, tenant):
    """Assert exactly one heal execution exists and has terminated."""
    logger.info('Waiting for heal workflow to start/complete..')
    with util.set_client_tenant(manager, tenant):
        heal_executions = [
            execution
            for execution in manager.client.executions.list(
                deployment_id=deployment_id)
            if execution.workflow_id == 'heal'
        ]
    logger.info('Found heal executions:%s%s',
                os.linesep, json.dumps(heal_executions, indent=2))
    assert len(heal_executions) == 1
    assert heal_executions[0].status == 'terminated'
def _get_heal_workflow_events(cfy, manager, deployment_id, logger, tenant):
    """List CLI events for the deployment's heal execution, if one exists."""
    logger.info('Getting heal workflow events..')
    with util.set_client_tenant(manager, tenant):
        heal_executions = [
            execution
            for execution in manager.client.executions.list(
                deployment_id=deployment_id)
            if execution.workflow_id == 'heal'
        ]
    if not heal_executions:
        logger.info('No heal executions found.')
        return
    # There should never be more than one heal for this deployment.
    assert len(heal_executions) == 1
    cfy.events.list(['-e', heal_executions[0].id, '--tenant-name', tenant])
def assert_deployment_events_exist(self):
    """Check that the deployment's first execution produced any events."""
    self.logger.info('Verifying deployment events..')
    with set_client_tenant(self.manager, self.tenant):
        first_execution = self.manager.client.executions.list(
            deployment_id=self.deployment_id,
        )[0]
        events, total_events = self.manager.client.events.get(
            first_execution.id,
        )
        failure_message = 'There are no events for deployment: {0}'.format(
            self.deployment_id)
        self.assertGreater(len(events), 0, failure_message)
def upload_and_install_helloworld(attributes, logger, manager, target_vm,
                                  tmpdir, prefix='', tenant=None):
    """Upload, deploy, and install hello world, then verify it is serving."""
    assert not is_hello_world(target_vm), (
        'Hello world blueprint already installed!'
    )
    version = manager.branch_name
    _log(
        'Uploading helloworld blueprint to {version} manager'.format(
            version=version,
        ),
        logger,
        tenant,
    )
    blueprint_id = prefix + BLUEPRINT_ID
    deployment_id = prefix + DEPLOYMENT_ID
    hello_inputs = {
        'server_ip': target_vm.ip_address,
        'agent_user': attributes.centos_7_username,
        'agent_private_key_path': manager.remote_private_key_path,
    }
    upload_helloworld(manager, 'test-bp.yaml', blueprint_id, tenant, logger)
    deploy_helloworld(manager, hello_inputs, blueprint_id, deployment_id,
                      tenant, logger)
    with set_client_tenant(manager, tenant):
        install_execution = manager.client.executions.start(
            deployment_id, 'install')
    logger.info('Waiting for installation to finish')
    wait_for_execution(manager, install_execution, logger, tenant)
    assert is_hello_world(target_vm), (
        'Hello world blueprint did not install correctly.'
    )
def upload_and_install_helloworld(attributes, logger, manager, target_vm,
                                  tmpdir, prefix='', tenant=None):
    """Install the hello world example on target_vm and check it works."""
    assert not is_hello_world(target_vm), (
        'Hello world blueprint already installed!')
    _log('Uploading helloworld blueprint to {version} manager'.format(
             version=manager.branch_name),
         logger, tenant)
    blueprint_id = prefix + BLUEPRINT_ID
    deployment_id = prefix + DEPLOYMENT_ID
    inputs = {
        'server_ip': target_vm.ip_address,
        'agent_user': attributes.centos_7_username,
        'agent_private_key_path': manager.remote_private_key_path,
    }
    upload_helloworld(manager, 'test-bp.yaml', blueprint_id, tenant, logger)
    deploy_helloworld(
        manager, inputs, blueprint_id, deployment_id, tenant, logger)
    with set_client_tenant(manager, tenant):
        execution = manager.client.executions.start(deployment_id, 'install')
    logger.info('Waiting for installation to finish')
    wait_for_execution(manager, execution, logger, tenant)
    assert is_hello_world(target_vm), (
        'Hello world blueprint did not install correctly.')
def upload_helloworld(manager, blueprint, blueprint_id, tenant, logger):
    """Publish the hello world archive as a blueprint on the manager."""
    version = manager.branch_name
    logger.info(
        'Uploading blueprint {blueprint} from archive {archive} as {name} '
        'for manager version {version}'.format(
            blueprint=blueprint,
            archive=HELLO_WORLD_URL,
            name=blueprint_id,
            version=version,
        )
    )
    with set_client_tenant(manager, tenant):
        manager.client.blueprints.publish_archive(HELLO_WORLD_URL,
                                                  blueprint_id,
                                                  blueprint)
def upload_helloworld(manager, blueprint, blueprint_id, tenant, logger):
    """Publish the hello world archive, choosing the URL by manager version."""
    version = manager.branch_name
    # Managers 3.4.2 / 4.0 use the legacy archive.
    if version in ('3.4.2', '4.0'):
        url = OLD_WORLD_URL
    else:
        url = HELLO_WORLD_URL
    logger.info(
        'Uploading blueprint {blueprint} from archive {archive} as {name} '
        'for manager version {version}'.format(
            blueprint=blueprint,
            archive=url,
            name=blueprint_id,
            version=version))
    with set_client_tenant(manager, tenant):
        manager.client.blueprints.publish_archive(url,
                                                  blueprint_id,
                                                  blueprint)
def repetitive_check():
    """Return True once all deployment updates and their executions settle.

    Reads `manager`, `tenant`, `deployment_id` and `update_counter` from
    the enclosing scope.
    """
    with set_client_tenant(manager, tenant):
        dep_updates_list = manager.client.deployment_updates.list(
            deployment_id=deployment_id)
        executions_list = manager.client.executions.list(
            deployment_id=deployment_id,
            workflow_id='update',
            _include=['status'])
    # All expected updates must have been created.
    if len(dep_updates_list) != update_counter:
        return False
    # Every update must have reached a final state.
    if any(update.state not in ('failed', 'successful')
           for update in dep_updates_list):
        return False
    # Every update execution must have finished (in any way).
    finished_states = ('terminated', 'failed', 'cancelled')
    return all(execution['status'] in finished_states
               for execution in executions_list)
def upload_blueprint(self, use_cfy=False):
    """Clone, patch, and upload the example blueprint (CLI or REST).

    :param use_cfy: when True, upload through the cfy CLI (after pointing
        its profile at our tenant); otherwise use the REST client.
    """
    self.clone_example()
    blueprint_file = self._cloned_to / self.blueprint_file
    self._patch_blueprint()
    self.logger.info('Uploading blueprint: %s [id=%s]',
                     blueprint_file,
                     self.blueprint_id)
    if use_cfy:
        # Switch the CLI profile to our tenant before uploading.
        self.cfy.profile.set(['-t', self.tenant])
        self.cfy.blueprint.upload(['-b', self.blueprint_id, blueprint_file])
    else:
        with set_client_tenant(self.manager, self.tenant):
            self.manager.client.blueprints.upload(
                blueprint_file, self.blueprint_id)
def repetitive_check():
    """Check whether every deployment update and its executions completed.

    Closure over `manager`, `tenant`, `deployment_id` and `update_counter`
    from the enclosing scope; returns True only when everything settled.
    """
    with set_client_tenant(manager, tenant):
        dep_updates = manager.client.deployment_updates.list(
            deployment_id=deployment_id)
        update_executions = manager.client.executions.list(
            deployment_id=deployment_id,
            workflow_id='update',
            _include=['status'])
    if len(dep_updates) != update_counter:
        return False
    for update in dep_updates:
        if update.state not in ('failed', 'successful'):
            return False
    for execution in update_executions:
        if execution['status'] not in ('terminated', 'failed', 'cancelled'):
            return False
    return True
def prepared_manager(manager, cfy, logger):
    """Generator fixture: populate the manager with tenants and deployments.

    Creates every tenant named in TENANT_DEPLOYMENT_COUNTS, uploads all
    BLUEPRINTS to each tenant, creates and installs the configured number
    of deployments per blueprint, then yields the manager.
    """
    tenants = sorted(TENANT_DEPLOYMENT_COUNTS.keys())
    for tenant in tenants:
        # Sometimes rabbit isn't ready to have new tenants added immediately
        # after startup, so wait for the tenants to be successfully created
        # before we continue (to avoid it erroring when creating a deployment
        # instead)
        for attempt in xrange(30):
            try:
                if tenant != 'default_tenant':
                    manager.client.tenants.create(tenant)
                break
            except CloudifyClientError:
                time.sleep(2)
        # NOTE(review): if all 30 attempts fail the error is swallowed here
        # and deployment creation below fails instead — confirm intended.
    for tenant in tenants:
        with set_client_tenant(manager, tenant):
            for blueprint, bp_path in BLUEPRINTS.items():
                manager.client.blueprints.upload(
                    path=bp_path,
                    entity_id=blueprint,
                )
            for bp_name, count in TENANT_DEPLOYMENT_COUNTS[tenant].items():
                for i in xrange(count):
                    deployment_id = bp_name + str(i)
                    manager.client.deployments.create(
                        blueprint_id=bp_name,
                        deployment_id=deployment_id,
                    )
                # Wait for deployment-environment creation before installing.
                manager.wait_for_all_executions()
                for i in xrange(count):
                    deployment_id = bp_name + str(i)
                    manager.client.executions.start(
                        deployment_id,
                        'install',
                    )
                manager.wait_for_all_executions()
    yield manager
def remove_and_check_deployments(hello_vms, manager, logger,
                                 tenants=('default_tenant', ),
                                 with_prefixes=False):
    """Uninstall hello world deployments per tenant, then verify removal."""
    for tenant in tenants:
        _log(
            'Uninstalling hello world deployments from manager',
            logger,
            tenant,
        )
        _log(
            'Found deployments: {deployments}'.format(
                deployments=', '.join(get_deployments_list(manager, tenant)),
            ),
            logger,
            tenant,
        )
        # Prefixed ids are used when several tenants share one manager.
        deployment_id = (tenant + DEPLOYMENT_ID if with_prefixes
                         else DEPLOYMENT_ID)
        with set_client_tenant(manager, tenant):
            execution = manager.client.executions.start(
                deployment_id,
                'uninstall',
            )
        logger.info('Waiting for uninstall to finish')
        wait_for_execution(
            manager,
            execution,
            logger,
            tenant,
        )
        _log('Uninstalled deployments', logger, tenant)
    assert_hello_worlds(hello_vms, installed=False, logger=logger)
def _validate_secrets_created(self):
    """Verify the Tier 1 cluster holds the expected secrets and values."""
    self.logger.info(
        'Validating that secrets were created on Tier 1 cluster...')
    secrets_by_key = {}
    for secret in self.client.secrets.list(_all_tenants=True):
        secrets_by_key[secret['key']] = secret
    # During upgrade we add secrets for ssh keys, so check containment
    # of the expected keys rather than exact equality.
    for expected_key in (constants.SECRET_FILE_KEY,
                         constants.SECRET_STRING_KEY):
        assert expected_key in secrets_by_key
    file_secret = self.client.secrets.get(constants.SECRET_FILE_KEY)
    assert file_secret.value == constants.PY_SCRIPT
    tenant = secrets_by_key[constants.SECRET_STRING_KEY]['tenant_name']
    # The string secret lives on another tenant; switch the REST client
    # to that tenant just long enough to read it.
    with util.set_client_tenant(self, tenant):
        string_value = self.client.secrets.get(
            constants.SECRET_STRING_KEY).value
        assert string_value == constants.SECRET_STRING_VALUE
    self.logger.info('Secrets validated successfully')
def remove_and_check_deployments(hello_vms, manager, logger,
                                 tenants=('default_tenant',),
                                 with_prefixes=False):
    """Run uninstall for each tenant's hello world deployment and verify."""
    for tenant in tenants:
        _log('Uninstalling hello world deployments from manager',
             logger, tenant)
        found = ', '.join(get_deployments_list(manager, tenant))
        _log('Found deployments: {deployments}'.format(deployments=found),
             logger, tenant)
        if with_prefixes:
            deployment_id = tenant + DEPLOYMENT_ID
        else:
            deployment_id = DEPLOYMENT_ID
        with set_client_tenant(manager, tenant):
            execution = manager.client.executions.start(
                deployment_id, 'uninstall')
        logger.info('Waiting for uninstall to finish')
        wait_for_execution(manager, execution, logger, tenant)
        _log('Uninstalled deployments', logger, tenant)
    assert_hello_worlds(hello_vms, installed=False, logger=logger)
def _get_host_instance_id(manager, hello_world):
    """Return the node-instance id of the hello world's `vm` node."""
    with set_client_tenant(manager, hello_world.tenant):
        # We should only have a single instance of the `vm` node
        vm_instances = manager.client.node_instances.list(
            deployment_id=hello_world.deployment_id,
            node_id='vm')
        return vm_instances[0].id
def upload_test_plugin(manager, logger, tenant=None):
    """Upload the test plugin and wait for related executions to finish."""
    _log('Uploading test plugin', logger, tenant)
    with set_client_tenant(manager, tenant):
        manager.client.plugins.upload(TEST_PLUGIN_URL)
        manager.wait_for_all_executions()
def get_deployments_list(manager, tenant=None):
    """Return the ids of all deployments visible under the tenant."""
    with set_client_tenant(manager, tenant):
        deployments = manager.client.deployments.list()
    return [deployment['id'] for deployment in deployments]
def get_nodes(manager, tenant=None):
    """Return all nodes visible to the client under the given tenant."""
    with set_client_tenant(manager, tenant):
        nodes = manager.client.nodes.list()
        return nodes
def get_secrets_list(manager, tenant=None):
    """Return the keys of all secrets visible under the tenant."""
    with set_client_tenant(manager, tenant):
        secrets = manager.client.secrets.list()
    return [secret['key'] for secret in secrets]
def get_deployments_list(manager, tenant=None):
    """List deployment ids for the given tenant."""
    with set_client_tenant(manager, tenant):
        return [
            deployment['id']
            for deployment in manager.client.deployments.list()
        ]
def get_secrets_list(manager, tenant=None):
    """List secret keys for the given tenant."""
    with set_client_tenant(manager, tenant):
        return [secret['key']
                for secret in manager.client.secrets.list()]
def delete_deployment(self):
    """Delete this example's deployment from its tenant."""
    self.logger.info('Deleting deployment...')
    with set_client_tenant(self.manager, self.tenant):
        self.manager.client.deployments.delete(self.deployment_id)
def delete_deployment(self):
    """Remove the deployment used by this example."""
    self.logger.info('Deleting deployment...')
    with set_client_tenant(self.manager, self.tenant):
        client = self.manager.client
        client.deployments.delete(self.deployment_id)