def test_multivm_with_diff_bootconfig():
    """Tests in case of multi-vm blueprint, correct disk is set to bootable"""

    # Ahv Helpers uses Metadata Context, It should the context(if any) defined in this file only
    get_metadata_payload(__file__)

    context_obj = get_context()
    context_obj.reset_configuration()

    spec = AhvBlueprint.get_dict()
    substrates = spec["substrate_definition_list"]

    # From AhvBlueprint class:
    # substrates[0] = AhvVmSubstrate and substrates[1] = AhvVmSubstrate2
    expected_boot_configs = [
        # AhvVmSubstrate -> MyAhvVm (vm_cls): SCSI disk with device_index = 2 is bootable
        {"boot_device": {"disk_address": {"device_index": 2, "adapter_type": "SCSI"}}},
        # AhvVmSubstrate2 -> MyAhvVm2 (vm_cls): PCI disk with device_index = 0 is bootable
        {"boot_device": {"disk_address": {"device_index": 0, "adapter_type": "PCI"}}},
    ]

    for substrate_spec, expected in zip(substrates, expected_boot_configs):
        assert substrate_spec["create_spec"]["resources"]["boot_config"] == expected
def test_json():
    """Dumps MyAhvVm as pretty-printed JSON after resetting the context."""

    # Ahv Helpers uses Metadata Context, It should the context(if any) defined in this file only
    get_metadata_payload(__file__)

    context_obj = get_context()
    context_obj.reset_configuration()

    print(MyAhvVm.json_dumps(pprint=True))
def compile_blueprint(bp_file, brownfield_deployment_file=None):
    """Compile a blueprint DSL file into its API payload dict.

    Args:
        bp_file (str): path to the python file defining the blueprint
        brownfield_deployment_file (str): optional file providing brownfield
            deployment classes that override same-named blueprint deployments

    Returns:
        dict: blueprint payload, or None when no blueprint class is found
    """

    # Constructing metadata payload
    # Note: This should be constructed before loading bp module. As metadata will be used while getting bp_payload
    metadata_payload = get_metadata_payload(bp_file)

    user_bp_module = get_blueprint_module_from_file(bp_file)
    UserBlueprint = get_blueprint_class_from_module(user_bp_module)
    if UserBlueprint is None:
        return None

    # Fetching bf_deployments
    bf_deployments = get_brownfield_deployment_classes(brownfield_deployment_file)
    if bf_deployments:
        bf_dep_map = {bd.__name__: bd for bd in bf_deployments}
        for profile in UserBlueprint.profiles:
            for idx, deployment in enumerate(profile.deployments):
                bf_dep = bf_dep_map.get(deployment.__name__)
                if bf_dep is None:
                    continue

                # Add the packages and substrates from deployment
                bf_dep.packages = deployment.packages
                bf_dep.substrate = deployment.substrate

                # If name attribute not exists in brownfield deployment file and given in blueprint file,
                # Use the one that is given in blueprint file
                if deployment.name and not bf_dep.name:
                    bf_dep.name = deployment.name

                # Replacing new deployment in profile.deployments
                profile.deployments[idx] = bf_dep

    bp_payload = None
    # NOTE(review): isinstance against type(SimpleBlueprint) compares metaclasses,
    # i.e. "is this class built by the same DSL metaclass" — intentional DSL idiom.
    if isinstance(UserBlueprint, type(SimpleBlueprint)):
        bp_payload = UserBlueprint.make_bp_dict()
        if "project_reference" in metadata_payload:
            bp_payload["metadata"]["project_reference"] = metadata_payload[
                "project_reference"
            ]
    else:
        if isinstance(UserBlueprint, type(VmBlueprint)):
            UserBlueprint = UserBlueprint.make_bp_obj()

        UserBlueprintPayload, _ = create_blueprint_payload(
            UserBlueprint, metadata=metadata_payload
        )
        bp_payload = UserBlueprintPayload.get_dict()

        # Adding the display map to client attr
        display_name_map = get_dsl_metadata_map()
        bp_payload["spec"]["resources"]["client_attrs"] = {"None": display_name_map}

        # Note - Install/Uninstall runbooks are not actions in Packages.
        # Remove package actions after compiling.
        cdict = bp_payload["spec"]["resources"]
        for package in cdict["package_definition_list"]:
            package.pop("action_list", None)

    return bp_payload
def test_metadata_in_blueprint(self):
    """Tests metadata in blueprint file"""

    runner = CliRunner()

    # Writing project name to project file for blueprint
    make_file_dir(LOCAL_PROJECTNAME_FILE)
    with open(LOCAL_PROJECTNAME_FILE, "w") as project_file:
        project_file.write(self.dsl_project_name)

    # Compile Blueprint file
    LOG.info("Compiling Blueprint with metadata")
    result = runner.invoke(cli, ["compile", "bp", "--file={}".format(DSL_BP_FILEPATH)])

    if result.exit_code:
        response_details = json.dumps(
            {"Output": result.output, "Exception": str(result.exception)},
            indent=4,
            separators=(",", ": "),
        )
        LOG.debug("Cli Response: {}".format(response_details))
        LOG.debug(
            "Traceback: \n{}".format("".join(traceback.format_tb(result.exc_info[2])))
        )
        pytest.fail("BP compile command failed")

    bp_payload = json.loads(result.output)

    # Checking the presence of correct project in metadata
    LOG.info("Checking the project in metadata")
    assert bp_payload["metadata"]["project_reference"]["name"] == self.dsl_project_name

    # Restoring the metadata context
    get_metadata_payload(__file__)
def test_project_with_env_create_and_delete(self):
    """
    Describe and update flow are already checked in `test_project_crud`
    It will test only create and delete flow on projects with environment
    """

    runner = CliRunner()
    self.dsl_project_name = "Test_DSL_Project_Env{}".format(uuid.uuid4())

    LOG.info("Testing 'calm create project' command")
    create_args = [
        "create",
        "project",
        "--file={}".format(DSL_PROJECT_WITH_ENV_PATH),
        "--name={}".format(self.dsl_project_name),
        "--description='Test DSL Project with Env to delete'",
    ]
    result = runner.invoke(cli, create_args)

    if result.exit_code:
        response_details = json.dumps(
            {"Output": result.output, "Exception": str(result.exception)},
            indent=4,
            separators=(",", ": "),
        )
        LOG.debug("Cli Response: {}".format(response_details))
        LOG.debug(
            "Traceback: \n{}".format("".join(traceback.format_tb(result.exc_info[2])))
        )
        pytest.fail("Project creation from python file failed")
    LOG.info("Success")

    self._test_project_delete()

    # Restoring the metadata context
    get_metadata_payload(__file__)