def init_compressed_csar(csar_name: str, inputs: typing.Optional[dict],
                         storage: Storage, clean_storage: bool):
    """
    Initialize opera storage from a compressed (zipped) CSAR.

    Validates the CSAR, extracts it under ``<storage>/csars/csar``, records
    the entrypoint as ``root_file`` and instantiates the service template.

    :param csar_name: path to the compressed CSAR file
    :param inputs: deployment inputs; ``None`` is treated as empty
    :param storage: opera state storage
    :param clean_storage: wipe any previously initialized state first
    """
    if storage.exists("root_file"):
        if clean_storage:
            storage.remove_all()
        else:
            print("Looks like service template or CSAR has already been initialized. "
                  "Use the --clean/-c flag to clear the storage.")
            return

    if inputs is None:
        inputs = {}
    storage.write_json(inputs, "inputs")

    csars_dir = Path(storage.path) / "csars"
    csars_dir.mkdir(exist_ok=True)

    csar = CloudServiceArchive.create(PurePath(csar_name))
    csar.validate_csar()
    tosca_service_template = csar.get_entrypoint()

    # unzip csar and save the path to storage; the context manager closes
    # the archive handle (the original left it open)
    csar_dir = csars_dir / Path("csar")
    with ZipFile(csar_name, "r") as archive:
        archive.extractall(csar_dir)
    csar_tosca_service_template_path = csar_dir / tosca_service_template
    storage.write(str(csar_tosca_service_template_path), "root_file")

    # try to initiate service template from csar
    ast = tosca.load(Path(csar_dir), Path(tosca_service_template))
    template = ast.get_template(inputs)
    template.instantiate(storage)
def test_undeclared_requirements(self, tmp_path, yaml_text):
    """Parsing must reject a requirement name the node type never declared."""
    template_path = pathlib.PurePath("template.yaml")
    (tmp_path / template_path).write_text(
        yaml_text(
            # language=yaml
            """
            tosca_definitions_version: tosca_simple_yaml_1_3
            topology_template:
              node_templates:
                node_1:
                  type: tosca.nodes.SoftwareComponent
                node_2:
                  type: tosca.nodes.SoftwareComponent
                  requirements:
                    - dependency: node_1
                node_3:
                  type: tosca.nodes.SoftwareComponent
                  requirements:
                    - dependency_not_defined1: node_1
            """))

    storage = Storage(tmp_path / pathlib.Path(".opera"))
    storage.write("template.yaml", "root_file")

    ast = tosca.load(tmp_path, template_path)
    with pytest.raises(
            ParseError,
            match="Undeclared requirements: dependency_not_defined1"):
        ast.get_template({})
def update(storage_old: Storage, workdir_old: str, storage_new: Storage,
           workdir_new: str, instance_comparer: InstanceComparer,
           instance_diff: Diff, verbose_mode: bool, num_workers: int,
           overwrite: bool):
    """
    Apply an update: undeploy the old topology, then deploy the new one.

    :param overwrite: when true, the new state is written into the old
        storage (inputs and root file are copied over as well)
    """
    old_template = get_template(storage_old)
    new_template = get_template(storage_new)
    old_topology = old_template.instantiate(storage_old)
    new_topology = new_template.instantiate(storage_new)

    if verbose_mode:
        print(format_outputs(instance_diff.outputs(), "json"))

    instance_comparer.prepare_update(old_topology, new_topology, instance_diff)

    # tear down the old deployment and persist its final state
    old_topology.undeploy(verbose_mode, workdir_old, num_workers)
    old_topology.write_all()

    if overwrite:
        # swap storage
        new_topology.set_storage(storage_old)
        # rewrite inputs and root file
        storage_old.write_json(storage_new.read_json("inputs"), "inputs")
        storage_old.write(storage_new.read("root_file"), "root_file")

    new_topology.write_all()
    new_topology.deploy(verbose_mode, workdir_new, num_workers)
def test_undefined_required_properties3(self, tmp_path, yaml_text):
    """Parsing must fail when a required property is left unassigned."""
    template_path = pathlib.PurePath("template.yaml")
    (tmp_path / template_path).write_text(
        yaml_text(
            # language=yaml
            """
            tosca_definitions_version: tosca_simple_yaml_1_3
            node_types:
              my_node_type:
                derived_from: tosca.nodes.Root
                properties:
                  property1:
                    type: integer
                  property2:
                    type: float
                  property3:
                    type: string
            topology_template:
              node_templates:
                my_node_template:
                  type: my_node_type
                  properties:
                    property1: 42
                    property2: 42.0
            """))

    storage = Storage(tmp_path / pathlib.Path(".opera"))
    storage.write("template.yaml", "root_file")

    ast = tosca.load(tmp_path, template_path)
    with pytest.raises(ParseError,
                       match="Missing a required property: property3"):
        ast.get_template({})
def deploy(args):
    """
    CLI deploy entry point.

    Returns 0 on success and 1 on any parse/data/input error (errors are
    printed, not raised).
    """
    if args.instance_path and not path.isdir(args.instance_path):
        raise argparse.ArgumentTypeError(
            "Directory {0} is not a valid path!".format(args.instance_path))

    if args.instance_path:
        storage = Storage(Path(args.instance_path).joinpath(".opera"))
    else:
        storage = Storage(Path(".opera"))
    storage.write(args.csar.name, "root_file")

    # TODO(@tadeboro): This should be part of the init command that we do not
    # have yet.
    try:
        inputs = yaml.safe_load(args.inputs) if args.inputs else {}
        storage.write_json(inputs, "inputs")
    except Exception as e:
        print("Invalid inputs: {}".format(e))
        return 1

    try:
        ast = tosca.load(Path.cwd(), PurePath(args.csar.name))
        template = ast.get_template(inputs)
        topology = template.instantiate(storage)
        topology.deploy()
    except ParseError as e:
        print("{}: {}".format(e.loc, e))
        return 1
    except DataError as e:
        print(str(e))
        return 1

    return 0
def deploy(service_template: str, inputs: typing.Optional[dict],
           storage: Storage, verbose_mode: bool, num_workers: int,
           delete_existing_state: bool):
    """
    Deploy a service template, reusing stored inputs when none are given.

    :raises ParseError:
    :raises DataError:
    """
    if delete_existing_state:
        storage.remove("instances")

    # fall back to previously stored inputs, then persist whatever we use
    if inputs is None:
        inputs = (yaml.safe_load(storage.read("inputs"))
                  if storage.exists("inputs") else {})
    storage.write_json(inputs, "inputs")
    storage.write(service_template, "root_file")

    if storage.exists("csars"):
        # a CSAR was initialized earlier: work relative to its extraction dir
        csar_dir = Path(storage.path) / "csars" / "csar"
        workdir = str(csar_dir)
        ast = tosca.load(Path(csar_dir),
                         PurePath(service_template).relative_to(csar_dir))
    else:
        workdir = str(Path.cwd())
        ast = tosca.load(Path.cwd(), PurePath(service_template))

    template = ast.get_template(inputs)
    topology = template.instantiate(storage)
    topology.deploy(verbose_mode, workdir, num_workers)
def deploy_service_template(service_template_path: PurePath,
                            inputs: typing.Optional[dict], storage: Storage,
                            verbose_mode: bool, num_workers: int,
                            delete_existing_state: bool):
    """
    Deploy an uncompressed service template.

    Stored inputs are reused when *inputs* is None; the workdir is the
    CSAR extraction dir when one was initialized, otherwise the template's
    parent directory.
    """
    if delete_existing_state:
        storage.remove("instances")

    if inputs is None:
        inputs = (yaml.safe_load(storage.read("inputs"))
                  if storage.exists("inputs") else {})
    storage.write_json(inputs, "inputs")
    storage.write(str(service_template_path), "root_file")

    # set workdir and check if service template/CSAR has been initialized
    if storage.exists("csars"):
        workdir = Path(storage.path) / "csars" / "csar"
        ast = tosca.load(workdir, service_template_path.relative_to(workdir))
    else:
        workdir = Path(service_template_path.parent)
        ast = tosca.load(workdir, PurePath(service_template_path.name))

    # initialize service template and deploy
    template = ast.get_template(inputs)
    topology = template.instantiate(storage)
    topology.deploy(verbose_mode, workdir, num_workers)
def deploy_compressed_csar(csar_path: PurePath, inputs: typing.Optional[dict],
                           storage: Storage, verbose_mode: bool,
                           num_workers: int, delete_existing_state: bool):
    """
    Deploy a compressed (zipped) CSAR.

    Validates the CSAR, extracts it under ``<storage>/csars/csar``, records
    the entrypoint as ``root_file`` and deploys the instantiated topology.

    :param csar_path: path to the compressed CSAR file
    :param inputs: deployment inputs; ``None`` is treated as empty
    :param storage: opera state storage
    :param delete_existing_state: remove previously stored instances first
    """
    if delete_existing_state:
        storage.remove("instances")

    if inputs is None:
        inputs = {}
    storage.write_json(inputs, "inputs")

    csars_dir = Path(storage.path) / "csars"
    csars_dir.mkdir(exist_ok=True)

    csar = CloudServiceArchive.create(csar_path)
    csar.validate_csar()
    tosca_service_template = csar.get_entrypoint()

    # unzip csar, save the path to storage and set workdir; the context
    # manager closes the archive (replaces the old pylint suppression)
    csar_dir = csars_dir / Path("csar")
    with ZipFile(csar_path, "r") as archive:
        archive.extractall(csar_dir)
    csar_tosca_service_template_path = csar_dir / tosca_service_template
    storage.write(str(csar_tosca_service_template_path), "root_file")
    workdir = str(csar_dir)

    # initialize service template from CSAR and deploy
    ast = tosca.load(Path(csar_dir), Path(tosca_service_template))
    template = ast.get_template(inputs)
    topology = template.instantiate(storage)
    topology.deploy(verbose_mode, workdir, num_workers)
def get_nodes(self, file, temp_Dir):
    """
    Instantiate the template in *file* and collect node names.

    :param file: service template file name (relative to temp_Dir)
    :param temp_Dir: directory holding the template and used for storage
    :returns: tuple ``(node_list, node_keys_updates)`` where the second
        item holds every node id with its last two characters stripped
        (presumably an ``_0`` instance suffix — TODO confirm) and the
        first holds only the ids of nodes that have requirements;
        returns ``(1, 1)`` on parse/data errors (printed, not raised).
    """
    try:
        storage = Storage(Path(temp_Dir))
        storage.write(file, "root_file")

        ast = tosca.load(Path(temp_Dir), PurePath(file))
        template = ast.get_template({})
        topology = template.instantiate(storage)
        nodes = topology.nodes

        # iterate the dict directly instead of parallel key/value lists
        node_keys_updates = [key[:-2] for key in nodes]
        node_list = [key[:-2] for key, node in nodes.items()
                     if len(node.template.requirements) > 0]
        return node_list, node_keys_updates
    except ParseError as e:
        print("{}: {}".format(e.loc, e))
        return 1, 1
    except DataError as e:
        print(str(e))
        return 1, 1
def prepare_template(path, yaml_text, template):
    """
    Write *template* to disk under *path*, parse it and instantiate it.

    :returns: ``(parsed_template, topology, path, storage)``
    """
    root_name = pathlib.PurePath("template.yaml")
    (path / root_name).write_text(yaml_text(template))

    storage = Storage(path / pathlib.Path(".opera"))
    storage.write("template.yaml", "root_file")

    ast = tosca.load(path, root_name)
    parsed = ast.get_template({})
    topology = parsed.instantiate(storage)
    return parsed, topology, path, storage
def initialize_service_template(service_template: str,
                                inputs: typing.Optional[dict],
                                storage: Storage):
    """Persist inputs and root file, then parse and instantiate the template."""
    inputs = {} if inputs is None else inputs
    storage.write_json(inputs, "inputs")
    storage.write(service_template, "root_file")

    service_ast = tosca.load(Path.cwd(), PurePath(service_template))
    service_ast.get_template(inputs).instantiate(storage)
def init_service_template(service_template: str, inputs: typing.Optional[dict],
                          storage: Storage, clean_storage: bool):
    """
    Initialize opera storage from an (uncompressed) service template.

    :param clean_storage: wipe any previously initialized state first
    """
    if storage.exists("root_file"):
        # guard clause: refuse to reinitialize unless asked to clean up
        if not clean_storage:
            print("Looks like service template or CSAR has already been initialized. "
                  "Use --clean/-c flag to clear the storage.")
            return
        storage.remove_all()

    inputs = {} if inputs is None else inputs
    storage.write_json(inputs, "inputs")
    storage.write(service_template, "root_file")

    service_ast = tosca.load(Path.cwd(), PurePath(service_template))
    service_ast.get_template(inputs).instantiate(storage)
def service_template(self, tmp_path, yaml_text):
    """Fixture: instantiated template with a custom relationship type."""
    template_path = pathlib.PurePath("template.yaml")
    (tmp_path / template_path).write_text(
        yaml_text(
            # language=yaml
            """
            tosca_definitions_version: tosca_simple_yaml_1_3
            node_types:
              my_base_type:
                derived_from: tosca.nodes.Root
                attributes:
                  colour:
                    type: string
              my_node_type:
                derived_from: my_base_type
              my_collector_node_type:
                derived_from: my_base_type
                requirements:
                  - my_target:
                      capability: tosca.capabilities.Root
                      relationship: my_relationship_type
            relationship_types:
              my_relationship_type:
                derived_from: tosca.relationships.Root
                attributes:
                  colour:
                    type: string
            topology_template:
              node_templates:
                my_node:
                  type: my_node_type
                my_collector:
                  type: my_collector_node_type
                  requirements:
                    - my_target: my_node
            """))

    storage = Storage(tmp_path / pathlib.Path(".opera"))
    storage.write("template.yaml", "root_file")

    ast = tosca.load(tmp_path, template_path)
    template = ast.get_template({})
    template.instantiate(storage)
    yield template
def service_template(self, tmp_path, yaml_text):
    """Fixture: template exercising get_input/join inside maps and lists."""
    template_path = pathlib.PurePath("service.yaml")
    (tmp_path / template_path).write_text(yaml_text(
        # language=yaml
        """
        tosca_definitions_version: tosca_simple_yaml_1_3
        node_types:
          eval_test_type:
            derived_from: tosca.nodes.Root
            properties:
              test_string_input:
                type: string
              test_map_input:
                type: map
              test_list_input:
                type: list
        topology_template:
          inputs:
            marker:
              type: string
          node_templates:
            test_node:
              type: eval_test_type
              properties:
                test_string_input: { get_input: marker }
                test_map_input:
                  ENV: { get_input: marker }
                  ENV2: test
                  ENV3: { join: [ [ "test", "_", "join" ] ] }
                test_list_input:
                  - test
                  - { get_input: marker }
                  - { join: [ [ "test", "_", "join" ] ] }
        """
    ))

    storage = Storage(tmp_path / pathlib.Path(".opera"))
    storage.write("service.yaml", "root_file")

    ast = tosca.load(tmp_path, template_path)
    yield ast.get_template({"marker": "test_input"})
def initialize_compressed_csar(csar_name: str, inputs: typing.Optional[dict],
                               storage: Storage):
    """
    Initialize opera storage from a compressed (zipped) CSAR.

    Validates the CSAR, extracts it under ``<storage>/csars/csar``, records
    the entrypoint as ``root_file`` and instantiates the service template.

    :param csar_name: path to the compressed CSAR file
    :param inputs: deployment inputs; ``None`` is treated as empty
    :param storage: opera state storage
    """
    if inputs is None:
        inputs = {}
    storage.write_json(inputs, "inputs")

    csars_dir = Path(storage.path) / "csars"
    csars_dir.mkdir(exist_ok=True)

    # validate csar
    csar = CloudServiceArchive(csar_name, csars_dir)
    tosca_service_template = csar.validate_csar()

    # unzip csar and save the path to storage; the context manager closes
    # the archive handle (the original left it open)
    csar_dir = csars_dir / Path("csar")
    with ZipFile(csar_name, "r") as archive:
        archive.extractall(csar_dir)
    csar_tosca_service_template_path = csar_dir / tosca_service_template
    storage.write(str(csar_tosca_service_template_path), "root_file")

    # try to initiate service template from csar
    ast = tosca.load(Path(csar_dir), Path(tosca_service_template))
    template = ast.get_template(inputs)
    template.instantiate(storage)
def service_template(self, tmp_path, yaml_text):
    """Fixture: scaling-policy template with triggers plus stub playbooks.

    Writes the service template and every referenced playbook into
    *tmp_path*, instantiates the topology and yields the parsed template.
    """
    template_path = pathlib.PurePath("service.yaml")
    (tmp_path / template_path).write_text(yaml_text(
        # language=yaml
        """
        tosca_definitions_version: tosca_simple_yaml_1_3
        node_types:
          steampunk.nodes.VM:
            derived_from: tosca.nodes.Compute
            interfaces:
              Standard:
                type: tosca.interfaces.node.lifecycle.Standard
                operations:
                  create: playbooks/create.yaml
                  delete: playbooks/delete.yaml
              scaling_up:
                type: steampunk.interfaces.scaling.ScaleUp
              scaling_down:
                type: steampunk.interfaces.scaling.ScaleDown
              autoscaling:
                operations:
                  retrieve_info:
                    description: Operation for autoscaling.
                    implementation: playbooks/retrieve_info.yaml
                  autoscale:
                    description: Operation for autoscaling.
                    implementation: playbooks/auto_scale.yaml
                    inputs:
                      min_size:
                        type: float
                        value: { get_property: [ autoscale, min_size ] }
                      max_size:
                        type: float
                        value: { get_property: [ autoscale, max_size ] }
          steampunk.nodes.ConfigureMonitoring:
            derived_from: tosca.nodes.Root
            interfaces:
              Standard:
                type: tosca.interfaces.node.lifecycle.Standard
                operations:
                  configure:
                    implementation: playbooks/configure.yaml
                    inputs:
                      cpu_lower_bound:
                        type: float
                        value: { get_property: [ steampunk.policies.scaling.ScaleDown, cpu_lower_bound ] }
                      cpu_upper_bound:
                        type: float
                        value: { get_property: [ steampunk.policies.scaling.ScaleUp, cpu_upper_bound ] }
        interface_types:
          steampunk.interfaces.scaling.ScaleDown:
            derived_from: tosca.interfaces.Root
            operations:
              scale_down:
                inputs:
                  adjustment:
                    type: float
                    value: { get_property: [ steampunk.policies.scaling.ScaleDown, adjustment ] }
                description: Operation for scaling down.
                implementation: playbooks/scale_down.yaml
          steampunk.interfaces.scaling.ScaleUp:
            derived_from: tosca.interfaces.Root
            operations:
              scale_up:
                inputs:
                  adjustment:
                    type: float
                    value: { get_property: [ steampunk.policies.scaling.ScaleUp, adjustment ] }
                description: Operation for scaling up.
                implementation: playbooks/scale_up.yaml
        policy_types:
          steampunk.policies.scaling.ScaleDown:
            derived_from: tosca.policies.Scaling
            properties:
              cpu_lower_bound:
                description: The lower bound for the CPU
                type: float
                required: false
                constraints:
                  - less_or_equal: 20.0
              adjustment:
                description: The amount by which to scale
                type: integer
                required: false
                constraints:
                  - less_or_equal: -1
            targets: [ steampunk.nodes.VM, steampunk.nodes.ConfigureMonitoring ]
            triggers:
              steampunk.triggers.scaling.ScaleDown:
                description: A trigger for scaling down
                event: scale_down_trigger
                target_filter:
                  node: steampunk.nodes.VM
                condition:
                  constraint:
                    - not:
                        - and:
                            - available_instances: [ { greater_than: 42 } ]
                            - available_space: [ { greater_than: 1000 } ]
                action:
                  - call_operation:
                      operation: scaling_down.scale_down
                      inputs:
                        adjustment: { get_property: [ SELF, adjustment ] }
          steampunk.policies.scaling.ScaleUp:
            derived_from: tosca.policies.Scaling
            properties:
              cpu_upper_bound:
                description: The upper bound for the CPU
                type: float
                required: false
                constraints:
                  - greater_or_equal: 80.0
              adjustment:
                description: The amount by which to scale
                type: integer
                required: false
                constraints:
                  - greater_or_equal: 1
            targets: [ steampunk.nodes.VM, steampunk.nodes.ConfigureMonitoring ]
            triggers:
              steampunk.triggers.scaling.ScaleUp:
                description: A trigger for scaling up
                event: scale_up_trigger
                target_filter:
                  node: steampunk.nodes.VM
                condition:
                  constraint:
                    - not:
                        - and:
                            - available_instances: [ { greater_than: 42 } ]
                            - available_space: [ { greater_than: 1000 } ]
                action:
                  - call_operation:
                      operation: scaling_up.scale_up
                      inputs:
                        adjustment: { get_property: [ SELF, adjustment ] }
          steampunk.policies.scaling.AutoScale:
            derived_from: tosca.policies.Scaling
            properties:
              min_size:
                type: integer
                description: The minimum number of instances
                required: true
                status: supported
                constraints:
                  - greater_or_equal: 1
              max_size:
                type: integer
                description: The maximum number of instances
                required: true
                status: supported
                constraints:
                  - greater_or_equal: 10
        topology_template:
          node_templates:
            VM:
              type: steampunk.nodes.VM
            ConfigureMonitoring:
              type: steampunk.nodes.ConfigureMonitoring
          policies:
            - scale_down:
                type: steampunk.policies.scaling.ScaleDown
                properties:
                  cpu_lower_bound: 10
                  adjustment: 1
            - scale_up:
                type: steampunk.policies.scaling.ScaleUp
                properties:
                  cpu_upper_bound: 90
                  adjustment: 5
            - autoscale:
                type: steampunk.policies.scaling.AutoScale
                properties:
                  min_size: 3
                  max_size: 7
                targets: [ VM ]
                triggers:
                  steampunk.triggers.scaling.AutoScale:
                    description: A trigger for autoscaling
                    event: auto_scale_trigger
                    schedule:
                      start_time: 2020-04-08T21:59:43.10-06:00
                      end_time: 2022-04-08T21:59:43.10-06:00
                    target_filter:
                      node: VM
                      requirement: workstation
                      capability: host_capability
                    condition:
                      constraint:
                        - not:
                            - and:
                                - available_instances: [ { greater_than: 42 } ]
                                - available_space: [ { greater_than: 1000 } ]
                      period: 60 sec
                      evaluations: 2
                      method: average
                    action:
                      - call_operation: autoscaling.retrieve_info
                      - call_operation: autoscaling.autoscale
        """
    ))

    # every operation implementation points at the same trivial playbook
    # language=yaml
    playbook = \
        """
        - hosts: all
          tasks:
            - name: Debug
              debug:
                msg: "Just testing."
        """
    playbook_dir = tmp_path / "playbooks"
    pathlib.Path.mkdir(playbook_dir)
    for playbook_name in ("create", "delete", "configure", "scale_up",
                          "scale_down", "retrieve_info", "auto_scale"):
        (playbook_dir / "{}.yaml".format(playbook_name)).write_text(
            yaml_text(playbook))

    storage = Storage(tmp_path / pathlib.Path(".opera"))
    storage.write("service.yaml", "root_file")

    ast = tosca.load(tmp_path, template_path)
    template = ast.get_template({})
    template.instantiate(storage)
    yield template