def test_network_minimal(self):
    """A minimal network dict round-trips through from_dictionnary.

    NOTE: ``from_dictionnary`` (sic) is the actual enoslib API spelling.
    """
    payload = {
        "id": "n1",
        "roles": ["r1"],
        "site": "siteA",
        "type": "prod",
    }
    network = NetworkConfiguration.from_dictionnary(payload)
    # Every field of the dict must land on the matching attribute.
    self.assertEqual("n1", network.id)
    self.assertEqual(["r1"], network.roles)
    self.assertEqual("siteA", network.site)
    self.assertEqual("prod", network.type)
def test_programmatic(self):
    """Building a configuration through the fluent API registers every
    machine and network exactly once."""
    conf = Configuration()
    net = NetworkConfiguration(
        id="id", roles=["my_network"], type="prod", site="rennes"
    )
    machine_a = MachineConfiguration(
        roles=["r1"], cluster="paravance", primary_network=net
    )
    machine_b = MachineConfiguration(
        roles=["r2"], cluster="parapluie", primary_network=net, nodes=10
    )
    # add_* calls return the configuration, so they chain.
    conf.add_network_conf(net) \
        .add_machine_conf(machine_a) \
        .add_machine_conf(machine_b)
    conf.finalize()
    self.assertEqual(2, len(conf.machines))
    self.assertEqual(1, len(conf.networks))
def make_conf(testing=True):
    """Build the Grid'5000 configuration for the OpenStack deployment.

    :param testing: when True, target a smaller reservation on the Rennes
        site (10 nodes on ``paravance``); otherwise make a dated
        reservation on ``ecotype`` in Nantes with 15 nodes.
    :return: a finalized ``Configuration``.
    """
    kavlan = NetworkConfiguration(
        id="net", type="kavlan", roles=["network_interface"], site="nantes"
    )
    prod_net = NetworkConfiguration(
        id="net_ext",
        type="prod",
        roles=["neutron_external_interface"],
        site="nantes",
    )
    machine = MachineConfiguration(
        roles=["OpenStack"],
        cluster="ecotype",
        nodes=10 if testing else 15,
        primary_network=kavlan,
        secondary_networks=[prod_net],
    )
    if testing:
        # Test runs are relocated to the Rennes site on a smaller cluster.
        machine.cluster = "paravance"
        kavlan.site = "rennes"
        prod_net.site = "rennes"
        conf = (
            Configuration.from_settings(
                walltime="5:00:00",
                job_name="os-imt-aio-test",
                env_name="ubuntu1804-x64-min",
                # You can specify a jobid with
                # oargrid_jobids=[["nantes", "189621"]]
            )
            .add_network_conf(kavlan)
            .add_network_conf(prod_net)
            .add_machine(**machine.__dict__)
        )
    else:
        conf = (
            Configuration.from_settings(
                reservation="2020-02-03 12:00:01",
                walltime="59:59:58",
                job_name="os-imt-aio",
                env_name="ubuntu1804-x64-min",
            )
            .add_network_conf(prod_net)
            .add_machine(**machine.__dict__)
        )
    conf.finalize()
    return conf
from enoslib.infra.enos_g5k.provider import G5k
from enoslib.infra.enos_g5k.configuration import (Configuration,
                                                  NetworkConfiguration)
import logging
import os

logging.basicConfig(level=logging.INFO)

# Path to the Ansible inventory file.
inventory = os.path.join(os.getcwd(), "hosts")

# Claim the resources: one control node on parapluie attached to the
# production network, plus a /22 subnet not linked to any machine.
conf = Configuration.from_settings(job_type="allow_classic_ssh")
prod_network = NetworkConfiguration(
    id="n1", type="prod", roles=["my_network"], site="rennes"
)
(
    conf.add_network_conf(prod_network)
    .add_network(
        id="not_linked_to_any_machine",
        type="slash_22",
        roles=["my_subnet"],
        site="rennes",
    )
    .add_machine(
        roles=["control"],
        cluster="parapluie",
        nodes=1,
        primary_network=prod_network,
    )
    .finalize()
)

provider = G5k(conf)
roles, networks = provider.init()
cs = get_all_clusters_sites()

for cluster in CLUSTERS:
    # Fail early on unknown cluster names rather than at reservation time.
    # ValueError (not a bare Exception) signals an invalid input value;
    # callers catching Exception still catch it.
    if cluster not in cs:
        raise ValueError(
            f'Cluster {cluster} was not found in list of clusters…')

    ## parallel calibration of clusters
    conf = Configuration.from_settings(
        job_type='allow_classic_ssh',
        job_name=f'calibrate energy-service at {cluster}',
        walltime='01:00:00')
    ## (TODO) check the default available network at each site
    network = NetworkConfiguration(id='n1',
                                   type='prod',
                                   roles=['my_network'],
                                   site=cs[cluster])
    conf.add_network_conf(network)\
        .add_machine(roles=['calibrate'],
                     cluster=cluster,
                     nodes=1,  ## we deploy everything on 1 machine
                     primary_network=network)\
        .finalize()
    provider = G5k(conf)
    roles, networks = provider.init()
    roles = discover_networks(roles, networks)
    ## #A deploy the energy monitoring stack