def test_from_dictionnary_no_network(self):
    """A machine whose primary_network names an undeclared network id
    must make from_dictionnary raise a ValueError."""
    machines = [{
        "roles": ["r1"],
        "nodes": 2,
        "cluster": "cluste1",
        "primary_network": "n1"
    }]
    description = {"resources": {"machines": machines, "networks": []}}
    with self.assertRaises(ValueError):
        Configuration.from_dictionnary(description)
def test_from_dictionnary_unbound_secondary_networks(self):
    """from_dictionnary must raise ValueError when a machine references
    network ids ("n1", "n2" here) that match no declared network id.

    Fix: the return value was bound to an unused local ``conf`` and the
    assertRaises context to an unused ``ctx``; both dead bindings are
    removed.
    """
    d = {
        "resources": {
            "machines": [{
                "roles": ["r1"],
                "nodes": 2,
                "cluster": "cluste1",
                "primary_network": "n1",
                "secondary_networks": ["n2"]
            }],
            "networks": [{
                "id": "network",
                "roles": ["n1"],
                "site": "siteA",
                "type": "prod"
            }, {
                "id": "unbound_network",
                "roles": ["nr2"],
                "site": "siteA",
                "type": "kavlan"
            }]
        }
    }
    with self.assertRaises(ValueError):
        Configuration.from_dictionnary(d)
def test_from_dictionnary_with_machines(self):
    """A valid description yields one machine group whose primary
    network is the declared network object."""
    machine_desc = {
        "roles": ["r1"],
        "nodes": 2,
        "cluster": "cluste1",
        "primary_network": "n1"
    }
    network_desc = {
        "id": "n1",
        "roles": ["rn1"],
        "site": "siteA",
        "type": "prod"
    }
    conf = Configuration.from_dictionnary(
        {"resources": {"machines": [machine_desc],
                       "networks": [network_desc]}})
    self.assertTrue(len(conf.machines) == 1)
    self.assertTrue(len(conf.networks) == 1)
    group = conf.machines[0]
    net = conf.networks[0]
    self.assertEqual(2, group.nodes)
    # check the network ref
    self.assertEqual(net, group.primary_network)
def g5k(config, force, env=None, **kwargs):
    """Claim Grid'5000 resources and record the result in ``env``.

    Args:
        config: experiment configuration dict; the "g5k" entry is the
            provider configuration fed to G5kConf.from_dictionnary.
        force: forwarded to the provider as force_deploy.
        env: mutable mapping that receives config/roles/networks/context.
            Presumably supplied by a task decorator in the real call path
            — TODO confirm.
        **kwargs: ignored (kept for interface compatibility).
    """
    # FIX: the original defaulted env to None and then did env["config"],
    # which raised TypeError whenever env was omitted. Fall back to a
    # throwaway dict so the call still works (results are then discarded).
    if env is None:
        env = {}
    conf = G5kConf.from_dictionnary(config["g5k"])
    provider = G5k(conf)
    roles, networks = provider.init(force_deploy=force)
    env["config"] = config
    env["roles"] = roles
    env["networks"] = networks
    env["context"] = "g5k"
def destroy(self, env):
    """Tear down the Grid'5000 job described by env's 'config' entry."""
    LOGGER.debug("Building enoslib configuration")
    enoslib_conf = _build_enoslib_conf(env.get('config'))
    g5k_conf = Configuration.from_dictionnary(enoslib_conf)
    LOGGER.debug("Creating G5K provider")
    g5k_provider = provider.G5k(g5k_conf)
    LOGGER.info("Destroying G5K deployment")
    g5k_provider.destroy()
def test_from_dictionnary_minimal(self):
    """With empty resources, every setting falls back to its default."""
    minimal = {"resources": {"machines": [], "networks": []}}
    conf = Configuration.from_dictionnary(minimal)
    expectations = [
        (constants.DEFAULT_ENV_NAME, conf.env_name),
        (constants.DEFAULT_JOB_NAME, conf.job_name),
        (constants.DEFAULT_JOB_TYPE, conf.job_type),
        (constants.DEFAULT_QUEUE, conf.queue),
        (constants.DEFAULT_WALLTIME, conf.walltime),
        ([], conf.machines),
        ([], conf.networks),
    ]
    for expected, actual in expectations:
        self.assertEqual(expected, actual)
def init(self, config, force=False):
    """Claim Grid'5000 resources, provision them, and return the
    (roles, networks) pair produced by the provider."""
    LOGGER.debug("Building enoslib configuration")
    enoslib_conf = _build_enoslib_conf(config)
    LOGGER.debug("Creating G5K provider")
    g5k = provider.G5k(Configuration.from_dictionnary(enoslib_conf))
    LOGGER.info("Initializing G5K provider")
    roles, networks = g5k.init(force)
    _provision(roles)
    return roles, networks
def test_from_dictionnary_unbound_network(self):
    """When no declared network id matches the machine's
    primary_network, from_dictionnary must raise ValueError."""
    machine = {
        "roles": ["r1"],
        "nodes": 2,
        "cluster": "cluste1",
        "primary_network": "n1",
    }
    network = {
        "id": "unbound_network",
        "roles": ["rn1"],
        "site": "siteA",
        "type": "prod",
    }
    payload = {"resources": {"machines": [machine],
                             "networks": [network]}}
    with self.assertRaises(ValueError):
        Configuration.from_dictionnary(payload)
def make_conf(testing=True):
    """Build the Grid'5000 configuration for the OpenStack deployment.

    Args:
        testing: when True, target a smaller test reservation (10 nodes
            on rennes/paravance, immediate start); otherwise the full
            deployment (15 nodes on nantes/ecotype, dated reservation).

    Returns:
        A finalized Configuration.
    """
    kavlan = NetworkConfiguration(id="net",
                                  type="kavlan",
                                  roles=["network_interface"],
                                  site="nantes")
    prod_net = NetworkConfiguration(id="net_ext",
                                    type="prod",
                                    roles=["neutron_external_interface"],
                                    site="nantes")
    # FIX: this local was named `os`, shadowing the stdlib module name;
    # renamed to `machine`. The dead `conf = None` initializer is dropped.
    machine = MachineConfiguration(roles=["OpenStack"],
                                   cluster="ecotype",
                                   nodes=10 if testing else 15,
                                   primary_network=kavlan,
                                   secondary_networks=[prod_net])
    if testing:
        # Retarget the test run to rennes/paravance.
        machine.cluster = "paravance"
        kavlan.site = "rennes"
        prod_net.site = "rennes"
        conf = (Configuration.from_settings(
            walltime="5:00:00",
            job_name="os-imt-aio-test",
            env_name="ubuntu1804-x64-min",
            # You can specify a jobid with
            # oargrid_jobids=[["nantes", "189621"]]
        ).add_network_conf(kavlan).add_network_conf(prod_net).add_machine(
            **machine.__dict__))
    else:
        # NOTE(review): only prod_net is registered here although the
        # machine's primary_network is kavlan — preserved as-is, verify
        # against the provider's expectations.
        conf = (Configuration.from_settings(
            reservation="2020-02-03 12:00:01",
            walltime="59:59:58",
            job_name="os-imt-aio",
            env_name="ubuntu1804-x64-min").add_network_conf(
                prod_net).add_machine(**machine.__dict__))
    conf.finalize()
    return conf
def test_from_dictionnary_some_metadatas(self):
    """Explicit job_name/queue override the defaults; settings not
    present in the description keep their default values."""
    conf = Configuration.from_dictionnary({
        "job_name": "test",
        "queue": "production",
        "resources": {"machines": [], "networks": []},
    })
    self.assertEqual(constants.DEFAULT_ENV_NAME, conf.env_name)
    self.assertEqual("test", conf.job_name)
    self.assertEqual(constants.DEFAULT_JOB_TYPE, conf.job_type)
    self.assertEqual("production", conf.queue)
    self.assertEqual(constants.DEFAULT_WALLTIME, conf.walltime)
def make_conf(testing=True) -> Configuration:
    """Build the Grid'5000 configuration for the lab deployment.

    With testing=True the reservation is shrunk: no start date, shorter
    walltime, test job name, and a single node.
    """
    base = {
        "reservation": "2021-03-05 07:00:01",
        "walltime": "11:59:58",
        "job_name": "lab-2021-imta-fise-login-os",
        "env_name": "ubuntu2004-x64-min",
        "project": "lab-2021-imta-fise-login-os",
        "resources": {
            "networks": [
                {
                    "id": "net",
                    "type": "prod",
                    "roles": ["network_interface"],
                    "site": "rennes",
                },
                # Keep this for future work, for a deployment
                # based OpenStack.
                # {
                #     # Note: *NEVER* assigns this to a machine!
                #     "id": "net_ext",
                #     "type": "slash_22",
                #     "roles": ["neutron_external_interface"],
                #     "site": "rennes",
                # },
            ],
            "machines": [{
                "roles": ["OpenStack"],
                "cluster": "paravance",
                "nodes": 13,
                "primary_network": "net",
                "secondary_networks": [],
            }],
        }
    }
    if testing:
        base.pop("reservation")
        base["walltime"] = "07:00:00"
        base["job_name"] = "test-lab-2021-imta-fise-login-os"
        base["resources"]["machines"][0]["nodes"] = 1
    return Configuration.from_dictionnary(base)
def test_from_dictionnary_with_machines_and_secondary_networks(self):
    """The machine group must be wired to both the primary ("n1") and
    secondary ("n2") network objects built from the description."""
    machine_desc = {
        "roles": ["r1"],
        "nodes": 2,
        "cluster": "cluste1",
        "primary_network": "n1",
        "secondary_networks": ["n2"],
    }
    networks_desc = [
        {"id": "n1", "roles": ["rn1"], "site": "siteA", "type": "prod"},
        {"id": "n2", "roles": ["rn2"], "site": "siteA", "type": "kavlan"},
    ]
    conf = Configuration.from_dictionnary(
        {"resources": {"machines": [machine_desc],
                       "networks": networks_desc}})
    self.assertTrue(len(conf.machines) == 1)
    self.assertTrue(len(conf.networks) == 2)
    group = conf.machines[0]
    nets = conf.networks
    self.assertEqual(2, group.nodes)
    # check the network ref
    self.assertTrue(group.primary_network in nets)
    self.assertEqual("n1", group.primary_network.id)
    self.assertEqual(1, len(group.secondary_networks))
    self.assertTrue(group.secondary_networks[0] in nets)
    self.assertEqual("n2", group.secondary_networks[0].id)
def test_programmatic(self):
    """Building a configuration through the fluent API registers every
    machine conf and the shared network conf."""
    conf = Configuration()
    network = NetworkConfiguration(id="id",
                                   roles=["my_network"],
                                   type="prod",
                                   site="rennes")
    conf.add_network_conf(network)
    machine_confs = [
        MachineConfiguration(roles=["r1"],
                             cluster="paravance",
                             primary_network=network),
        MachineConfiguration(roles=["r2"],
                             cluster="parapluie",
                             primary_network=network,
                             nodes=10),
    ]
    for machine_conf in machine_confs:
        conf.add_machine_conf(machine_conf)
    conf.finalize()
    self.assertEqual(2, len(conf.machines))
    self.assertEqual(1, len(conf.networks))
from pathlib import Path
from utils import _get_address
from box import Box
from boxes import Boxes, BoxesType

## Experiment on a small unbalanced tree of working-boxes with small
## parameters. Some boxes are voluntarily stopped at given time. We
## still expect a convergence, for boxes are independent. Crash of
## ones do not impact the well-functioning of others.

# Grid'5000 target for the reservation.
CLUSTER = "econome"
SITE = "nantes"

# NOTE(review): Configuration/NetworkConfiguration are presumably imported
# elsewhere in this file (not visible in this chunk) — confirm.
conf = Configuration.from_settings(job_type='allow_classic_ssh',
                                   job_name='working-boxes failures_3',
                                   walltime='02:00:00')
network = NetworkConfiguration(id='n1', type='prod',
                               roles=['my_network'],
                               site=SITE)
# Single node hosting the collector, front and working roles.
conf.add_network_conf(network)\
    .add_machine(roles=['collector', 'front', 'working'],
                 cluster=CLUSTER,
                 nodes=1,
                 primary_network=network)\
    .finalize()

# Experiment parameters: RNG seed, number of queries, number of
# voluntary crashes.
SEED = 6
NB_QUERY = 1500
NB_CRASHES = 3
NetworkConfiguration)  # (fragment: the `from ... import (` opener lies outside this chunk)
import logging

logging.basicConfig(level=logging.DEBUG)

# Reserve one "server" and one "client" node on rennes/parapluie attached
# to a kavlan network, requesting the debian9-x64-std environment.
network = NetworkConfiguration(id="n1", type="kavlan",
                               roles=["mynetwork"],
                               site="rennes")
conf = Configuration.from_settings(job_name="flent_on",
                                   env_name="debian9-x64-std")\
    .add_network_conf(network)\
    .add_machine(roles=["server"],
                 cluster="parapluie",
                 nodes=1,
                 primary_network=network)\
    .add_machine(roles=["client"],
                 cluster="parapluie",
                 nodes=1,
                 primary_network=network)\
    .finalize()

provider = G5k(conf)
roles, networks = provider.init()
discover_networks(roles, networks)

with play_on("all", roles=roles) as p:
    # flent requires python3, so we default python to python3
    p.shell(
        "update-alternatives --install /usr/bin/python python /usr/bin/python3 1"
    )
    p.apt_repository(
        # (fragment: the call's arguments continue outside this chunk)
Configuration.from_settings(  # (fragment: enclosing assignment/parenthesis lies outside this chunk)
    job_type="allow_classic_ssh",
    job_name="QTestJob",
    walltime='2:00:00'
    #env_name="/grid5000/images/debian9-x64-base-2020032721.tgz"
)
# Only the luxembourg network and petitprince cluster are active; the
# other sites are kept commented out below for easy switching.
#.add_network_conf(network_rennes)
#.add_network_conf(network_nantes)
#.add_network_conf(network_sophia)
.add_network_conf(network_luxembourg)
#.add_network_conf(network_grenoble)
#.add_network_conf(network_nancy)
#.add_network_conf(network_lyon)
#.add_network_conf(network_lille)
# .add_machine(
#     roles=["control"],
#     cluster="paravance",
#     nodes=1,
#     primary_network=network_rennes
# )
# .add_machine(
#     roles=["control"],
#     cluster="econome",
#     nodes=2,
#     primary_network=network_nantes
# )
# .add_machine(
#     roles=["control"],
#     cluster="uvb",
#     nodes=2,
#     primary_network=network_sophia
# )
.add_machine(roles=["control"],
             cluster="petitprince",
             nodes=4,
             primary_network=network_luxembourg)
# .add_machine(
#     roles=["control"],
#     cluster="dahu",
#     nodes=2,
#     primary_network=network_grenoble
# )
# .add_machine(
#     roles=["control"],
#     cluster="gros",
#     nodes=2,
#     primary_network=network_nancy
# )
# .add_machine(
#     roles=["control"],
#     cluster="nova",
#     #cluster="taurus",
#     nodes=2,
#     primary_network=network_lyon
# )
# .add_machine(
#     roles=["control"],
#     cluster="chiclet",
#     nodes=2,
#     primary_network=network_lille
# )
.finalize())
# (fragment: the next two lines belong to a generator function whose `def`
# lies outside this chunk; `yield` proves the enclosing function context.)
# NOTE(review): assumes `mac` is an EUI-style object exposing .bits() —
# defined outside this chunk, confirm.
ip = ['10'] + [str(int(i, 2)) for i in mac.bits().split('-')[-3:]]
yield str(mac).replace('-', ':'), '.'.join(ip)

# claim the resources
prod = NetworkConfiguration(id="n1", type="prod",
                            roles=["my_network"],
                            site="rennes")
# Reserve PMS compute nodes on parasilo plus a /22 subnet (for VMs,
# presumably — the subnet is not attached to any machine).
conf = Configuration.from_settings(job_type="allow_classic_ssh",
                                   job_name="enoslib-virt",
                                   walltime="01:00:00")\
    .add_network_conf(prod)\
    .add_network(id="_subnet_network",
                 type="slash_22",
                 roles=["my_subnet"],
                 site="rennes")\
    .add_machine(roles=["compute"],
                 cluster="parasilo",
                 nodes=PMS,
                 primary_network=prod)\
    .finalize()

provider = G5k(conf)
roles, networks = provider.init()

# path to the inventory
inventory = os.path.join(os.getcwd(), "hosts")

# generate an inventory compatible with ansible
generate_inventory(roles, networks, inventory, check_networks=True)
from enoslib.api import generate_inventory
from enoslib.infra.enos_g5k.provider import G5k
from enoslib.infra.enos_g5k.configuration import (Configuration,
                                                  NetworkConfiguration)

import logging
import os

logging.basicConfig(level=logging.INFO)

# path to the inventory
inventory = os.path.join(os.getcwd(), "hosts")

# claim the resources
# (job_type allow_classic_ssh and no env_name — presumably a
# non-kadeploy job, as the job name suggests; confirm.)
conf = Configuration.from_settings(job_type="allow_classic_ssh",
                                   job_name="test-non-deploy")
network = NetworkConfiguration(id="n1", type="prod",
                               roles=["my_network"],
                               site="rennes")
# Two parapluie nodes on the production network.
conf.add_network_conf(network)\
    .add_machine(roles=["control"],
                 cluster="parapluie",
                 nodes=1,
                 primary_network=network)\
    .add_machine(roles=["control", "network"],
                 cluster="parapluie",
                 nodes=1,
                 primary_network=network)\
    .finalize()

provider = G5k(conf)
from enoslib.infra.enos_g5k.configuration import (Configuration,
                                                  NetworkConfiguration)
from enoslib.service import Locust
from energy import Energy

import logging

logging.basicConfig(level=logging.DEBUG)

# Grid'5000 targets.
# NOTE(review): CLUSTER2 is unused in this chunk — possibly referenced
# further down the file.
CLUSTER1 = "econome"
CLUSTER2 = "ecotype"
SITE = "nantes"

# claim the resources
conf = Configuration.from_settings(job_type='allow_classic_ssh',
                                   job_name='energy-service',
                                   walltime='01:00:00')
network = NetworkConfiguration(id='n1', type='prod',
                               roles=['my_network'],
                               site=SITE)
# One control node and one compute node, both on CLUSTER1.
conf.add_network_conf(network)\
    .add_machine(roles=['control'],
                 cluster=CLUSTER1,
                 nodes=1,
                 primary_network=network)\
    .add_machine(roles=['compute'],
                 cluster=CLUSTER1,
                 nodes=1,
                 primary_network=network)\
    .finalize()
with EVENT_DATABASE_PATH.open('r') as f: event_db = json.load(f.read()) logging.info(f"Loading event database from local file…") else: logging.info(f"Event database not found locally, initialize one…") cs = get_all_clusters_sites() for cluster in CLUSTERS: if cluster not in cs: raise Exception( f'Cluster {cluster} was not found in list of clusters…') ## parallel callibration of clusters conf = Configuration.from_settings( job_type='allow_classic_ssh', job_name=f'calibrate energy-service at {cluster}', walltime='01:00:00') ## (TODO) check the default available network at each site network = NetworkConfiguration(id='n1', type='prod', roles=['my_network'], site=cs[cluster]) conf.add_network_conf(network)\ .add_machine(roles=['calibrate'], cluster=cluster, nodes=1, ## we deploy everything on 1 machine primary_network=network)\ .finalize() provider = G5k(conf)
# path to the inventory inventory = os.path.join(os.getcwd(), "hosts") # claim the resources network = NetworkConfiguration(id="n1", type="kavlan", roles=["my_network"], site="rennes") conf = Configuration.from_settings(job_name="test-enoslib")\ .add_network_conf(network)\ .add_machine(roles=["control"], cluster="paravance", nodes=1, primary_network=network)\ .add_machine(roles=["control", "compute"], cluster="paravance", nodes=1, primary_network=network)\ .finalize() provider = G5k(conf) roles, networks = provider.init() # generate an inventory compatible with ansible generate_inventory(roles, networks, inventory, check_networks=True) # destroy the reservation provider.destroy()
"nodes": 1, "primary_network": "n1", "secondary_networks": ["n2"] }], "networks": [{ "id": "n1", "type": "kavlan", "roles": ["my_network"], "site": "rennes", }, { "id": "n2", "type": "kavlan", "roles": ["my_second_network"], "site": "rennes", }] } } # path to the inventory inventory = os.path.join(os.getcwd(), "hosts") # claim the resources conf = Configuration.from_dictionnary(provider_conf) provider = G5k(conf) roles, networks = provider.init() # generate an inventory compatible with ansible generate_inventory(roles, networks, inventory, check_networks=True) # destroy the reservation
## Experiment on a small unbalanced tree of working-boxes with small ## parameters. Boxes get more possibility to distribute objectives to ## other boxes and keep the ones with lowest variance. In turns, there ## is a trade-off between accuracy and fairness. Here, the fairness ## factor is 0.2 . CLUSTER = "econome" SITE = "nantes" conf = Configuration.from_settings(job_type='allow_classic_ssh', job_name=f'working-boxes {__file__}', walltime='02:00:00') network = NetworkConfiguration(id='n1', type='prod', roles=['my_network'], site=SITE) conf.add_network_conf(network)\ .add_machine(roles=['collector', 'front', 'working'], cluster=CLUSTER, nodes=1, primary_network=network)\ .finalize() SEED = 5
from enoslib.api import generate_inventory
from enoslib.infra.enos_g5k.provider import G5k
from enoslib.infra.enos_g5k.configuration import (Configuration,
                                                  NetworkConfiguration)

import logging
import os

logging.basicConfig(level=logging.INFO)

# path to the inventory
inventory = os.path.join(os.getcwd(), "hosts")

# claim the resources
conf = Configuration.from_settings(job_type="allow_classic_ssh")
prod_network = NetworkConfiguration(id="n1", type="prod",
                                    roles=["my_network"],
                                    site="rennes")
# One parapluie node on the production network, plus a slash_22 subnet
# that — as its id says — is not linked to any machine.
conf.add_network_conf(prod_network)\
    .add_network(id="not_linked_to_any_machine",
                 type="slash_22",
                 roles=["my_subnet"],
                 site="rennes")\
    .add_machine(roles=["control"],
                 cluster="parapluie",
                 nodes=1,
                 primary_network=prod_network)\
    .finalize()

provider = G5k(conf)
# path to the inventory inventory = os.path.join(os.getcwd(), "hosts") # claim the resources network = NetworkConfiguration(id="n1", type="prod", roles=["my_network"], site="rennes") force_deploy = True if os.environ.get("G5K_FORCE") else False conf = Configuration.from_settings(job_name="Miguel_test", env_name="debian9-x64-std", force_deploy=force_deploy, walltime="02:00:00")\ .add_network_conf(network)\ .add_machine(roles=["control"], cluster="parasilo", nodes=1, primary_network=network)\ .finalize() provider = G5k(conf) roles, network = provider.init() VAGRANT_URL = "https://releases.hashicorp.com/vagrant/2.0.3/vagrant_2.0.3_x86_64.deb" VBOX_URL = "https://download.virtualbox.org/virtualbox/5.2.10/virtualbox-5.2_5.2.10-122088~Debian~stretch_amd64.deb" with play_on("all", roles=roles) as p: p.shell( "update-alternatives --install /usr/bin/python python /usr/bin/python3 1" )