def startInstance(nb, diskImg, instType, index, r=None, link=None):
    """Add *nb* raw PCs (with LAN interfaces) to a request RSpec.

    Nodes are named "node<k>" and addressed 192.168.1.<k>/24 with
    k = index + i + 1, so successive calls with an increasing *index*
    extend the same request/LAN without name or address collisions.

    Args:
        nb: number of nodes to create.
        diskImg: disk image URN applied to every node.
        instType: hardware type applied to every node.
        index: offset used for node names, interface ids and IP addresses.
        r: existing request to extend; a fresh rspec.Request() when None.
        link: existing LAN to join; a fresh rspec.LAN("lan") when None.

    Returns:
        (r, link) so callers can chain further startInstance() calls.
    """
    if r is None:
        r = rspec.Request()
    # BUGFIX: previously the LAN was only created together with a fresh
    # Request, so extending an existing request without a link crashed,
    # and a caller-supplied link could be silently discarded.
    if link is None:
        link = rspec.LAN("lan")
    for i in range(nb):
        node = rspec.RawPC("node" + str(i + index + 1))
        node.disk_image = diskImg
        node.hardware_type = instType
        iface = node.addInterface("if" + str(index + i + 1))
        # Specify the component id and the IPv4 address.
        iface.component_id = "eth" + str(i + index + 1)
        iface.addAddress(
            rspec.IPv4Address("192.168.1." + str(index + i + 1),
                              "255.255.255.0"))
        link.addInterface(iface)
        # Fetch the hotstuff sources, unpack them, and install Go.
        node.addService(
            rspec.Install(
                url="https://github.com/neilgiri/hotstuff/archive/master.tar.gz",
                path="/users/giridhn"))
        node.addService(
            rspec.Execute(
                shell="bash",
                command="sudo tar -C /users/giridhn -xvzf /users/giridhn/hotstuff-master.tar.gz ; sudo apt-get update ; sudo apt-get install --yes golang-go"))
        r.addResource(node)
    return r, link
def add_node_to_rspec(config_info, site_dict, link_ifaces, vn, rspec):
    '''Add one XenVM per virtual-network node to the RSpec.

    Args:
        config_info: per-node-type dict with 'install_script', 'execute_cmd'
            and optional 'routable_control_ip' entries.
        site_dict: maps site id (or 'any') -> iterable of node ids.
        link_ifaces: maps link id -> list of interfaces; appended to here.
        vn: virtual network; node objects looked up via vn.node_dict.
        rspec: request RSpec being built.

    Returns:
        The same rspec, with one VM resource added per node.
    '''
    for site_id, node_in_site in site_dict.items():
        for node_id in node_in_site:
            node = vn.node_dict[node_id]
            vm = ig.XenVM(node.hostname)
            # Nodes bound to a particular InstaGENI site carry that site's
            # component_manager_id; 'any' leaves placement to the mapper.
            if site_id != 'any':
                vm.component_manager_id = site_info.ig_site[int(
                    site_id)].component_manager_id
            for iface in node.iface_list:
                vm_iface = vm.addInterface(iface.id)
                vm_iface.addAddress(pg.IPv4Address(iface.addr, iface.prefix))
                link_ifaces[iface.link_id].append(vm_iface)
            if node.node_type != 'lan-sw':
                # 'lan-sw' is an invisible helper node for LAN topology; it
                # gets no image or services. All other node types do.
                vm.disk_image = node.disk_image
                node_cfg = config_info[node.node_type]
                if "routable_control_ip" in node_cfg:
                    vm.routable_control_ip = \
                        node_cfg['routable_control_ip'] in YES
                # Each non-empty line is "url, path" for an Install service.
                for service in node_cfg['install_script'].split('\n'):
                    if service != '':
                        parts = service.split(',')
                        vm.addService(
                            pg.Install(url=parts[0].strip(),
                                       path=parts[1].strip()))
                # Each non-empty line is "command, shell" for Execute.
                for cmd in node_cfg['execute_cmd'].split('\n'):
                    if cmd != '':
                        parts = cmd.split(',')
                        vm.addService(
                            pg.Execute(shell=parts[1].strip(),
                                       command=parts[0].strip()))
            rspec.addResource(vm)
    return rspec
def cluster(N_NODES, AM, SLICE_NAME, NODE_NAME, XML_NAME, SOFTWARE, PUBLIC_IP):
    """Request a cluster of N_NODES XenVMs, deploy it via *AM*, and dump the RSpec.

    The first node ("master") is given the routable control IP from
    PUBLIC_IP; remaining nodes are named NODE_NAME % index. When more than
    one node is requested, all nodes share LAN "lan0".

    Args:
        N_NODES: total number of VMs (master + workers).
        AM: aggregate-manager handle used to create the sliver.
        SLICE_NAME: name of the GENI slice to deploy into.
        NODE_NAME: printf-style template for worker node names.
        XML_NAME: file path where the request RSpec XML is written.
        SOFTWARE: currently unused; kept for interface compatibility.
        PUBLIC_IP: value assigned to the master's routable_control_ip.
    """
    # Single source of truth for the image URN (was duplicated inline).
    DISK_IMAGE = "urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU14-64-STD"
    IFACE = "if%d"
    rspec = PG.Request()
    link = None
    for i in range(N_NODES):
        if i == 0:
            vm = IGX.XenVM("master")
            vm.disk_image = DISK_IMAGE
            rspec.addResource(vm)
            vm.routable_control_ip = PUBLIC_IP
            if N_NODES > 1:
                vm_iface = vm.addInterface(IFACE % i)
                link = PG.LAN("lan0")
                link.addInterface(vm_iface)
        else:
            vm = IGX.XenVM(NODE_NAME % (i - 1))
            vm.disk_image = DISK_IMAGE
            rspec.addResource(vm)
            vm_iface = vm.addInterface(IFACE % i)
            link.addInterface(vm_iface)

        # Docker installation (for Trusty).
        # NOTE(review): the Install URL is empty, so nothing is fetched into
        # /tmp/docker before the script below runs -- confirm the tarball URL.
        vm.addService(PG.Install(url="", path="/tmp/docker"))
        vm.addService(
            PG.Execute(shell="/bin/bash",
                       command="bash /tmp/docker/docker_inst_trusty.sh"))

    if N_NODES > 1:
        rspec.addResource(link)

    # Deploy resources at GENI and show login info.
    manifest = AM.createsliver(context, SLICE_NAME, rspec)
    geni.util.printlogininfo(manifest=manifest)

    # Persist the request RSpec as XML.
    rspec.writeXML(XML_NAME)
# Check parameter validity if params.n < 1 or params.n > 128: pc.reportError(portal.ParameterError("You must choose from 1 to 128")) # Check parameter for image if params.i == 0: disk_image = "urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU12-64-STD" else: disk_image = "urn:publicid:IDN+emulab.net+image+emulab-ops:CENTOS71-64-STD" # Create nodes and links link = pg.LAN("lan") # Add install and execute install = pg.Install(url="http://myweb.ttu.edu/ddai/codes/tool.tar.gz", path="/local") execute = pg.Execute(shell="bash", command="/local/CloudLab2OpenHPC/install.sh") for i in range(params.n): node = pg.RawPC("node" + str(i)) node.hardware_type = "r320" node.disk_image = disk_image bs = node.Blockstore("bs", "/local") bs.size = "60GB" node.addService(install) node.addService(execute) iface = node.addInterface("if" + str(i)) link.addInterface(iface) rspec.addResource(node)
# Setup node names. rc_aliases = [] for i in range(params.size): rc_aliases.append("sandstorm%02d" % (i + 1)) # Setup the cluster one node at a time. for i in range(params.size): node = rspec.RawPC(rc_aliases[i]) node.hardware_type = params.type node.disk_image = urn.Image(cloudlab.Utah, "emulab-ops:%s" % params.image) # Install and run the startup scripts. node.addService(rspec.Install( url="https://github.com/chinkulkarni/cloudLab-scripts/" +\ "archive/master.tar.gz", path="/local")) node.addService(rspec.Execute( shell="sh", command="sudo mv /local/cloudLab-scripts-master " +\ "/local/scripts")) node.addService( rspec.Execute(shell="sh", command="sudo /local/scripts/sandstorm_setup.sh")) request.addResource(node) # Add this node to the LAN. iface = node.addInterface("eth0") lan.addInterface(iface)
def nodes_rspec(context, number):
    """Build an OSPF performance-test topology and write it to
    "Performance-<number>.rspec".

    Topology: two aggregation nodes A and B, each with *number* host-facing
    and *number* router-facing interfaces. Per index i there is one router
    (connecting A and B) and two iperf hosts (one behind A, one behind B),
    all wired with 20 Mbps links.

    Args:
        context: unused; kept for interface compatibility.
        number: number of router/host "rungs" to create.
    """
    r = PG.Request()

    def make_iface(node, name, addr):
        # Add an interface with a /24 IPv4 address to *node* and return it.
        intf = node.addInterface(name)
        intf.addAddress(PG.IPv4Address(addr, "255.255.255.0"))
        return intf

    def add_ospf_services(node, nintf):
        # Install and run the OSPF setup script sized for nintf interfaces.
        node.addService(
            PG.Install(
                url="http://pages.cs.wisc.edu/~rkrish/GENI/ospf-script-{0}intf.tar.gz".format(nintf),
                path="/local"))
        node.addService(
            PG.Execute(shell="sh",
                       command="/local/ospf-script-{0}intf.sh".format(nintf)))

    # Node A: host-facing (192.165.x.1) and router-facing (192.166.x.1) sides.
    nodeA = PG.Node("A", "emulab-xen")
    add_ospf_services(nodeA, 2 * number)
    nodeA.exclusive = False
    nodeA_host_intf_list = []
    nodeA_router_intf_list = []
    for i in range(number):
        nodeA_host_intf_list.append(
            make_iface(nodeA, "if_AH{0}".format(i), "192.165.{0}.1".format(i + 1)))
        nodeA_router_intf_list.append(
            make_iface(nodeA, "if_AR{0}".format(i), "192.166.{0}.1".format(i + 1)))
    r.addResource(nodeA)

    # Node B: router-facing (192.167.x.1) and host-facing (192.168.x.1) sides.
    nodeB = PG.Node("B", "emulab-xen")
    add_ospf_services(nodeB, 2 * number)
    nodeB.exclusive = False
    nodeB_host_intf_list = []
    nodeB_router_intf_list = []
    for i in range(number):
        nodeB_router_intf_list.append(
            make_iface(nodeB, "if_BR{0}".format(i), "192.167.{0}.1".format(i + 1)))
        nodeB_host_intf_list.append(
            make_iface(nodeB, "if_BH{0}".format(i), "192.168.{0}.1".format(i + 1)))
    r.addResource(nodeB)

    # Per rung: one two-interface router between A and B, plus an iperf host
    # on each side.
    router_intf_A_list = []
    router_intf_B_list = []
    host_intf_A_list = []
    host_intf_B_list = []
    for i in range(number):
        router = PG.Node("Router{0}".format(i + 1), "emulab-xen")
        # Routers always have exactly two interfaces, hence the fixed "2".
        add_ospf_services(router, 2)
        router.exclusive = False
        router_intf_A_list.append(
            make_iface(router, "if_RA{0}".format(i), "192.166.{0}.2".format(i + 1)))
        router_intf_B_list.append(
            make_iface(router, "if_RB{0}".format(i), "192.167.{0}.2".format(i + 1)))
        r.addResource(router)

        hostHA = PG.Node("H{0}".format(2 * i + 1), "emulab-xen")
        hostHA.addService(
            PG.Execute(shell="sh", command="sudo yum install iperf -y"))
        hostHA.exclusive = False
        host_intf_A_list.append(
            make_iface(hostHA, "if_HA{0}".format(i), "192.165.{0}.2".format(i + 1)))
        r.addResource(hostHA)

        hostHB = PG.Node("H{0}".format(2 * i + 2), "emulab-xen")
        hostHB.addService(
            PG.Execute(shell="sh", command="sudo yum install iperf -y"))
        hostHB.exclusive = False
        host_intf_B_list.append(
            make_iface(hostHB, "if_HB{0}".format(i), "192.168.{0}.2".format(i + 1)))
        r.addResource(hostHB)

    def add_link(name, intf_a, intf_b):
        # 20 Mbps point-to-point link between two interfaces.
        lnk = PG.Link(name)
        lnk.addInterface(intf_a)
        lnk.addInterface(intf_b)
        lnk.bandwidth = 20000
        r.addResource(lnk)

    for i in range(number):
        add_link("linkHA{0}".format(i), host_intf_A_list[i], nodeA_host_intf_list[i])
        add_link("linkHB{0}".format(i), host_intf_B_list[i], nodeB_host_intf_list[i])
        add_link("linkRA{0}".format(i), router_intf_A_list[i], nodeA_router_intf_list[i])
        add_link("linkRB{0}".format(i), router_intf_B_list[i], nodeB_router_intf_list[i])

    r.writeXML("Performance-{0}.rspec".format(number))
This particular profile is a simple example of using a single raw PC. It can
be instantiated on any cluster; the node will boot the default operating
system, which is typically a recent version of Ubuntu.

Instructions:
Wait for the profile instance to start, then click on the node in the topology
and choose the `shell` menu item.
"""
# NOTE(review): the docstring above says "a single raw PC" but the code below
# requests ten nodes on a LAN -- the description looks copied from a template;
# confirm and update it.

# Import the Portal object.
import geni.portal as portal
# Import the ProtoGENI library.
import geni.rspec.pg as rspec

# Create a portal context.
pc = portal.Context()

# Create a Request object to start building the RSpec.
request = pc.makeRequestRSpec()

# Ten raw PCs on one LAN, addressed 192.168.1.1 .. 192.168.1.10, each with
# a Go toolchain installed at startup.
num_nodes = 10
lan = request.LAN()
for i in range(num_nodes):
    node = request.RawPC("node" + str(i))
    iface = node.addInterface("if" + str(i))
    iface.addAddress(rspec.IPv4Address("192.168.1." + str(i + 1),
                                       "255.255.255.0"))
    lan.addInterface(iface)
    # Fetch the Go tarball, then run the repository's install script.
    node.addService(rspec.Install(url="https://golang.org/dl/go1.16.2.linux-amd64.tar.gz",
                                  path="/local"))
    node.addService(rspec.Execute(shell="bash",
                                  command="/local/repository/scripts/install_go.sh"))

# Print the RSpec to the enclosing page.
pc.printRequestRSpec(request)
# Copyright (c) 2016 Barnstormer Softworks, Ltd.

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import geni.rspec.pg as PG

DISK_IMAGE = "urn:publicid:IDN+instageni.gpolab.bbn.com+image+emulab-ops:UBUNTU12-64-STD"

# (hostname, IPv4 address) pairs -- one dingbot VM is requested per entry.
ndata = [("bbn-ig-ps103-a", "10.42.103.111"),
         ("bbn-ig-ps103-b", "10.42.103.112")]

r = PG.Request()

for name, ip in ndata:
    node = PG.XenVM(name)
    node.disk_image = DISK_IMAGE

    # Data-plane interface, attached to the mesoscale shared VLAN below.
    iface = node.addInterface("if0")
    iface.addAddress(PG.IPv4Address(ip, "255.255.255.0"))

    vlan = PG.Link()
    vlan.addInterface(iface)
    vlan.connectSharedVlan("mesoscale-openflow")

    # Fetch the dingbot bundles into /opt and launch the bot with this
    # node's own hostname as an argument.
    node.addService(PG.Install(url="http://www.gpolab.bbn.com/~jbs/dingbot-jbs.tar.gz", path="/opt"))
    node.addService(PG.Install(url="http://www.gpolab.bbn.com/~jbs/dingbot.tar.gz", path="/opt"))
    node.addService(PG.Execute(shell="/bin/bash", command="sudo /opt/dingbot/dingbot /opt/dingbot/dingbot-jbs.json %s" % (node.name)))

    r.addResource(node)
    r.addResource(vlan)

# Write the request out for submission.
r.write("ps103.xml")
    # (continuation of an earlier naming loop, cut off above this span)
    computeNodeNames.append(cpname)
    pass

# One raw PC per compute-node name. Hardware type, image, management-LAN
# membership and the memory-disaggregation setup command all come from the
# profile parameters defined earlier in this file.
for cpname in computeNodeNames:
    cpnode = rspec.RawPC(cpname)
    cpnode.hardware_type = params.hardwareType
    cpnode.disk_image = chosenDiskImage
    # Only multi-node experiments get a management LAN interface.
    if params.computeNodeCount > 1:
        iface = cpnode.addInterface("if0")
        mgmtlan.addInterface(iface)
        if generateIPs:
            iface.addAddress(
                rspec.IPv4Address(get_next_ipaddr(mgmtlan.client_id),
                                  get_netmask(mgmtlan.client_id)))
            pass
        pass
    cpnode.addService(rspec.Install(url=TBURL, path="/opt/"))
    # Pick the setup command matching the requested FluidMem/Infiniswap combo.
    if params.FluidMem and params.Infiniswap:
        cpnode.addService(rspec.Execute(shell="sh", command=COMBINED_CMD))
    elif params.FluidMem:
        cpnode.addService(rspec.Execute(shell="sh", command=FLUIDMEM_CMD))
    elif params.Infiniswap:
        cpnode.addService(rspec.Execute(shell="sh", command=INFINISWAP_CMD))
    else:
        cpnode.addService(rspec.Execute(shell="sh", command=BASE_CMD))
    request.addResource(cpnode)
    # Space-separated list of node names, consumed by setup scripts later.
    computeNodeList += cpname + ' '
    pass

#
# Add our parameters to the request so we can get their values to our nodes.
IMAGE = "urn:publicid:IDN+emulab.net+image+emulab-ops:CENTOS7-64-STD" #IMAGE = "urn:publicid:IDN+emulab.net+image+emulab-ops//hadoop-273" DOWNLOAD = "https://github.com/ifding/hadoopOnGeni/raw/master/download.tar.gz" lan = RSpec.LAN() rspec.addResource(lan) #name node #resource manager node = RSpec.RawPC("namenode") #node.hardware_type = "c8220x" node.disk_image = IMAGE bs = node.Blockstore("nn_bs", "/data") bs.size = "100GB" node.addService(RSpec.Install(DOWNLOAD, "/tmp")) node.addService( RSpec.Execute(shell="/bin/sh", command="sudo sh /tmp/download.sh")) node.addService( RSpec.Execute(shell="/bin/sh", command="sh /tmp/hadoopOnGeni/install.sh")) iface = node.addInterface("if0") lan.addInterface(iface) rspec.addResource(node) #data node #slave node for i in range(params.n): node = RSpec.RawPC("datanode" + str(i)) #node.hardware_type = "c8220" node.disk_image = IMAGE bs = node.Blockstore("bs_" + str(i), "/data")
# Attach the description/instructions tour to the request.
tour.Description(IG.Tour.TEXT, kube_description)
tour.Instructions(IG.Tour.MARKDOWN, kube_instruction)
request.addTour(tour)

# Node kube-server: the Kubernetes master ('m'), a pc3000 with a 200GB
# non-system-volume blockstore at /mnt/extra; fetches the k8s profile
# scripts and runs the master setup for Kubernetes v1.11.5.
kube_m = request.RawPC('m')
#kube_m.hardware_type = 'd430'
kube_m.hardware_type = 'pc3000'
kube_m.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU16-64-STD'
#kube_m.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD'
kube_m.Site('Site 1')
iface0 = kube_m.addInterface('interface-0')
bs0 = kube_m.Blockstore('bs0', '/mnt/extra')
bs0.size = '200GB'
bs0.placement = 'NONSYSVOL'
kube_m.addService(pg.Install('https://github.com/ebozag/cloudlab-k8s-profile/raw/master/cloudlab-k8s-profile.tar.gz', '/mnt/extra/'))
kube_m.addService(pg.Execute(shell="bash", command="/mnt/extra/master.sh v1.11.5"))

# Worker nodes 's1'..'sN', mirroring the master's hardware and blockstore.
# (Loop body continues beyond this span.)
slave_ifaces = []
for i in range(1, params.computeNodeCount + 1):
    kube_s = request.RawPC('s' + str(i))
    #kube_s.hardware_type = 'd430'
    kube_s.hardware_type = 'pc3000'
    kube_s.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU16-64-STD'
    #kube_s.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD'
    kube_s.Site('Site 1')
    slave_ifaces.append(kube_s.addInterface('interface-' + str(i)))
    bs = kube_s.Blockstore('bs' + str(i), '/mnt/extra')
    bs.size = '200GB'
    bs.placement = 'NONSYSVOL'
    kube_s.addService(pg.Install('https://github.com/ebozag/cloudlab-k8s-profile/raw/master/cloudlab-k8s-profile.tar.gz', '/mnt/extra/'))
    # (closes a block started before this span)
    pass

# One raw PC per compute-node name; hardware type and image come from the
# profile parameters defined earlier in the file.
for cpname in computeNodeNames:
    cpnode = RSpec.RawPC(cpname)
    cpnode.disk_image = chosenDiskImage
    cpnode.hardware_type = params.node_type
    # Only multi-node experiments get a management LAN interface.
    if params.computeNodeCount > 1:
        iface = cpnode.addInterface("if0")
        mgmtlan.addInterface(iface)
        if generateIPs:
            iface.addAddress(
                RSpec.IPv4Address(get_next_ipaddr(mgmtlan.client_id),
                                  get_netmask(mgmtlan.client_id)))
            pass
        pass
    cpnode.addService(RSpec.Install(url=TBURL, path="/tmp"))
    # RDMA-capable hardware types get the RDMA setup command; others the
    # default one.
    if params.node_type in ['xl170', 'r320', 'c6220']:
        cpnode.addService(RSpec.Execute(shell="sh", command=TBCMD_rdma))
    else:
        cpnode.addService(RSpec.Execute(shell="sh", command=TBCMD_default))
    rspec.addResource(cpnode)
    # Space-separated list of node names, consumed by setup scripts later.
    computeNodeList += cpname + ' '
    pass

rspec.addResource(mgmtlan)

#
# Add our parameters to the request so we can get their values to our nodes.
# The nodes download the manifest(s), and the setup scripts read the parameter
        # (tail of a pc.reportError(...) call started before this span)
        portal.ParameterError("You must choose a minimum of 1 node "))
pc.verifyParameters()

lan = pg.LAN("lan")
nodes = []

# Dedicated software-switch node at 10.2.100.100; it fetches the etalon
# sources and runs the switch install script.
switch_node = pg.RawPC("switch")
iface = switch_node.addInterface(
    "if-switch", pg.IPv4Address("10.2.100.100", "255.255.255.0"))
lan.addInterface(iface)
switch_node.hardware_type = params.node_type
switch_node.disk_image = SWITCH_DISK_IMAGE
# Prefer placement on the machine type named by params.switch.
switch_node.Desire(params.switch, 1.0)
switch_node.addService(
    pg.Install("https://github.com/ccanel/etalon/archive/master.tar.gz",
               "/local/"))
switch_node.addService(
    pg.Execute("/bin/bash", "/local/etalon-master/bin/switch_install.sh"))
nodes.append(switch_node)
rspec.addResource(switch_node)

# Hosts host1..hostN at 10.2.100.1..N on the same LAN.
# (Loop body continues beyond this span.)
for i in range(1, params.num_nodes + 1):
    node = pg.RawPC("host%s" % i)
    iface = node.addInterface(
        "if-host%s" % i, pg.IPv4Address("10.2.100.%d" % i, "255.255.255.0"))
    lan.addInterface(iface)
    node.hardware_type = params.node_type
    node.disk_image = NODE_DISK_IMAGE
    node.Desire(params.switch, 1.0)
    node.addService(
# Create selected number of nodes for i in range(params.node_count): node.append(request.RawPC('node-%d' % i)) node[ -1].disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU16-64-STD' node[-1].hardware_type = params.node_type # Create a LAN for all the connections lan = request.LAN("lan") # Create a link between each of the nodes to make a ring for i in range(params.node_count): iface = node[i].addInterface("if1") iface.component_id = "eth1" iface.addAddress(pg.IPv4Address("192.168.1." + str(i + 1), "255.255.255.0")) lan.addInterface(iface) # Install and execute scripts on each node for i in range(params.node_count): node[i].addService( pg.Install( url= "https://www.dropbox.com/s/7t91cf0ugt66ypl/cloudlab_setup.tar.gz", path="/home/mpc")) node[i].addService(pg.Execute(shell="bash", command="/home/mpc/setup.sh")) # Print the generated rspec pc.printRequestRSpec(request)
    # (body of a per-node loop whose header is before this span)
    node = RSpec.RawPC(name)
    if name == "rcnfs":
        # Ask for a 200GB file system mounted at /shome on rcnfs, plus a
        # 30GB scratch blockstore at /localdrive.
        bs = node.Blockstore("bs", "/shome")
        bs.size = "200GB"
        bs2 = node.Blockstore("bs2", "/localdrive")
        bs2.size = "30GB"
    node.hardware_type = params.type
    node.disk_image = urn.Image(cloudlab.Utah, "emulab-ops:%s" % params.image)
    # node.component_id = urn.Node(cloudlab.Utah, name)
    # Fetch the script bundle, move it to a stable path, then run startup
    # with the cluster size as its argument.
    node.addService(
        RSpec.Install(
            url="https://github.com/yilongli/CloudLab-scripts/archive/master.tar.gz",
            path='/local'))
    node.addService(
        RSpec.Execute(
            shell="sh",
            command="sudo mv /local/CloudLab-scripts-master /local/scripts"))
    node.addService(
        RSpec.Execute(shell="sh",
                      command="sudo /local/scripts/startup.sh %d" %
                      params.num_nodes))
    rspec.addResource(node)
    iface = node.addInterface("eth0")
    lan.addInterface(iface)
def Node( name, public ):
    """Create a node: a raw PC when params.raw is set, otherwise a
    routable-IP VM (public=True) or a plain Xen VM.

    NOTE(review): PublicVM is defined elsewhere in this file -- presumably a
    XenVM with routable_control_ip set; confirm.
    """
    if params.raw:
        return RSpec.RawPC( name )
    elif public:
        vm = PublicVM( name )
        return vm
    else:
        vm = geni.rspec.igext.XenVM( name )
        return vm

rspec = RSpec.Request()

# Server node: publicly reachable, gets the HBA server bundle plus Chef.
node = Node( params.serverName, True )
node.disk_image = IMAGE
node.addService( RSpec.Install( HBA_URL, "/root" ) )
node.addService( RSpec.Execute( "sh", HBA_CMD_S ) )
node.addService( RSpec.Install( CHEF_URL, "/root" ) )
node.addService( RSpec.Execute( "sh", CHEF_CMD ) )
rspec.addResource( node )

# With at least one client, put server and clients on a shared LAN.
if params.n > 0:
    lan = RSpec.LAN()
    rspec.addResource( lan )
    iface = node.addInterface( "if0" )
    lan.addInterface( iface )

# Client nodes: private VMs running the HBA client command.
# (Loop body continues beyond this span.)
for i in range( params.n ):
    node = Node( params.clientPrefix + "-" + str( i ), False )
    node.disk_image = IMAGE
    node.addService( RSpec.Install( HBA_URL, "/root" ) )
    node.addService( RSpec.Execute( "sh", HBA_CMD_C ) )
request.addTour(tour) # Node kube-server kube_m = request.RawPC('m') #kube_m.hardware_type = 'd430' kube_m.hardware_type = 'pc3000' kube_m.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU16-64-STD' #kube_m.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD' kube_m.Site('Site 1') iface0 = kube_m.addInterface('interface-0') bs0 = kube_m.Blockstore('bs0', '/mnt/extra') bs0.size = '200GB' bs0.placement = 'NONSYSVOL' kube_m.addService( pg.Install( 'https://github.com/ebozag/cloudlab-k8s-profile/raw/master/cloudlab-k8s-profile.tar.gz', '/mnt/extra/')) kube_m.addService( pg.Execute(shell="bash", command="/mnt/extra/master.sh v1.11.3")) slave_ifaces = [] for i in range(1, params.computeNodeCount + 1): kube_s = request.RawPC('s' + str(i)) #kube_s.hardware_type = 'd430' kube_s.hardware_type = 'pc3000' kube_s.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU16-64-STD' #kube_s.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD' kube_s.Site('Site 1') slave_ifaces.append(kube_s.addInterface('interface-' + str(i))) bs = kube_s.Blockstore('bs' + str(i), '/mnt/extra') bs.size = '200GB' bs.placement = 'NONSYSVOL'
# (continues node/VM setup started before this span)
vm.component_id = node.component_id
vm.component_manager_id = node.component_manager_id

# VZNode
# Sorry about the stupidity about how to find OpenVZ hosts. I should fix this.
# Pick the first non-exclusive advertised node that supports OpenVZ ("pcvm")
# but not Xen.
# NOTE(review): dict.has_key() is Python 2-only; this file will not run on
# Python 3 without changing it to "in".
vznode = [node for node in ad.nodes
          if not node.exclusive
          and "emulab-xen" not in node.sliver_types
          and node.hardware_types.has_key("pcvm")][0]
vzc = PG.VZContainer("host3")
intf = vzc.addInterface("if0")
intf.addAddress(PG.IPv4Address(IPS[2], NETMASK))
r.addResource(vzc)
intfs.append(intf)
vzc.component_id = vznode.component_id
vzc.component_manager_id = vznode.component_manager_id

# Controller: Xen VM with a public control IP, running the OpenFlow
# hardware install script.
cvm = PG.XenVM("controller")
cvm.routable_control_ip = True
cvm.component_manager_id = vznode.component_manager_id
cvm.addService(PG.Install(url="http://www.gpolab.bbn.com/experiment-support/OpenFlowHW/of-hw.tar.gz", path="/local"))
cvm.addService(PG.Execute(shell="sh", command="sudo /local/install-script.sh"))
r.addResource(cvm)

# Big LAN! All collected interfaces join the mesoscale shared VLAN.
lan = PG.LAN()
for intf in intfs:
    lan.addInterface(intf)
lan.connectSharedVlan("mesoscale-openflow")
r.addResource(lan)

r.write("%s.rspec" % (site.name))
# Attach the description/instructions tour to the request.
tour = IG.Tour()
tour.Description(IG.Tour.TEXT, kube_description)
tour.Instructions(IG.Tour.MARKDOWN, kube_instruction)
request.addTour(tour)

# Node kube-server: the Kubernetes master ('m'). Hardware type, image,
# blockstore size, script bundle and k8s version all come from variables
# defined earlier in the file.
kube_m = request.RawPC('m')
kube_m.hardware_type = hardware_type
kube_m.disk_image = disk_image
kube_m.Site('Site 1')
iface0 = kube_m.addInterface('interface-0')
bs0 = kube_m.Blockstore('bs0', '/mnt/extra')
bs0.size = storage_capacity
bs0.placement = 'NONSYSVOL'
kube_m.addService(pg.Install(git_tar_scripts, '/mnt/extra/'))
kube_m.addService(
    pg.Execute(shell="bash", command="/mnt/extra/master.sh %s" % k8s_version))

# Worker nodes 's1'..'sN', mirroring the master's hardware and blockstore.
# (Loop body continues beyond this span.)
slave_ifaces = []
for i in range(1, params.computeNodeCount + 1):
    kube_s = request.RawPC('s' + str(i))
    kube_s.hardware_type = hardware_type
    kube_s.disk_image = disk_image
    kube_s.Site('Site 1')
    slave_ifaces.append(kube_s.addInterface('interface-' + str(i)))
    bs = kube_s.Blockstore('bs' + str(i), '/mnt/extra')
    bs.size = storage_capacity
    bs.placement = 'NONSYSVOL'
    kube_s.addService(pg.Install(git_tar_scripts, '/mnt/extra/'))
    kube_s.addService(
Wait for the profile instance to start, then click on the node in the topology
and choose the `shell` menu item.
"""

# Import the Portal object.
import geni.portal as portal
# Import the ProtoGENI library.
import geni.rspec.pg as rspec

# Create a portal context.
pc = portal.Context()

# Create a Request object to start building the RSpec.
request = pc.makeRequestRSpec()

# Ten m510 nodes on one LAN, addressed 192.168.1.1 .. 192.168.1.10; each
# fetches the hotstuff sources, unpacks them, and installs Go.
num_nodes = 10
lan = request.LAN()
for i in range(num_nodes):
    node = request.RawPC("node" + str(i))
    node.hardware_type = "m510"
    iface = node.addInterface("if" + str(i))
    iface.component_id = "eth" + str(i + 1)
    iface.addAddress(rspec.IPv4Address("192.168.1." + str(i + 1),
                                       "255.255.255.0"))
    lan.addInterface(iface)
    node.addService(rspec.Install(url="https://github.com/neilgiri/hotstuff/archive/master.tar.gz", path="/users/giridhn"))
    node.addService(rspec.Execute(shell="bash", command="sudo tar -C /users/giridhn -xvzf /users/giridhn/hotstuff-master.tar.gz ; sudo apt-get update ; sudo apt-get install --yes golang-go"))

# Print the RSpec to the enclosing page.
pc.printRequestRSpec(request)
    # (tail of a Node() factory whose earlier branches are before this span)
    else:
        vm = geni.rspec.igext.XenVM(name)
    vm.ram = params.mem
    # Publicly reachable nodes get a routable control IP.
    if public:
        vm.routable_control_ip = True
    return vm

rspec = RSpec.Request()
lan = RSpec.LAN()
rspec.addResource(lan)

# Name node: public, runs the Hadoop setup plus repository helper scripts.
node = Node("namenode", True)
node.disk_image = IMAGE
node.addService(RSpec.Install(SETUP, "/tmp"))
node.addService(RSpec.Execute("sh", "sudo /tmp/setup/hadoop-setup.sh"))
node.addService(
    RSpec.Execute("sh", "sudo bash /local/repository/setup_hadoop.sh"))
node.addService(
    RSpec.Execute("sh", "sudo bash /local/repository/create_account.sh"))
iface = node.addInterface("if0")
lan.addInterface(iface)
rspec.addResource(node)

# Resource manager: same image/setup bundle as the name node.
# (Statement continues beyond this span.)
node = Node("resourcemanager", True)
node.disk_image = IMAGE
node.addService(RSpec.Install(SETUP, "/tmp"))
node.addService(RSpec.Execute("sh", "sudo /tmp/setup/hadoop-setup.sh"))
node.addService(
    # (body of a per-site loop whose header is before this span)
    try:
        ad = site.listresources(context)
    except Exception:
        # Continue past aggregates that are down.
        continue

    # All resources at this site share its component manager id.
    cmid = ad.nodes[0].component_manager_id

    r = PG.Request()
    ovs_intfs = []

    # Open vSwitch VM with three interfaces (OVS_IPS[0..2]).
    # NOTE(review): xrange is Python 2-only.
    ovs = PG.XenVM("OVS")
    ovs.disk_image = "urn:publicid:IDN+utahddc.geniracks.net+image+emulab-ops:Ubuntu12-64-OVS"
    ovs.addService(PG.Execute(shell="sh", command="sudo /local/install-script.sh"))
    ovs.addService(PG.Install(path="/local", url="http://www.gpolab.bbn.com/experiment-support/OpenFlowOVS/of-ovs.tar.gz"))
    ovs.component_manager_id = cmid
    for idx in xrange(0, 3):
        intf = ovs.addInterface("if%d" % (idx))
        intf.addAddress(PG.IPv4Address(OVS_IPS[idx], NETMASK))
        ovs_intfs.append(intf)
    r.addResource(ovs)

    # Three OpenVZ containers, host1..host3, each linked to the OVS.
    # (Loop body continues beyond this span.)
    for ct in xrange(0, 3):
        vzc = PG.VZContainer("host%d" % (ct + 1))
        vzc.component_manager_id = cmid
        intf = vzc.addInterface("if0")
        intf.addAddress(PG.IPv4Address(HOST_IPS[ct], NETMASK))
        r.addResource(vzc)

        link = PG.LAN()
        link.addInterface(intf)