Ejemplo n.º 1
0
def RunCBR(args, io=None, **kwargs):
    """Locate the project root and run the CBR wrapper with the given args.

    :param args: extra command-line arguments appended to the wrapper command
    :param io: a stdio.StdIO-like object supplying default streams
    :param kwargs: optional 'stdin'/'stdout'/'stderr' keys override the
                   corresponding stream of *io*; everything else is forwarded
                   to invoke.invoke()
    """
    # Get the *scripts* directory, then just run the wrapper
    root_dir = FindRoot(os.getcwd())
    if root_dir is None:
        print("RunCBR: Couldn't find root directory from current directory.")
        return

    cmd = [os.path.join(root_dir, CBR_WRAPPER)]
    cmd.extend(args)

    # Setup our IO, using default IO but overriding with parameters.
    # kwargs.pop() both applies the override and removes the key, so the
    # remaining kwargs can be forwarded to invoke() unchanged.
    if io is None:
        io = stdio.StdIO()
    our_io = stdio.StdIO(kwargs.pop('stdin', io.stdin),
                         kwargs.pop('stdout', io.stdout),
                         kwargs.pop('stderr', io.stderr))

    invoke.invoke(cmd, io=our_io, **kwargs)
Ejemplo n.º 2
0
def RunCBR(args, io=None, **kwargs):
    """Locate the project root and run the CBR wrapper with the given args.

    :param args: extra command-line arguments appended to the wrapper command
    :param io: a stdio.StdIO-like object supplying default streams
    :param kwargs: optional 'stdin'/'stdout'/'stderr' keys override the
                   corresponding stream of *io*; everything else is forwarded
                   to invoke.invoke()
    """
    # Get the *scripts* directory, then just run the wrapper
    root_dir = FindRoot(os.getcwd())
    if root_dir is None:
        print("RunCBR: Couldn't find root directory from current directory.")
        return

    cmd = [os.path.join(root_dir, CBR_WRAPPER)]
    cmd.extend(args)

    # Setup our IO, using default IO but overriding with parameters
    if io is None:
        io = stdio.StdIO()
    our_io = stdio.StdIO(io.stdin, io.stdout, io.stderr)
    for stream in ('stdin', 'stdout', 'stderr'):
        # apply the override and strip the key so the remaining kwargs can
        # be forwarded to invoke() unchanged
        if stream in kwargs:
            setattr(our_io, stream, kwargs.pop(stream))

    invoke.invoke(cmd, io=our_io, **kwargs)
Ejemplo n.º 3
0
    def connect(self):
        '''
        Connect to the SQLite database named by self.db_name and set up
        self.conn (connection object, required for committing and closing)
        and self.c (cursor, required for each database function call).

        If the database file does not exist yet, it is first created from
        db/create_nfpadatabase.sql via the sqlite3 command-line client.
        Note: sqlite3.connect() silently creates an empty file if the name
        is wrong, so pay attention to name the db file correctly.

        :return: True on success
        '''
        self.log.debug(str("connecting to database %s" % self.db_name))

        #check whether database file exists
        if not os.path.isfile(self.db_name):
            self.log.warn(
                "DATABASE FILE NOT EXISTS...creating a new from scratch!")
            create_cmd = "cat db/create_nfpadatabase.sql | sqlite3 " + self.db_name
            invoke.invoke(command=create_cmd, logger=self.log)

            self.log.info("DATABASE FILE CREATED")
        else:
            self.log.debug("DATABASE FILE EXISTS")

        # BUGFIX: the original connected only in the 'file exists' branch, so
        # right after creating a fresh database self.conn/self.c stayed unset.
        # Connect unconditionally now that the file is guaranteed to exist.
        self.conn = sqlite3.connect(self.db_name)
        self.c = self.conn.cursor()

        return True
Ejemplo n.º 4
0
    def connect(self):
        '''
        Connect to the SQLite database named by self.db_name and set up
        self.conn (connection object, required for committing and closing)
        and self.c (cursor, required for each database function call).

        If the database file does not exist yet, it is first created from
        db/create_nfpadatabase.sql via the sqlite3 command-line client.
        Note: sqlite3.connect() silently creates an empty file if the name
        is wrong, so pay attention to name the db file correctly.

        :return: True on success
        '''
        # NOTE: the original body mixed tabs and spaces (a TabError on
        # Python 3); indentation is normalized to 4 spaces here.
        self.log.debug(str("connecting to database %s" % self.db_name))

        #check whether database file exists
        if not os.path.isfile(self.db_name):
            self.log.warn(
                "DATABASE FILE NOT EXISTS...creating a new from scratch!")
            create_cmd = "cat db/create_nfpadatabase.sql | sqlite3 " + self.db_name
            invoke.invoke(command=create_cmd,
                          logger=self.log)

            self.log.info("DATABASE FILE CREATED")
        else:
            self.log.debug("DATABASE FILE EXISTS")

        # BUGFIX: the original connected only in the 'file exists' branch, so
        # right after creating a fresh database self.conn/self.c stayed unset.
        self.conn = sqlite3.connect(self.db_name)
        self.c = self.conn.cursor()

        return True
Ejemplo n.º 5
0
Archivo: nfpa.py Proyecto: P4ELTE/cbt
 def deleteResFiles(self):
     '''
     Remove every temporary result file from pktgen's main directory.

     Result files follow the pattern nfpa.[traffic_type].[packetsize]bytes.res.
     The two symlinks living next to them are recreated on each restart, so
     they could be removed as well, but we deliberately leave them alone.
     '''
     del_cmd = "rm -rf {0}/nfpa.*.res".format(self.config["PKTGEN_ROOT"])
     invoke.invoke(command=del_cmd, logger=self.log)
Ejemplo n.º 6
0
Archivo: nfpa.py Proyecto: cslev/nfpa
 def deleteResFiles(self):
     '''
     Delete all temporary result files (nfpa.[traffic_type].[packetsize]bytes.res)
     from under pktgen's main directory.

     Besides those files only two symlinks exist there; they are recreated on
     every restart, so they are intentionally not deleted here.
     '''
     pktgen_root = self.config["PKTGEN_ROOT"]
     del_cmd = "rm -rf %s/nfpa.*.res" % pktgen_root
     invoke.invoke(command=del_cmd, logger=self.log)
Ejemplo n.º 7
0
    def drawChartViaGnuplot(self, gnuplot_arguments, **params):
        '''
        Call gnuplot with the passed arguments and create .eps charts from
        the results, once per configured plot language.
        Should be called from self.createGnuplotDataFile() function, since
        that function creates the data files and knows what the filenames are.

        :param gnuplot_arguments: String - the -e argument string for gnuplot
        :param params: ul_dl - Bool: special ul_dl bidirectional traffic was
                       set, which selects the *_bidir plotter files
        '''

        #if special ul_dl bidirectional traffic was set, then we need to use
        #the plotter_bidir gnuplot file for visualizing
        ul_dl = params.get('ul_dl', False)

        self.log.debug(gnuplot_arguments)

        #synthetic and realistic traffic/measurements have different GNUplot
        #plotter files; the two cases only differ in the plotter file prefix,
        #so the common loop below is shared (the original duplicated it)
        if self.type == "synthetic":
            plotter_base = "plotter"
        elif self.type == "realistic":
            plotter_base = "plotter_realistic"
        else:
            # unknown measurement type: nothing to draw (matches the original,
            # which fell through both branches without running gnuplot)
            return

        #if bi directional measurement was set, we use different gnuplot files
        bidir = (int(self.config["biDir"]) == 1) or ul_dl

        for language in self.config['plot_language']:
            if bidir:
                plotter_file = "/lib/" + plotter_base + "_bidir_" + language + ".gp"
            else:
                plotter_file = "/lib/" + plotter_base + "_" + language + ".gp"

            #assemble gnuplot command
            gnuplot_command = "gnuplot -e " + gnuplot_arguments + " " + \
                              self.config['MAIN_ROOT'] + plotter_file
            self.log.debug("======= GNUPLOT (%s) =======" % language)
            self.log.debug(gnuplot_command)
            retval = (invoke.invoke(
                command=gnuplot_command,
                logger=self.log,
                email_adapter=self.config['email_adapter']))[0]
            # BUGFIX: the original tested 'is not None OR != ""', which is
            # always true; log only when gnuplot actually produced output
            if retval is not None and retval != '':
                self.log.info(retval)
Ejemplo n.º 8
0
    def _delete_latency_request(self, from_to):
        """Tear down everything created for the hop-by-hop latency
        measurement identified by *from_to* ("{host_from}-{host_to}"):
        delete its network namespaces, remove the ping veth ports from the
        switches and from the network graph, return the used veth ids and
        IP endings to the per-switch pools in self.switch_data, delete the
        related flow rules, and finally drop the whole entry from
        self.modules["latency-hbh"]. No-op if *from_to* is unknown.
        """
        self.log.debug(self.modules)
        self.log.debug("Deleting latency modules' data...")
        key = from_to
        if key in self.modules["latency-hbh"]:
            #removing namespaces
            for ns in self.modules["latency-hbh"][key]["namespaces"]:
                invoke.invoke(command="ip netns delete {}".format(ns))
            #giving back the used veth ids and ip_ends
            for sw in self.modules["latency-hbh"][key]["switches"]:
                i = 0
                for veth in self.modules["latency-hbh"][key]["switches"][sw][
                        'used_veths']:
                    #give these elements back to self.switch_data dictionary's VETHS
                    # self.log.error(self.switch_data[sw])
                    self.switch_data[sw]["veth_id"].append(veth)

                    #delete ports from switch (only the first one is connected to the switch
                    if i == 0:
                        cmd = "ovs-vsctl del-port {} {}-ping-veth{}".format(
                            sw, sw, veth)
                        invoke.invoke(command=cmd)

                        #remove edge from the graph
                        self.nmaas_network_graph.remove_edge(
                            sw, "nmaas-{}-ping-veth{}".format(sw, veth))
                        #remove nodes from the graph
                        self.nmaas_network_graph.remove_node(
                            "nmaas-{}-ping-veth{}".format(sw, veth))

                    i = i + 1

                #sort self.switch_data again as it was in the beginning
                # (pools are kept reverse-sorted so that pop() hands out the
                # smallest free id first)
                self.switch_data[sw]['veth_id'].sort(reverse=True)

                #give these elements back to the self.switch_data dictionary's IPs
                ip = self.modules["latency-hbh"][key]["switches"][sw][
                    'used_ip']
                #according to the last IP id, we remove the corresponding flow rules
                self.delete_flow_rule(switch=sw,
                                      module_id="ping",
                                      module_ip_end=ip)
                # give these elements back to self.switch_data dictionary
                self.switch_data[sw]["ip_end"].append(ip)
                #sort self.switch_data again as it was in the beginning
                self.switch_data[sw]['ip_end'].sort(reverse=True)

            del self.modules["latency-hbh"][key]
        self.log.debug("[DONE]")
        self.log.debug(self.modules)
Ejemplo n.º 9
0
def prepareOpenFlowRules(logger, path, flowrulefile, inport, outport, bidir):
    '''
    Produce a temporary copy of the openflow flow rule file in which the meta
    port placeholders are replaced by the actual control VNF ports. The
    original flow rule file is kept untouched for further reuse.

    Replacements performed (first occurrence per line, matching the original
    sed 's///' behavior):
        <INPORT1>  -> inport
        <OUTPORT2> -> outport
    and additionally, when bidir is set:
        <INPORT2>  -> outport
        <OUTPORT1> -> inport

    :param logger Logger: the logger object from the calling class to make it possible to log
    :param path String: directory of the flow rule file (must contain a tmp/ subdirectory)
    :param flowrulefile String: the defined flow rule file with meta port data
    :param inport String: control_vnf_inport
    :param outport String: control_vnf_outport
    :param bidir Bool: to indicate whether INPORT2 and OUTPORT1 also exist in the file

    :return: path of the new temporary flow rule file
    '''

    #shortening the got params
    l = logger
    f = flowrulefile
    fpath = path + flowrulefile

    #get a timestamp for having unique filenames for temporary flow rule files
    t = time.time()
    timestamp = str(df.getDateFormat(t))

    l.info("Parsing file %s" % f)
    #temporary name for the temporary file
    tmp_file = path + "tmp/" + f + "_tmp_" + inport + "_" + outport + "_" + timestamp

    # IMPROVEMENT: do the substitutions in pure Python instead of shelling out
    # to sed four times - portable and immune to shell-quoting issues. sed
    # 's/x/y/' (no /g) replaces only the FIRST occurrence per line, so
    # str.replace(..., 1) is used to keep the output identical.
    replacements = [("<INPORT1>", inport), ("<OUTPORT2>", outport)]
    if bidir:
        #note again that if there is no such inport and outport in the flow
        #rule files, the replacement is a no-op, so it won't mess the file
        replacements.append(("<INPORT2>", outport))
        replacements.append(("<OUTPORT1>", inport))

    with open(fpath) as src, open(tmp_file, "w") as dst:
        for line in src:
            for placeholder, port in replacements:
                line = line.replace(placeholder, port, 1)
            dst.write(line)

    return tmp_file
Ejemplo n.º 10
0
def prepareOpenFlowRules(logger, path, flowrulefile, inport, outport, bidir):
    '''
    Create a specialized, temporary copy of an openflow flow rule file by
    substituting the meta port placeholders (<INPORT1>, <OUTPORT2>, and - when
    bidir is set - <INPORT2>, <OUTPORT1>) with the actual control VNF ports.
    The original flow rule file is left untouched for further reuse.

    :param logger Logger: the logger object from the calling class to make it possible to log
    :param flowrulefile String: the defined flow rule file with meta port data
    :param inport String: control_vnf_inport
    :param outport String: control_vnf_outport
    :param bidir Bool: to indicate whether INPORT2 and OUTPORT1 also exist in the file

    :return: path of the new temporary flow rule file
    '''
    log = logger
    source_file = path + flowrulefile

    # unique timestamp so concurrent/temporary flow rule files never collide
    timestamp = str(df.getDateFormat(time.time()))

    # replacing placeholders via linux's sed is simpler than doing it in python
    log.info("Parsing file %s" % flowrulefile)
    tmp_file = (path + "tmp/" + flowrulefile + "_tmp_" + inport + "_" +
                outport + "_" + timestamp)

    def run_sed(cmd):
        # one sed invocation, logged through the caller's logger
        invoke.invoke(command=cmd, logger=log)

    # first sed writes the temporary file; every later sed edits it in place
    run_sed('sed "s/<INPORT1>/' + inport + '/" ' + source_file + ' > ' + tmp_file)
    run_sed('sed -i "s/<OUTPORT2>/' + outport + '/" ' + tmp_file)

    if bidir:
        # if a placeholder is absent, sed is a harmless no-op, so calling it
        # unconditionally within this branch cannot corrupt the file
        run_sed('sed -i "s/<INPORT2>/' + outport + '/" ' + tmp_file)
        run_sed('sed -i "s/<OUTPORT1>/' + inport + '/" ' + tmp_file)

    return tmp_file
Ejemplo n.º 11
0
    def _analyze_ping_files(self, filename):
        '''
        Read a ping output file produced by do_ping(), compute the average
        one-way latency from the samples, and delete the file.

        :param filename: String - the filename storing the ping's output
        :return: average one-way latency (average RTT divided by 2)
        '''
        # from host to the first hop
        get_ping_data_cmd = "cat {}|tail -n 3|cut -d '%' -f 2".format(filename)
        # this command reads the last 3 lines of the 4 pings and cuts out the
        # part after '%' (do_ping() sed'ed ping's 'time=' marker to '%')
        ping_data = invoke.invoke(
            command=get_ping_data_cmd)[0].split("\n")[:-1]
        # invoke returns a list whose first (0th) element is the stdout, e.g.
        #   '56.8 ms\n151 ms\n135 ms\n'
        # splitting on '\n' leaves a trailing empty '' element, e.g.
        #   ['56.8 ms', '151 ms', '135 ms', '']
        # which is stripped by [:-1], giving
        #   ['56.8 ms', '151 ms', '135 ms']

        avg = 0
        unit = ping_data[0].split(' ')[1]
        for i in ping_data:
            ping = i.split(' ')
            avg += float(ping[0])  # this will cut down the unit ('ms')
            if unit != ping[1]:
                self.log.debug(
                    "Something is really wrong! Not all pings have the same unit"
                )
                #TODO: handle this case

        #delete ping files!
        invoke.invoke(command="rm -rf {}".format(filename))

        # BUGFIX: divide by the number of samples actually parsed instead of a
        # hard-coded 3.0, so a lost reply no longer skews the average; divide
        # by 2 because ping reports round-trip times and we want one direction
        return avg / len(ping_data) / 2.0
Ejemplo n.º 12
0
 def drawChartViaGnuplot(self, gnuplot_arguments, **params):
     '''
     Call gnuplot with the passed arguments to create .eps charts from the
     results.
     Should be called from self.createGnuplotDataFile() function, since
     that function creates the data files and knows what the filenames are.

     :param gnuplot_arguments: String - the -e argument string for gnuplot
     :param params: ul_dl - Bool: special ul_dl bidirectional traffic was
                    set, which selects the *_bidir plotter files
     '''
     #if special ul_dl bidirectional traffic was set, then we need to use
     #the plotter_bidir gnuplot file for visualizing
     ul_dl = params.get('ul_dl', False)

     self.log.debug(gnuplot_arguments)
     #synthetic traffic/measurements have different GNUplot plotter files
     if self.type == "synthetic":
         plotter_file = "/lib/plotter.gp"
         #if bi directional measurement was set, we use different gnuplot file
         if (int(self.config["biDir"]) == 1) or ul_dl:
             plotter_file = "/lib/plotter_bidir.gp"
     #Realistic traffic/measurements have different GNUplot plotter files
     elif self.type == "realistic":
         plotter_file = "/lib/plotter_realistic.gp"
         #if bi directional measurement was set, we use different gnuplot file
         if (int(self.config["biDir"]) == 1) or ul_dl:
             plotter_file = "/lib/plotter_realistic_bidir.gp"
     else:
         # BUGFIX: with an unknown self.type the original fell through both
         # branches and crashed below with NameError on gnuplot_command
         self.log.debug("Unknown measurement type %s - no chart is drawn"
                        % self.type)
         return

     #assemble gnuplot command
     gnuplot_command = "gnuplot -e " + gnuplot_arguments + " " + \
                       self.config['MAIN_ROOT'] + plotter_file

     self.log.debug("======= GNUPLOT =======")
     self.log.debug(gnuplot_command)
     retval = (invoke.invoke(command=gnuplot_command,
                             logger=self.log,
                             email_adapter=self.config['email_adapter']))[0]
     # BUGFIX: the original tested 'is not None OR != ""', which is always
     # true; log only when gnuplot actually produced output
     if retval is not None and retval != '':
         self.log.info(retval)
Ejemplo n.º 13
0
    def add_nmaas_module(self, **kwargs):
        '''
        This function creates a network namespace on the given switch, and configures the ip addresses, gateways accordingly.
        Furthermore, it connects the namespace to the switch with veth-pairs
        :param kwargs:  module - String: name of the module, e.g., ping
                        switch - String: name of the switch to connect the module, e.g., s1
                        chain_prev - String: the name of the node residing on the path before this switch, e.g., h1, s2
                        chain_next - String: the name of the node residing on the path next to this switch, e.g., h3, s2
                        from_to - String: {host_from}-{host_to} - used for identifying which namespace belongs to which measurement
                        estimated_time - Int: used for setting up idle_timeout for flow rules
        :return: the new port_no created by connecting one end of the veth pair to the switch -> it is important for latter
        rule installation
        '''

        module = kwargs.get('module', 'ping')
        switch = kwargs.get('switch', None)
        #these are for the private function add_flow_rules(), not really used in this function
        chain_prev = kwargs.get('chain_prev', None)
        chain_next = kwargs.get('chain_next', None)
        from_to = kwargs.get('from_to', None)
        estimated_time = kwargs.get('estimated_time', 0)

        #register module
        self._register_latency_request(from_to)

        # only the 'ping' module is implemented; anything else is fatal
        if module == "ping":
            ip_identifier = self.PING_ID
        else:
            self.log.debug(
                "Module named as {} is not supported!".format(module))
            exit(-1)

        #third segment of the module's IP address will also represent the switch id
        # get Datapath object from nmaas_controller
        # datapath = self.nmaas_network_controller.switches[switch_to_connect]['datapath']
        datapath = self.nmaas_network_controller.nmaas_graph.get_node(
            switch)['dp']
        # self.log.error(datapath)

        sw_dpid = datapath.id  # this will give back an integer DpID
        # the dpid becomes one byte of the module IP, so it must fit in 8 bits
        if sw_dpid > 255:
            self.log.debug(
                "ERROR: Switch DPID could not be greater than 255! EXITING!")
            exit(-1)

        ip = "10.{}.{}".format(ip_identifier, sw_dpid)

        #This is the basis of how the MAC addresses are assigned to the modules
        # (the QQ:QQ:QQ placeholder is replaced below with random bytes)
        # NOTE(review): binascii.b2a_hex returns str on Python 2 but bytes on
        # Python 3; the "{}"-formatting below assumes Python 2 - confirm
        # before porting.
        base_mac = "{:02x}:{:02x}:EE:QQ:QQ:QQ".format(ip_identifier, sw_dpid)

        #check whether the given switch was already took part in any of
        if switch not in self.switch_data.keys():
            self.switch_data[switch] = {
                "veth_id": list(),  # these lists will grow with time
                "ip_end": list()
            }

        # --------- Allocating new veths -----------
        #create unused IDs for veths and unused numbers IPs' 4th segment
        if not self.switch_data[switch]["veth_id"]:
            # TODO: what if emptyness is caused by consuming all resources ???
            #fill up with the possible numbers to create a pool (now only 127 pair of veths are possible)
            # (NOTE(review): range(1, 255) actually yields 254 ids, i.e. 127
            # veth *pairs* - the comment above matches the pair count)
            for i in range(1, 255):
                self.switch_data[switch]["veth_id"].append(i)

            #reverse list, so pop() below hands out the smallest free id first
            self.switch_data[switch]["veth_id"].sort(reverse=True)

        #switch is already using veths, we need to increase their numbers to create fresh ones
        veths = [
            self.switch_data[switch]["veth_id"].pop(),
            self.switch_data[switch]["veth_id"].pop()
        ]

        # --------- Allocating new IPs for veths ----------- SAME AS FOR VETHS
        # create unused IDs for veths and unused numbers IPs' 4th segment
        if not self.switch_data[switch]["ip_end"]:
            # TODO: what if emptyness is caused by consuming all resources ???
            # fill up with the possible numbers to create a pool (now only 127 pair of veths are possible)
            for i in range(1, 255):
                self.switch_data[switch]["ip_end"].append(i)

            # reverse list
            self.switch_data[switch]["ip_end"].sort(reverse=True)

            # switch is already using veths, we need to increase their numbers to create fresh ones
        module_ip_end = self.switch_data[switch]["ip_end"].pop()

        #NOW, we have self.veths 2-element-long list with the usable veth IDs
        # and the same applies for self.ip_ends

        # NOTE(review): this guard fires only after the pool bookkeeping above
        # has already run; consider validating 'switch' earlier.
        if switch is None:
            self.log.debug("ERROR: Switch is not set")
            return

        self.log.debug("Add ping module to switch {}\n".format(switch))

        #create namespace
        ns_name = "{}-ping-{}".format(switch, from_to)
        self.log.debug("-- CREATE NAMESPACE")
        cmd = "ip netns add {}".format(ns_name)
        self.log.debug(cmd)
        self.log.debug(invoke.invoke(command=cmd)[0])

        #create veth pair
        self.log.debug("-- CREATE VETH PAIR")
        cmd = "ip link add {}-ping-veth{} type veth peer name {}-ping-veth{}".format(
            switch, veths[0], switch, veths[1])
        self.log.debug(cmd)
        self.log.debug(invoke.invoke(command=cmd)[0])

        #SETTING UP MAC ADDRESSES FOR THESE NODES
        # (random last three bytes, deterministic prefix from base_mac)
        mac_1 = base_mac.replace(
            "QQ:QQ:QQ", "{}:{}:{}".format(binascii.b2a_hex(os.urandom(1)),
                                          binascii.b2a_hex(os.urandom(1)),
                                          binascii.b2a_hex(os.urandom(1))))

        cmd = "ip link set dev {}-ping-veth{} address {}".format(
            switch, veths[0], mac_1)
        self.log.debug(cmd)
        self.log.debug(invoke.invoke(command=cmd)[0])
        mac_2 = base_mac.replace(
            "QQ:QQ:QQ", "{}:{}:{}".format(binascii.b2a_hex(os.urandom(1)),
                                          binascii.b2a_hex(os.urandom(1)),
                                          binascii.b2a_hex(os.urandom(1))))
        cmd = "ip link set dev {}-ping-veth{} address {}".format(
            switch, veths[1], mac_2)
        self.log.debug(cmd)
        self.log.debug(invoke.invoke(command=cmd)[0])

        # add secondary veth device into the namespace
        self.log.debug("-- ADDING VETH PEER INTO THE NAMESPACE")
        cmd = "ip link set {}-ping-veth{} netns {}".format(
            switch, veths[1], ns_name)
        self.log.debug(cmd)
        self.log.debug(invoke.invoke(command=cmd)[0])

        # # change MAC of the netns device
        # cmd = "ip netns exec {} ip link set dev {}-ping-veth{} address be:ef:be:ef:{}:{}".format(
        #                                                                         ns_name,
        #                                                                         switch,
        #                                                                         veths[1],
        #                                                                         binascii.b2a_hex(os.urandom(1)),
        #                                                                         binascii.b2a_hex(os.urandom(1)))
        # self.log.debug(" -- CHANGING MAC ADDRESS OF VETH in the namespace")
        # self.log.debug(cmd)
        # self.log.debug(invoke.invoke(command=cmd)[0])

        # WE DON'T NEED TO ADD IP ADDRESS TO OTHER END OF THE VETH AS IT IS CONNECTED TO OVS!
        # add ip addresses to veth devices
        # self.log.debug("-- SETTING UP IP ADDRESSES FOR VETHS")
        # cmd = "ip addr add {}.{}/16 dev {}-ping-veth{}".format(ip, self.ips[0], switch_to_connect, self.veths[0])
        # self.log.debug(cmd)
        # self.log.debug(invoke.invoke(command=cmd)[0])

        cmd = "ip netns exec {} ip addr add {}.{}/16 dev {}-ping-veth{}".format(
            ns_name, ip, module_ip_end, switch, veths[1])
        self.log.debug(cmd)
        self.log.debug(invoke.invoke(command=cmd)[0])

        # bring up veth devices
        self.log.debug("-- BRINGING UP VETH DEVICES")
        cmd = "ip link set dev {}-ping-veth{} up".format(switch, veths[0])
        self.log.debug(cmd)
        self.log.debug(invoke.invoke(command=cmd)[0])

        cmd = "ip netns exec {} ip link set dev {}-ping-veth{} up".format(
            ns_name, switch, veths[1])
        self.log.debug(cmd)
        self.log.debug(invoke.invoke(command=cmd)[0])

        # add default gateway to veth in the namespace
        self.log.debug("-- ADD DEFAULT GW TO NAMESPACE")
        cmd = "ip netns exec {} ip route add 0.0.0.0/0 dev {}-ping-veth{}".format(
            ns_name, switch, veths[1])
        self.log.debug(cmd)
        self.log.debug(invoke.invoke(command=cmd)[0])

        # add veth to switch
        self.log.debug("-- ADD VETH TO SWITCH")
        cmd = "ovs-vsctl add-port {} {}-ping-veth{}".format(
            switch, switch, veths[0])
        self.log.debug(cmd)
        self.log.debug(invoke.invoke(command=cmd)[0])

        # # this command above will initiate a PORT ADDED event to the controller, from where we could become aware
        # # of the new port number that needs to be used in the new flow rules
        sw = self.nmaas_network_controller.nmaas_graph.get_node(switch)
        port_number = sw['recent_port_data']["port_no"]
        # self.log.debug("new port number on switch {} is {}".format(switch_to_connect, port_number))
        #
        # return port_number

        # ADD NODES TO THE GRAPH IN ORDER TO ASSIST ARP RESPONDER
        host_name = "nmaas-{}-ping-veth{}".format(switch, veths[0])
        # self.log.info("Adding {} to network graph".format(host_name))
        self.nmaas_network_graph.add_node(
            host_name,
            name=host_name,
            ipv4=["{}.{}".format(ip, module_ip_end)],
            ipv6=[],
            mac=mac_2,
            connected_to=switch,
            port_no=port_number)

        # add corresponding links to the graph
        # self.log.info("Adding link {}-{} to network graph".format(host_name,switch))
        self.nmaas_network_graph.add_edge(host_name,
                                          switch,
                                          dst_port=port_number,
                                          dst_dpid=switch)

        #registering module data
        self.modules["latency-hbh"][from_to]["namespaces"].extend([ns_name])
        self.modules["latency-hbh"][from_to]["switches"][switch] = {
            "used_veths": list(veths),  #kinda copy veths
            "used_ip": module_ip_end
        }
        #Adding flow rules
        self._add_flow_rules(
            switch=switch,
            chain_prev=chain_prev,
            chain_next=chain_next,
            module_id=
            ip_identifier,  # second segment of the IP address of the module, e.g., 200 (PING)
            module_ip_end=
            module_ip_end,  # last segment of the IP address identifying exactly the module
            estimated_time=estimated_time
            # estimated_time = 0
        )
Ejemplo n.º 14
0
    def do_ping(self, **kwargs):
        '''
        This function will do the practical pinging
        :param kwargs:  switch - String: The switch the ping module is running
                        chain_prev - String: The previous node in the chain that needs to be pinged if it is a host
                        chain_next - String: The next node in the chain that needs to be pinged
                        from_to - String: {host_from}-{host_to} - used for identifying which namespace belongs to which measurement
        :return: hop_by_hop_latency: a datastructure representing the the results
        '''
        switch = kwargs.get('switch', None)
        chain_prev = kwargs.get('chain_prev', None)
        chain_next = kwargs.get('chain_next', None)
        from_to = kwargs.get('from_to', None)

        # the namespace created for this measurement by add_nmaas_module()
        ns_name = "{}-ping-{}".format(switch, from_to)

        #this will indicate whether a certain call of this function pings 2 (host and ping module), or just one (chain_next)
        first_hop_ping = True

        #this will be the return value - it is a list, since in the first iteration two pings need to be done
        #it is always zeroed each time this function is called, however from the caller side we keep track of all
        #latency data by extending there a list with this return_list in each step
        return_list = list()

        #this will be the end of the command grepping the relevant part, and seding the string 'time=' to a special
        #character '%', which never appears in pings output. This would make it easier to tokenize the output during
        #analyzation (the '%' marker is what _analyze_ping_files() cuts on)
        grep_and_sed = "grep '64\ bytes'|sed 's/time\=/%/'"

        #first case, when chain_prev is a host -> this time we need to ping that, as we don't want it to ping us
        # (nodes whose name starts with "h" are hosts, "s" are switches)
        if chain_prev.startswith("h"):
            self.log.debug("Measuring latency between {} - {}".format(
                chain_prev, switch))

            output_file = "ping_from_{}_to_{}".format(switch, chain_prev)
            ping_cmd = "ip netns exec {} ping -c 4 {} |{} >> {}".format(
                ns_name,
                self.nmaas_network_controller.nmaas_graph.get_node(chain_prev)
                ["ipv4"][0], grep_and_sed, output_file)
            invoke.invoke(command=ping_cmd)
            #store results in the return_list
            return_list.extend([self._analyze_ping_files(output_file)])

        else:
            first_hop_ping = False

        self.log.debug("Measuring latency between {} - {}".format(
            switch, chain_next))

        output_file = "ping_from_{}_to_{}".format(switch, chain_next)
        if chain_next.startswith("s"):

            #a ping module needs to be ping
            #First, we need to figure out the exact IP address of that module
            ip = "10.{}.{}.{}".format(
                self.PING_ID,  # this will be 200
                self.nmaas_network_controller.nmaas_graph.get_node(
                    chain_next)["dp"].id,  #this will be
                # and integer
                self.switch_data[chain_next]["ip_end"][-1] -
                1  #there is a pool for the IPs and
                #Veths for a given switch and we
                #always take the last one out when
                #instantiating new module, so the
                #last unused-1 will be the last used
            )

            ping_cmd = "ip netns exec {} ping -c 4 {} |{} >> {}".format(
                ns_name, ip, grep_and_sed, output_file)

        else:

            # chain_next is a host and, on the other hand, we reached the end of the chain
            ping_cmd = "ip netns exec {} ping -c 4 {} |{} >> {}".format(
                ns_name,
                self.nmaas_network_controller.nmaas_graph.get_node(chain_next)
                ["ipv4"][0], grep_and_sed, output_file)

        invoke.invoke(command=ping_cmd)

        # store results in the return_list
        return_list.extend([self._analyze_ping_files(output_file)])

        # NOTE(review): first_hop_ping is set but never read in this function
        # - presumably informational only; confirm before removing.
        return return_list
Ejemplo n.º 15
0
    def checkConfig(self):
        '''
        This function will check the set config parameters and correctness,
        i.e., whether paths to binaries exist, core/port masks are
        consistent, enough hugepages are available, and other config
        parameters have the right type.

        Side effects: normalizes self._config["control_nfpa"] (str -> bool)
        and self._config["biDir"] (str -> int), and stores the total number
        of hugepages in self._config["total_hugepages"] (used later to
        estimate the whole process' runtime).

        return - Int: -1 error, 0 otherwise
        '''
        #check pktgen's directory existence
        if not self.checkDirectoryExistence(self._config["PKTGEN_ROOT"]):
            return -1

        #ok, pktgen dir exists, check whether the binary exists as well
        pktgen_bin = self._config["PKTGEN_ROOT"] + "/" + \
                     self._config["PKTGEN_BIN"]
        if not self.checkFileExistence(pktgen_bin):
            return -1

        #check whether nfpa's MAIN_ROOT is set correctly
        if not self.checkDirectoryExistence(self._config["MAIN_ROOT"]):
            return -1

        #check whether NFPA is going to setup the flows in the vnf;
        #the config file stores the flag as a string, so normalize it to a
        #real boolean here (a typo silently falls back to False with warning)
        self._config["control_nfpa"] = self._config["control_nfpa"].lower()
        if self._config["control_nfpa"] == "true":
            self._config["control_nfpa"] = True
        elif self._config["control_nfpa"] == "false":
            self._config["control_nfpa"] = False
        else:
            #typo in the config value -- fall back to False
            #(BUGFIX: message used to say 'contron_nfpa')
            self.log.warn("control_nfpa has a typo (%s) -- fallback to False" %
                          self._config['control_nfpa'])
            self._config["control_nfpa"] = False

        #if control_nfpa is True we check the other related parameters,
        #otherwise these are unnecessary
        if self._config["control_nfpa"]:
            #convert it first to lowercase
            self._config["control_vnf"] = self._config["control_vnf"].lower()
            #check whether it is supported
            if self._config["control_vnf"] not in self._config["controllers"]:
                #BUGFIX: the original message had a '%s' placeholder but no
                #argument was passed for it
                self.log.error("The control_vnf (%s) is not supported!" %
                               self._config["control_vnf"])
                self.log.error("Disable control_nfpa in nfpa.cfg and configure your vnf manually")
                exit(-1)

            #check path to the controller binary
            if not self.checkFileExistence(self._config["control_path"]):
                return -1

        #### --- === check PKTGEN port masks and core masks === --- ####
        #cpu_port_assign is a string like "2.0,3.1" or "[2-4:6].0,3.1";
        #remove head and tail double quotes first
        assign = self._config["cpu_port_assign"].replace('"', '')
        tmp_max_core_mask = 0  #highest core id referenced by the assignment
        digits = []  #every core id that must be enabled in cpu_core_mask
        for token in assign.split(','):
            #token looks like "2.0" -> the part before the dot is the core id
            core_part = token.split('.')[0]
            self.log.debug("next desired core num: " + str(core_part))

            #check whether multiple cores were set (try to convert it to int)
            try:
                core_num = int(core_part)
                digits.append(core_num)
                if core_num > tmp_max_core_mask:
                    #track the maximum of the cpu core ids
                    tmp_max_core_mask = core_num

            ### MULTI CORE HANDLING ###
            except ValueError:
                #this case is when multiple cores wanted to be used,
                #e.g., "[2-4:6]"
                self.log.info("Multicore coremask has been recognized: %s" %
                              str(core_part))
                #first remove the brackets, then split by ':' -> each piece
                #is either a single core id or a 'min-max' range
                core_part = core_part.replace('[', '').replace(']', '')
                for multi_core in core_part.split(":"):
                    #split the piece at the dash between the core numbers
                    #(e.g., "2-4")
                    try:
                        min_c = int(multi_core.split('-')[0])
                    except ValueError as e:
                        self.log.error("cpu_core_mask (%s) is wrong! Isn't there any typo?" % assign)
                        self.log.error("Python error: %s" % e)
                        exit(-1)

                    #if there is no range specified, i.e., there is no dash,
                    #let max_c simply be min_c as well -> it won't cause
                    #problems later
                    if len(multi_core.split('-')) > 1:
                        max_c = int(multi_core.split('-')[1])
                    else:
                        max_c = min_c

                    for mc in range(min_c, max_c + 1):
                        #append core nums to digits and update max core id
                        #if necessary (required later for checking the
                        #length of the specified CPU bit mask)
                        digits.append(mc)
                        if mc > tmp_max_core_mask:
                            tmp_max_core_mask = mc

        #alright, we got the max core id that needs to be used;
        #now check whether the main cpu_core_mask variable covers it
        core_mask_hex = self._config["cpu_core_mask"]
        #calculate the required bits (binary form without the '0b' prefix)
        bin_core_mask = bin(int(core_mask_hex, 16))[2:]
        bit_length = len(bin_core_mask)

        #this only checks whether the bitmask is long enough
        if tmp_max_core_mask > bit_length - 1:
            #this means that cpu_core_mask is not set correctly, since
            #fewer cores are reserved than is required for the assignment
            #defined in cpu_port_assign
            self.log.error("Wrong core mask was set!")
            self.log.error("max core id (%d) assigned to ports is the (%dth)"\
                         " bit, however core mask ('%s') only reserves (%d) "\
                         "bits" % (tmp_max_core_mask,
                                   tmp_max_core_mask + 1,
                                   core_mask_hex,
                                   bit_length))
            self.log.error("!!! %d > %d !!!" % (tmp_max_core_mask + 1, bit_length))
            return -1

        #we need to check the correctness as well, i.e., whether the
        #corresponding bits are 1
        bin_core_mask = list(bin_core_mask)
        #reverse the list for getting the right order -> bit i of the mask
        #ends up at index i, so it is easier to access and check the bits
        bin_core_mask.reverse()

        self.log.debug("Required CPU ids  :" + str(digits))
        #starts from core num 0 on the left
        self.log.debug("Reserved Core mask:" + str(bin_core_mask) + " (reversed)")

        #check correctness (whether corresponding bit is set in core mask)
        for bit in digits:
            cpu_id = bin_core_mask[int(bit)]
            if cpu_id != '1':
                self.log.error("Core mask is not set properly.")
                self.log.error("Required CPU id (%d) is not enabled in core"\
                             " mask!" % int(bit))
                self.log.error("core mask: %s" % str(bin_core_mask))
                self.log.error("Required digits needs to be enabled: %s" %
                             str(digits))
                return -1

        self.log.info("CORE MASKS SEEM TO BE CORRECT!")

        #check port_mask: pktgen may only drive one ('1') or two ('3') ports
        pm = self._config["port_mask"]
        if pm != '1' and pm != '3':
            #port mask is mis-configured
            self.log.error("Port mask could be only 1 or 3!")
            return -1
        else:
            if pm == '1':
                self.log.debug("PORT MASK IS 1")
                #NOTE(review): 'and' means the error only fires when BOTH
                #ports are nonzero -- confirm whether 'or' was intended
                if(self._config["sendPort"] != '0' and
                   self._config["recvPort"] != '0'):
                    self.log.error("In case of Port mask 1, sendPort and " +\
                                    "recvPort need to be 0!")
                    return -1

        #port mask is ok; sendPort and recvPort could be different, for
        #instance, dpdk and/or pktgen is enabled for multiple interfaces,
        #but you only need 2 interfaces from them
        #PORT MASK = OK

        #Check available hugepages
        #commands for getting hugepage information from the OS
        free_hugepages_cmd = "cat /proc/meminfo |grep HugePages_Free"
        total_hugepages_cmd = "cat /proc/meminfo | grep HugePages_Total"
        hugepage_size_cmd = "cat /proc/meminfo|grep Hugepagesize"

        #get the data - invoke analyzes the return values as well, and we
        #pass our self.log instance to make it able to write out error
        #messages
        free_hugepages = (invoke.invoke(command=free_hugepages_cmd,
                                        logger=self.log))[0]
        total_hugepages = (invoke.invoke(command=total_hugepages_cmd,
                                        logger=self.log))[0]
        hugepage_size = (invoke.invoke(command=hugepage_size_cmd,
                                        logger=self.log))[0]

        #each line looks like "HugePages_Free:    1024" -> take the part
        #after the colon, strip the whitespaces and convert to int
        free_hugepages = int(free_hugepages.split(":")[1].strip())
        total_hugepages = int(total_hugepages.split(":")[1].strip())

        #"Hugepagesize:       2048 kB" -> after the colon there is the size
        #and its unit separated by a whitespace
        hugepage_fields = hugepage_size.split(":")[1].strip().split(" ")
        hugepage_size = int(hugepage_fields[0].strip())
        hugepage_size_unit = hugepage_fields[1].strip()

        #save total hugepages in self._config in order to calculate with it
        #when the whole process' estimated time is calculated - zeroing 1
        #hugepage takes approx. 0.5s
        self._config["total_hugepages"] = total_hugepages

        #check whether hugepage size unit is kB (until now (2016), they are
        #defined in kB) and convert the size to MB
        if hugepage_size_unit == "kB":
            hugepage_size = hugepage_size / 1024
        else:
            #BUGFIX: this used to call the non-existent self.error(), which
            #would have raised an AttributeError instead of logging
            self.log.error("Cannot determine Hugepage size (check lines 364-405 in read_config.py to improve code) :(")
            return -1

        self.log.info("Hugepage size in MB: %s" % hugepage_size)
        self.log.info("Total hugepages: %s" % total_hugepages)
        self.log.info("Free hugepages: %s " % free_hugepages)

        if total_hugepages == 0:
            self.log.error("Hugepages are not enabled? Check the output of: cat /proc/meminfo |grep -i hugepages")
            return -1

        if free_hugepages == 0:
            self.log.error("There is no hugepages left! Check the output of: cat /proc/meminfo |grep -i hugepages")
            return -1

        #check socket_mem param if it exists and is not empty
        if ("socket_mem" in self._config) and (len(self._config["socket_mem"]) > 0):
            #socket_mem could have more than one important value due to the
            #NUMA awareness; in this case, it is separated via a ',' (comma),
            #so parse this value
            socket_mem_list = self._config["socket_mem"].split(',')
            #check if the required number of hugepages are enough
            usable_hugepages = free_hugepages * hugepage_size
            for i in socket_mem_list:
                socket_mem = int(i)
                usable_hugepages -= socket_mem
            if usable_hugepages >= 0:
                self.log.info("There were enough hugepages to initialize pktgen (req: %s (MB), avail:%s! (MB)" % (self._config["socket_mem"],
                                                                                                      (free_hugepages * hugepage_size)))
            else:
                self.log.error("Insufficient hugepages! Your required setting '%s' (MB) does not correspond to the available " \
                               "resources %s (MB)" % (self._config["socket_mem"], (free_hugepages * hugepage_size)))
                self.log.error("Check the output of: cat /proc/meminfo |grep -i hugepages")
                return -1

        #check biDir param - it must be the integer 0 or 1
        try:
            self._config["biDir"] = int(self._config["biDir"])
            if (self._config["biDir"] != 0) and (self._config["biDir"] != 1):
                self.log.error("biDir (%s) can only be 1 or 0!" %
                             self._config["biDir"])
                return -1
        except ValueError:
            self.log.error("biDir (%s) IS NOT A NUMBER!!!" % self._config["biDir"])
            return -1

        #check pcap files
        #check config file consistency (traffic types and packet sizes)
        if not self.checkPcapFileExists():
            #there is no pcap file for the given packet size and traffic type
            #or there is no pcap file for realistic traffics
            return -1

        warning = False
        #check whether packetsize is set, but no synthetic traffictype is set
        if self._config['packetSizes'] and not self._config["trafficTypes"]:
            self.log.warning("Packetsize(s) set without synthetic traffic type(s)")
            self.log.warning("SKIPPING...")
            warning = True
            time.sleep(1)

        elif not self._config['packetSizes'] and self._config["trafficTypes"]:
            self.log.warning("Synthetic traffic type(s) set without packet size(s)")
            self.log.warning("SKIPPING...")
            warning = True
            time.sleep(1)
        if warning and not self._config['realisticTraffics']:
            #neither synthetic nor realistic scenarios remained
            self.log.error("Nothing to DO! Check configuration!")
            return -1

        #dump the relevant (already validated) settings for debugging
        self.log.debug("cpu_make: %s" % self._config['cpu_make'])
        self.log.debug("cpu_model: %s" % self._config['cpu_model'])
        self.log.debug("nic_make: %s" % self._config['nic_make'])
        self.log.debug("nic_model: %s" % self._config['nic_model'])
        self.log.debug("virtualization: %s" % self._config['virtualization'])
        self.log.debug("vnf_name: %s" % self._config['vnf_name'])
        self.log.debug("vnf_driver: %s" % self._config['vnf_driver'])
        self.log.debug("vnf_driver_version: %s" %
                        self._config['vnf_driver_version'])
        self.log.debug("vnf_version: %s" % self._config['vnf_version'])
        self.log.debug("vnf_function: %s" % self._config['vnf_function'])
        self.log.debug("vnf_num_cores: %s" % self._config['vnf_num_cores'])
        self.log.debug("vnf_comment: %s" % self._config['vnf_comment'])
        self.log.debug("username: %s" % self._config['username'])
        self.log.debug("control_nfpa: %s" % self._config['control_nfpa'])
        self.log.debug("control_vnf: %s" % self._config['control_vnf'])
        self.log.debug("control_path: %s" % self._config['control_path'])
        self.log.debug("control_args: %s" % self._config['control_args'])
        self.log.debug("control_mgmt: %s" % self._config['control_mgmt'])
        self.log.debug("email_service: %s" % self._config['email_service'])
        if self._config['email_service'].lower() == "true":
            self.log.debug("email_from: %s" % self._config['email_from'])
            self.log.debug("email_to: %s" % self._config['email_to'])
            self.log.debug("email_server: %s" % self._config['email_server'])
            self.log.debug("email_port: %s" % self._config['email_port'])
            self.log.debug("email_username: %s" % self._config['email_username'])
            self.log.debug("email_password: HIDDEN to not store in logs")
            self.log.debug("email_timeout: %s" % self._config['email_timeout'])

        #finally, verify that the database is reachable and the user exists
        self._config['dbhelper'].connect()
        #check user
        self._config['dbhelper'].getUser(self._config['username'])
        self._config['dbhelper'].disconnect()

        return 0
Ejemplo n.º 16
0
    def createSymlinksForLuaScripts(self):
        '''
        Create fresh symlinks in pktgen's main root directory that point to
        the NFPA lua scripts (nfpa_simple.lua, nfpa_traffic.lua and
        nfpa_realistic.lua). Any previously created symlinks are removed
        first, so the links are always regenerated from scratch.
        '''
        #the lua scripts living under MAIN_ROOT/lib that pktgen must see
        #in its own root directory
        lua_scripts = ("nfpa_simple.lua",
                       "nfpa_traffic.lua",
                       "nfpa_realistic.lua")

        #drop every previously created nfpa lua symlink
        self.log.info("Remove old symlinks...")
        for script in lua_scripts:
            remove_cmd = "rm -rf " + self._config["PKTGEN_ROOT"] + \
                         "/" + script
            invoke.invoke(command=remove_cmd,
                          logger=self.log)
        self.log.info("DONE")

        #re-create one symlink per lua script
        for script in lua_scripts:
            self.log.info("create symlinks")
            symlink_cmd = "ln -s " + self._config["MAIN_ROOT"] + \
                          "/lib/" + script + " " + \
                          self._config["PKTGEN_ROOT"] + "/" + script
            self.log.info(symlink_cmd)
            invoke.invoke(command=symlink_cmd,
                          logger=self.log)
Ejemplo n.º 17
0
Archivo: nfpa.py Proyecto: cslev/nfpa
    def configureVNFRemote(self, vnf_function, traffictype):
        '''
        This function will configure the remote vnf via pre-installed tools
        located on the same machine where NFPA is.
        Only works for some predefined vnf_function and traffictraces

        :param vnf_function: String - function the VNF should act as (e.g.,
                             "bridge"); names the flow rule file under
                             of_rules/
        :param traffictype: String - traffic trace name; part of the flow
                            rule file name for non-bridge scenarios
        :return: True - if success, False - if not
                 NOTE(review): on failure this calls exit(-1) rather than
                 returning False - confirm against callers
        '''

        #the path to the openflow rules
        of_path = self.config["MAIN_ROOT"] + "/of_rules/"
        # temporary variable for bidir status - it is needed for flow_rules_preparator
        bidir = False

        #handle here OpenFlow and setup via ovs-ofctl
        if self.config["control_vnf"].lower() == "openflow":

            # first, delete the flows
            # the '<C>' meta token is replaced below with the concrete
            # ovs-ofctl subcommand (del-flows/del-groups/add-flows/add-groups)
            ofctl_cmd = self.config["control_path"] + " " + \
                        self.config["control_args"] +\
                        " <C> " + \
                        self.config["control_mgmt"] + " "
            cmd = ofctl_cmd.replace("<C>", "del-flows")
            self.log.debug("control cmd: %s" % cmd)
            invoke.invoke(command=cmd,
                          logger=self.log,
                          email_adapter=self.config['email_adapter'])
            self.log.info("Flow rules deleted")

            # second, delete groups
            cmd = ofctl_cmd.replace("<C>", "del-groups")
            self.log.debug("control cmd: %s" % cmd)
            invoke.invoke(command=cmd,
                          logger=self.log,
                          email_adapter=self.config['email_adapter'])
            self.log.info("Groups deleted")

            #OK, flows are deleted, so replace 'del-flows' to 'add-flows' for
            # easier usage later
            cmd = ofctl_cmd.replace("<C>", "add-flows")
            #first check vnf_function, if it is bridge, then no special stuff needs
            #to be setup regardless of the traces
            ############     BRIDGE ###########
            if self.config["vnf_function"].lower() == "bridge":
                #add birdge rules - located under of_rules
                scenario_path = vnf_function + "_unidir.flows"
                if not (os.path.isfile(str(of_path + scenario_path))):
                    self.log.error("Missing flow rule file: %s" % scenario_path)
                    self.log.error("NFPA does not know how to configure VNF to act as a bridge")
                    self.log.error("More info: http://ios.tmit.bme.hu/nfpa")
                    if (self.config['email_adapter'] is not None) and \
                        (not self.config['email_adapter'].sendErrorMail()):
                        self.log.error("Sending ERROR email did not succeed...")
                    exit(-1)

                if self.config["biDir"] == 1:
                    #change flow rule file if bidir was set
                    scenario_path = scenario_path.replace("unidir","bidir")
                    bidir=True

                #prepare flow rule file, i.e., replace port related meta data
                #with the concrete in/out port numbers
                scenario_path = flow_prep.prepareOpenFlowRules(self.log,
                                                               of_path,
                                                               scenario_path,
                                                               self.config["control_vnf_inport"],
                                                               self.config["control_vnf_outport"],
                                                               bidir)
                cmd = ofctl_cmd.replace("<C>","add-flows") + scenario_path
                self.log.info("add-flows via '%s'" % cmd)
                invoke.invoke(command=cmd,
                              logger=self.log,
                              email_adapter=self.config['email_adapter'])
                # print out stdout if any
                self.log.info("Flows added")
                return True
            ############    =============   ###########


            ############     OTHER CASES    ###########
            #check whether flow rules exists?
            #convention vnf_function.trace_direction.flows
            scenario_path = vnf_function + "." + traffictype + "_unidir.flows"
            if not (os.path.isfile(str(of_path + scenario_path))):
                self.log.error("Missing flow rule file: %s" % scenario_path)
                self.log.error("NFPA does not know how to configure VNF to act as " + \
                               "%s for the given trace %s" % (vnf_function,traffictype))
                self.log.error("More info: http://ios.tmit.bme.hu/nfpa")
                if (self.config['email_adapter'] is not None) and \
                    (not self.config['email_adapter'].sendErrorMail()):
                    self.log.error("Sending ERROR email did not succeed...")
                exit(-1)


            #If flow file exists try to find corresponding groups
            #(same basename, '.groups' extension instead of '.flows')
            scenario_path = scenario_path.replace(".flows",".groups")
            self.log.info("Looking for group file: %s" % scenario_path)
            if (os.path.isfile(str(of_path + scenario_path))):
                self.log.info("Group file found for this scenario: %s" % scenario_path)
                #prepare group file, i.e., replace port related meta data
                group_path = flow_prep.prepareOpenFlowRules(self.log,
                                                               of_path,
                                                               scenario_path,
                                                               self.config["control_vnf_inport"],
                                                               self.config["control_vnf_outport"],
                                                               False) #TODO: bidir handling here
                cmd = ofctl_cmd.replace("<C>","add-groups")
                cmd += " " + group_path
                self.log.info("add-groups via '%s'" % cmd)
                invoke.invoke(command=cmd,
                              logger=self.log,
                              email_adapter=self.config['email_adapter'])
            else:
                self.log.info("No group file was found...continue")

            #change back to the .flows file from .groups
            scenario_path = scenario_path.replace(".groups", ".flows")

            #if biDir is set, then other file is needed where the same rules are present
            #in the reverse direction
            if (int(self.config["biDir"]) == 1):
                #biDir for remote vnf configuration is currently not supported!
                self.log.error("Configuring your VNF by NFPA for bi-directional scenario " +
                               "is currently not supported")
                self.log.error("Please verify your nfpa.cfg")
                if (self.config['email_adapter'] is not None) and \
                    (not self.config['email_adapter'].sendErrorMail()):
                    self.log.error("Sending ERROR email did not succeed...")
                exit(-1)
                #save biDir setting in a boolean to later use for flow_prep.prepareOpenFlowRules()
                # bidir = True
                # scenario_path=scenario_path.replace("unidir","bidir")
                # if not (os.path.isfile(str(of_path + scenario_path))):
                #     self.log.error("Missing flow rule file: %s" % scenario_path)
                #     self.log.error("NFPA does not know how to configure VNF to act as " + \
                #                    "%s for the given trace %s in bi-directional mode" %
                #                    (vnf_function,traffictype))
                #     self.log.error("More info: http://ios.tmit.bme.hu/nfpa")
                #     exit(-1)

            #replace metadata in flow rule files
            scenario_path = flow_prep.prepareOpenFlowRules(self.log,
                                                           of_path,
                                                           scenario_path,
                                                           self.config["control_vnf_inport"],
                                                           self.config["control_vnf_outport"],
                                                           bidir)
            #assemble command ovs-ofctl
            cmd = ofctl_cmd.replace("<C>","add-flows") + scenario_path
            self.log.info("add-flows via '%s'" % cmd)
            self.log.info("This may take some time...")
            invoke.invoke(command=cmd,
                          logger=self.log,
                          email_adapter=self.config['email_adapter'])
            self.log.info("Flows added")
            return True
        ############    =============   ###########


        else:
            #only the OpenFlow control path is implemented so far
            self.log.error("Currently, only openflow is supported!")
            if (self.config['email_adapter'] is not None) and \
                (not self.config['email_adapter'].sendErrorMail()):
                self.log.error("Sending ERROR email did not succeed...")
            exit(-1)
Ejemplo n.º 18
0
#! /usr/bin/env python
from invoke import invoke

if __name__ == "__main__":
    # Request a 10x10 board layout from the "test" opponent.
    # The rule set limits the fleet to 10 ship cells overall,
    # at most 5 of them connected.
    payload = {
        "opponent": "test",
        "width": 10,
        "height": 10,
        "ruleSet": {
            "shipCells": 10,
            "connectedShipCells": 5,
        },
    }
    resp = invoke("LayoutEndpoint", payload)
    print(resp)
Ejemplo n.º 19
0
    def createSymlinksForLuaScripts(self):
        '''
        Create symlinks in pktgen's main root directory that point to the
        NFPA lua scripts (nfpa_simple.lua, nfpa_traffic.lua,
        nfpa_realistic.lua) under <MAIN_ROOT>/lib.
        These symlinks are always freshly generated; any old ones are
        deleted first.
        '''
        # The three lua scripts handled identically below — keeping them in
        # one tuple avoids the previous triplicated remove/link code.
        lua_scripts = ("nfpa_simple.lua",
                       "nfpa_traffic.lua",
                       "nfpa_realistic.lua")

        # remove all existing nfpa lua scripts
        # NOTE(review): commands are built from config values and run through
        # a shell — assumes PKTGEN_ROOT/MAIN_ROOT are trusted config entries.
        self.log.info("Remove old symlinks...")
        for script in lua_scripts:
            remove_cmd = "rm -rf " + self._config["PKTGEN_ROOT"] + "/" + script
            invoke.invoke(command=remove_cmd,
                          logger=self.log)
        self.log.info("DONE")

        # create a fresh symlink for each script
        for script in lua_scripts:
            self.log.info("create symlinks")
            symlink_cmd = "ln -s " + self._config["MAIN_ROOT"] + \
                          "/lib/" + script + " " + \
                          self._config["PKTGEN_ROOT"] + "/" + script
            self.log.info(symlink_cmd)
            invoke.invoke(command=symlink_cmd,
                          logger=self.log)
Ejemplo n.º 20
0
 def invoke(self, cmd, msg):
     """Run *cmd* through the module-level invoke(), logging start and end.

     *msg* is a short human-readable label used in the log lines.
     """
     log = self.log
     log.debug("%s with %s" % (msg, cmd))
     invoke(command=cmd,
            logger=log,
            email_adapter=self.config['email_adapter'])
     log.info("%s: done" % msg)
Ejemplo n.º 21
0
    def check_available_hugepages_orig(self):
        '''
        Read hugepage information from /proc/meminfo and verify that the
        configured socket_mem requirement (if any) fits into the free
        hugepages.

        Side effect: stores the total hugepage count in
        self._config["total_hugepages"] (used later for time estimation —
        zeroing 1 hugepage takes approx. 0.5s).

        Returns -1 on any problem (no hugepages, none free, insufficient
        memory, unknown size unit); returns None when everything is fine.
        '''
        # commands for getting hugepage information
        free_hugepages_cmd = "cat /proc/meminfo |grep HugePages_Free"
        total_hugepages_cmd = "cat /proc/meminfo | grep HugePages_Total"
        hugepage_size_cmd = "cat /proc/meminfo|grep Hugepagesize"

        # get the data - invoke returns (stdout, ...); we only need stdout.
        # invoke.check_retval analyzes the return values as well, and needs
        # our self.log instance to be able to write out error messages.
        free_hugepages = (invoke.invoke(command=free_hugepages_cmd,
                                        logger=self.log))[0]
        total_hugepages = (invoke.invoke(command=total_hugepages_cmd,
                                        logger=self.log))[0]
        hugepage_size = (invoke.invoke(command=hugepage_size_cmd,
                                        logger=self.log))[0]

        # Each line looks like "HugePages_Free:     123" — take the part
        # after the colon and strip the whitespace.
        free_hugepages = free_hugepages.split(":")[1].strip()
        total_hugepages = total_hugepages.split(":")[1].strip()

        # The size line looks like "Hugepagesize:       2048 kB" — after the
        # colon and strip, splitting on whitespace yields [value, unit].
        size_fields = hugepage_size.split(":")[1].strip().split(" ")
        hugepage_size = size_fields[0].strip()
        hugepage_size_unit = size_fields[1].strip()

        # convert the counts to int
        free_hugepages = int(free_hugepages)
        total_hugepages = int(total_hugepages)
        # save total hugepages in self.config in order to calculate with it
        # when the whole process' estimated time is calculated - zeroing 1
        # hugepage takes approx. 0.5s
        self._config["total_hugepages"] = total_hugepages

        hugepage_size = int(hugepage_size)
        # check whether hugepage size unit is kB (until now (2016), they are
        # defined in kB); convert to MB. Floor division keeps integer
        # semantics on both Python 2 and 3.
        if(hugepage_size_unit == "kB"):
            hugepage_size = hugepage_size // 1024
        else:
            # BUGFIX: was self.error(...), which does not exist — use the
            # logger like every other error path in this class.
            self.log.error("Cannot determine Hugepage size (check lines 364-405 in read_config.py to improve code) :(")
            return -1

        self.log.info("Hugepage size in MB: %s" % hugepage_size)
        self.log.info("Total hugepages: %s" % total_hugepages)
        self.log.info("Free hugepages: %s " % free_hugepages)

        if(total_hugepages == 0):
            self.log.error("Hugepages are not enabled? Check the output of: cat /proc/meminfo |grep -i hugepages")
            return -1

        if(free_hugepages == 0):
            self.log.error("There is no hugepages left! Check the output of: cat /proc/meminfo |grep -i hugepages")
            return -1

        # check socket_mem param if exists or not empty
        if (("socket_mem" in self._config) and (len(self._config["socket_mem"]) > 0)):
            # socket_mem parameter could have more than one important value
            # due to NUMA awareness; in that case it is separated by ','
            # (comma), so parse each per-socket value.
            socket_mem_list = self._config["socket_mem"].split(',')
            # check if the required number of hugepages are enough
            usable_hugepages = free_hugepages * hugepage_size
            for i in socket_mem_list:
                # subtract each socket's requirement (MB) from what is free
                socket_mem = int(i)
                usable_hugepages -= socket_mem
            if(usable_hugepages >= 0):
                self.log.info("There were enough hugepages to initialize pktgen (req: %s (MB), avail:%s! (MB)" % (self._config["socket_mem"],
                                                                                                      (free_hugepages*hugepage_size)))
            else:
                self.log.error("Insufficient hugepages! Your required setting '%s' (MB) does not correspond to the available " \
                               "resources %s (MB)" %(self._config["socket_mem"], (free_hugepages*hugepage_size)))
                self.log.error("Check the output of: cat /proc/meminfo |grep -i hugepages")
                return -1
Ejemplo n.º 22
0
#! /usr/bin/env python
from invoke import invoke

# Board dimensions for the empty shot grid.
height = 10
width = 10

if __name__ == "__main__":
    # An all-zero height x width board: no cells targeted yet.
    empty_board = [[0] * width for _ in range(height)]
    resp = invoke("ShootEndpoint", {
        "opponent": "test",
        "board": empty_board,
    })
    print(resp)