Example #1
0
def prepareOpenFlowRules(logger, path, flowrulefile, inport, outport, bidir):
    '''
    Specialize an OpenFlow flow rule file containing meta port placeholders
    by substituting the real control_vnf_inport and control_vnf_outport
    values. The original file is left untouched for later reuse; a new,
    temporary specialized copy is created instead.

    :param logger Logger: the logger object from the calling class
    :param path String: directory that contains the flow rule file
    :param flowrulefile String: the defined flow rule file with meta port data
    :param inport String: control_vnf_inport
    :param outport String: control_vnf_outport
    :param bidir Bool: whether INPORT2 and OUTPORT1 also exist in the file

    :return: path of the new temporary flow rule file
    '''
    source_file = path + flowrulefile

    #a timestamp keeps the temporary file names unique across runs
    timestamp = str(df.getDateFormat(time.time()))

    logger.info("Parsing file %s" % flowrulefile)

    #name of the temporary, specialized copy under <path>/tmp/
    tmp_file = (path + "tmp/" + flowrulefile + "_tmp_" + inport + "_" +
                outport + "_" + timestamp)

    #calling linux's sed is simpler than doing the replacement in python;
    #this first call also creates the temporary copy via shell redirection
    invoke.invoke(command='sed "s/<INPORT1>/%s/" %s > %s'
                          % (inport, source_file, tmp_file),
                  logger=logger)

    #every further substitution is applied in-place on the copy; sed is a
    #no-op when a placeholder is absent, so extra calls cannot corrupt it
    replacements = [("<OUTPORT2>", outport)]
    if bidir:
        replacements.append(("<INPORT2>", outport))
        replacements.append(("<OUTPORT1>", inport))

    for placeholder, port in replacements:
        invoke.invoke(command='sed -i "s/%s/%s/" %s'
                              % (placeholder, port, tmp_file),
                      logger=logger)

    return tmp_file
Example #2
0
def prepareOpenFlowRules(logger, path, flowrulefile, inport, outport, bidir):
    '''
    Take an OpenFlow flow rule file holding meta port placeholders and
    produce a temporary copy in which the placeholders are replaced by the
    actual control_vnf_inport / control_vnf_outport values. The original
    flow rule file is preserved for further reuse.

    :param logger Logger: the logger object from the calling class
    :param path String: directory that contains the flow rule file
    :param flowrulefile String: the defined flow rule file with meta port data
    :param inport String: control_vnf_inport
    :param outport String: control_vnf_outport
    :param bidir Bool: whether INPORT2 and OUTPORT1 also exist in the file

    :return: path of the new temporary flow rule file
    '''
    fullpath = path + flowrulefile

    #unique temporary filename component derived from the current time
    stamp = str(df.getDateFormat(time.time()))

    logger.info("Parsing file %s" % flowrulefile)

    tmp_file = "%stmp/%s_tmp_%s_%s_%s" % (path, flowrulefile, inport,
                                          outport, stamp)

    def run_sed(cmd):
        #helper: every substitution goes through invoke with our logger
        invoke.invoke(command=cmd, logger=logger)

    #the first sed creates the temporary copy while replacing INPORT1
    run_sed('sed "s/<INPORT1>/' + inport + '/" ' + fullpath + ' > ' + tmp_file)
    #from here on the copy exists, so edits happen in place
    run_sed('sed -i "s/<OUTPORT2>/' + outport + '/" ' + tmp_file)

    if bidir:
        #sed silently does nothing if a placeholder is absent, so calling
        #it unconditionally here cannot mess up the file
        run_sed('sed -i "s/<INPORT2>/' + outport + '/" ' + tmp_file)
        run_sed('sed -i "s/<OUTPORT1>/' + inport + '/" ' + tmp_file)

    return tmp_file
Example #3
0
File: send_mail.py  Project: cslev/nfpa
    def __init__(self, config):
        '''
        Constructor.

        :param config: dictionary of the configuration from nfpa.cfg; the
        keys read here are 'email_service', 'LOG_LEVEL', 'app_start_date'
        and 'LOG_PATH'
        '''
        self.config = config
        #email service disabled -> nothing to set up.
        #BUGFIX: the original 'return 0' made instantiation raise
        #"TypeError: __init__() should return None"; __init__ must
        #return None, so use a bare return instead
        if self.config['email_service'].lower() != "true":
            return

        self.log = l.getLogger(self.__class__.__name__,
                               self.config['LOG_LEVEL'],
                               self.config['app_start_date'],
                               self.config['LOG_PATH'])

        # get current timestamp (used later in outgoing mails)
        self.st = df.getDateFormat(self.config['app_start_date'])

        # common subject prefix for every mail this adapter sends
        self.SUBJECT = "[NFPA-SERVICE] "
Example #4
0
    def __init__(self, config):
        '''
        Constructor.

        :param config: dictionary of the configuration from nfpa.cfg; the
        keys read here are 'email_service', 'LOG_LEVEL', 'app_start_date'
        and 'LOG_PATH'
        '''
        self.config = config
        #email service disabled -> skip the rest of the setup.
        #BUGFIX: 'return 0' is illegal in __init__ (raises
        #"TypeError: __init__() should return None" on instantiation);
        #a bare return has the intended early-exit effect
        if self.config['email_service'].lower() != "true":
            return

        self.log = l.getLogger(self.__class__.__name__,
                               self.config['LOG_LEVEL'],
                               self.config['app_start_date'],
                               self.config['LOG_PATH'])

        # get current timestamp (used later in outgoing mails)
        self.st = df.getDateFormat(self.config['app_start_date'])

        # common subject prefix for every mail this adapter sends
        self.SUBJECT = "[NFPA-SERVICE] "
Example #5
0
File: nfpa.py  Project: P4ELTE/cbt
    def initialize(self):
        '''
        Prepare a measurement run: read the configuration file, create the
        logger, remove stale pid/.res files, create the of_rules/tmp
        directory, and record runtime settings (scenario name, email
        adapter, no_plot flag) in self.config.

        :return: -1 if reading the configuration failed, otherwise None
        '''

        #read config
        self.rc = ReadConfig(self.config_file)
        #NOTE(review): ReadConfig(...) yields an instance, so this equality
        #against -1 can only be True if ReadConfig defines __eq__ that way —
        #confirm the error path can actually trigger
        if (self.rc == -1):
            #error during reading config
            return -1

        self.config = self.rc.getConfig()

        self.log = l.getLogger(self.__class__.__name__,
                               self.config['LOG_LEVEL'],
                               self.config['app_start_date'],
                               self.config['LOG_PATH'])

        #remove a possibly lingering pid file from a previous run
        self.pid_file = self.config['MAIN_ROOT'] + "/" + "nfpa.pid"
        self.log.info("Deleting previous pid_file: %s" % self.pid_file)
        os.system("rm -rf " + self.pid_file)

        #before fresh start remove temporary files if they were not removed
        #already. This could be happen, if in some case, NFPA crashes, and
        #temporary res files in PKTGEN_ROOT/ still remains existing and can
        #influence a latter measurement results in a wrong way
        self.log.info("Clean up old .res files in PKTGEN's root dir...")
        self.deleteResFiles()
        self.log.info("[DONE]")

        #create a tmp directory for flow rules under nfpa/of_rules
        path = self.config["MAIN_ROOT"] + "/of_rules/tmp"
        if not os.path.exists(path):
            os.makedirs(path)

        self.log.debug("tmp directory created under of_rules")


        self.log.info("### Measurement scenario '" + self.scenario_name + \
                      "' has been initiated ###")

        #append scenario name to self.config dictionary for later usage
        self.config['scenario_name'] = self.scenario_name

        self.log.debug(str(self.config))
        #assembling log file path
        self.log_file_path = self.config['MAIN_ROOT'] + "/log/log_" + \
                             df.getDateFormat(self.config['app_start_date']) +\
                             ".log"

        self.log.info("Log file for this measurement is: %s" %
                      self.log_file_path)
        self.log.info("THANKS FOR USING NFPA FOR MEASURING")

        #persist our PID so external tooling can find/stop this run
        self.storePID(str(os.getpid()))
        self.log.debug("NFPA PID stored")

        # create an instance of the EmailAdapter and store this object in self.config
        # if email service was enabled in the config file
        if self.config['email_service'].lower() == "true":
            self.config['email_adapter'] = EmailAdapter(self.config)
        else:
            self.config['email_adapter'] = None

        #adding no_plot variable to self.config to be able to share it later with visualizer
        self.config['no_plot'] = self.no_plot
Example #6
0
File: nfpa.py  Project: cslev/nfpa
    def initialize(self):
        '''
        Prepare a measurement run: read the configuration file, create the
        logger, remove stale pid/.res files, create the of_rules/tmp
        directory, and record runtime settings (scenario name, email
        adapter) in self.config.

        :return: -1 if reading the configuration failed, otherwise None
        '''

        #read config
        self.rc = ReadConfig(self.config_file)
        #NOTE(review): ReadConfig(...) yields an instance, so this equality
        #against -1 can only be True if ReadConfig defines __eq__ that way —
        #confirm the error path can actually trigger
        if(self.rc == -1):
            #error during reading config
            return -1

        self.config = self.rc.getConfig()



        self.log = l.getLogger(self.__class__.__name__,
                               self.config['LOG_LEVEL'],
                               self.config['app_start_date'],
                               self.config['LOG_PATH'])


        #remove a possibly lingering pid file from a previous run
        self.pid_file=self.config['MAIN_ROOT'] + "/" + "nfpa.pid"
        self.log.info("Deleting previous pid_file: %s" % self.pid_file)
        os.system("rm -rf " + self.pid_file)

        #before fresh start remove temporary files if they were not removed
        #already. This could be happen, if in some case, NFPA crashes, and
        #temporary res files in PKTGEN_ROOT/ still remains existing and can
        #influence a latter measurement results in a wrong way
        self.log.info("Clean up old .res files in PKTGEN's root dir...")
        self.deleteResFiles()
        self.log.info("[DONE]")

        #create a tmp directory for flow rules under nfpa/of_rules
        path=self.config["MAIN_ROOT"] + "/of_rules/tmp"
        if not os.path.exists(path):
            os.makedirs(path)

        self.log.debug("tmp directory created under of_rules")


        self.log.info("### Measurement scenario '" + self.scenario_name + \
                      "' has been initiated ###")


        #append scenario name to self.config dictionary for later usage
        self.config['scenario_name'] = self.scenario_name


        self.log.info(str(self.config))
        #assembling log file path
        self.log_file_path = self.config['MAIN_ROOT'] + "/log/log_" + \
                             df.getDateFormat(self.config['app_start_date']) +\
                             ".log"

        self.log.info("Log file for this measurement is: %s" % self.log_file_path)
        self.log.info("THANKS FOR USING NFPA FOR MEASURING")

        #persist our PID so external tooling can find/stop this run
        self.storePID(str(os.getpid()))
        self.log.debug("NFPA PID stored")

        # create an instance of the EmailAdapter and store this object in self.config
        # if email service was enabled in the config file
        if self.config['email_service'].lower() == "true":
            self.config['email_adapter'] = EmailAdapter(self.config)
        else:
            self.config['email_adapter'] = None
Example #7
0
File: logger.py  Project: cslev/nfpa
def getLogger(class_name, level, timestamp, path):
    '''
    Create (or fetch the already-configured) colorized logger for a class.

    The logger logs to stdout honoring the given logging level, and also
    into a file with loglevel DEBUG to print out everything.

    class_name String - the class name that asks for a logger object
    level String - the desired console logging level (DEBUG, INFO, WARNING,
    ERROR, CRITICAL)
    timestamp - time stamp for the name of the log file
    path - the path the log file should be saved
    '''
    logger = logging.getLogger(class_name)

    #if logger already has handlers, it means that it is already configured,
    #so we just pass back the reference
    if logger.handlers:
        return logger

    timestamp = df.getDateFormat(timestamp)

    #remove log/ from the path, and check the parent directory's existence
    #NOTE(review): assumes path always ends in a 4-char 'log/' suffix — confirm
    path_parent_dir = path[:-4]

    if not (os.path.isdir(path_parent_dir)):
        print("Path to create log/ directory (%s) does not exist!" %
              path_parent_dir)
        print("EXITING...")
        exit(-1)

    #create the log directory
    if not os.path.exists(path):
        os.makedirs(path)

    # create file handler which logs even debug messages
    fh = logging.FileHandler(path + '/log_' + timestamp + ".log")
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()

    #map the textual level onto the logging constant; only the console
    #handler is filtered by it
    level_map = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    level = level.upper()
    if level in level_map:
        ch.setLevel(level_map[level])
    else:
        print("Log level was not set properly...set to default DEBUG")
        ch.setLevel(logging.DEBUG)
    #BUGFIX: the logger itself must stay at DEBUG; previously it was set to
    #the console level, so records below that level never reached the file
    #handler and the log file did not contain "everything" as documented
    logger.setLevel(logging.DEBUG)

    #colorize the level names (applies process-wide)
    logging.addLevelName( logging.INFO, str("%s%s%s" % 
                                       (colors['info'], 
                                        logging.getLevelName(logging.INFO),
                                        no_color)))
    logging.addLevelName( logging.DEBUG, str("%s%s%s" % 
                                       (colors['debug'], 
                                        logging.getLevelName(logging.DEBUG),
                                        no_color)))
    logging.addLevelName( logging.WARNING, str("%s%s%s" % 
                                       (colors['warning'], 
                                        logging.getLevelName(logging.WARNING),
                                        no_color)))
    logging.addLevelName( logging.ERROR, str("%s%s%s" % 
                                       (colors['error'], 
                                        logging.getLevelName(logging.ERROR),
                                        no_color)))
    logging.addLevelName( logging.CRITICAL, str("%s%s%s" % 
                                       (colors['critical'], 
                                        logging.getLevelName(logging.CRITICAL),
                                        no_color)))

    # create formatter and add it to the handlers
    formatter = logging.Formatter('[%(name)s] - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)

    # add the handlers to logger
    logger.addHandler(ch)
    logger.addHandler(fh)

    return logger
Example #8
0
File: visualizer.py  Project: cslev/nfpa
    def __init__(self, **params):
        '''
        Two main params are: config, and results, but for further possible
        extensions, some other params could also be passed and processed.

        :keyword config: dict - the configuration dictionary (required)
        :keyword results: dict - the measurement results (required)
        :keyword type: String - type of the results
        :keyword traffic_trace: String - the traffic trace identifier
        '''

        self.config = params.get('config', None)
        self.results = params.get('results', None)
        self.type = params.get('type', None)
        self.tt = params.get('traffic_trace', None)

        #create a reference for logger
        #NOTE(review): if config is None this subscript raises TypeError
        #before the explicit None-check further below ever runs
        self.log = l.getLogger(self.__class__.__name__,
                               self.config['LOG_LEVEL'],
                               self.config['app_start_date'],
                               self.config['LOG_PATH'])

        self.log.info("STARTED...")

        if self.type is None:
            self.log.error("Class wrongly instantiated - NO RESULTS TYPE SET!")

        #get current timestamp
        st = df.getDateFormat(self.config['app_start_date'])

        #biDir prefix (local renamed from 'dir' to avoid shadowing the builtin)
        direction = "uniDir"
        if int(self.config['biDir']) == 1:
            direction = "biDir"

        #create prefix for results/gnuplot files with lower granularity
        #vnf_name -> vnf_driver -> cpu -> virt -> port_type
        self.prefix = self.config['RES_PATH'] + "/" + \
                      self.config['vnf_name'] + "/" + \
                      self.config['vnf_driver'] + "/" + \
                      self.config['cpu_make'] + "/" + \
                      "virt_" + self.config['virtualization'] + "/" + \
                      self.config['port_type'] + "/"

        #check whether directory exists
        if not os.path.exists(self.prefix):
            os.makedirs(self.prefix)

        #TRAFFICTYPE will be replaced later to the actual traffic
        self.prefix += self.config['scenario_name'] + "_" + \
                       "TRAFFICTYPE." + direction + "_" + str(st) + ".data"

        #if any of the variables above are non-existing, we need to TERMINATE
        if self.config is None or self.results is None:
            self.log.error("config and results dictionaries are EMPTY")
            self.log.error("Something went wrong during initialization")
            self.log.error("EXITING...")
            #BUGFIX: os.exit() does not exist (it raised AttributeError
            #instead of terminating); SystemExit actually aborts with a
            #non-zero status
            raise SystemExit(-1)

        #create gnuplot readable file
        self.createGnuplotDataFile()

        #ready
        self.log.info("[DONE]")
        self.log.info("Charts could be found in " + self.config['MAIN_ROOT'] +\
                    "/" + self.config['RES_DIR'])
Example #9
0
    def __init__(self, **params):
        '''
        Two main params are: config, and results, but for further possible
        extensions, some other params could also be passed and processed.

        :keyword config: dict - the configuration dictionary (required)
        :keyword results: dict - the measurement results (required)
        :keyword type: String - type of the results
        :keyword traffic_trace: String - the traffic trace identifier
        '''

        self.config = params.get('config', None)
        self.results = params.get('results', None)
        self.type = params.get('type', None)
        self.tt = params.get('traffic_trace', None)

        #create a reference for logger
        #NOTE(review): if config is None this subscript raises TypeError
        #before the explicit None-check further below ever runs
        self.log = l.getLogger(self.__class__.__name__,
                               self.config['LOG_LEVEL'],
                               self.config['app_start_date'],
                               self.config['LOG_PATH'])

        self.log.info("STARTED...")

        if self.type is None:
            self.log.error("Class wrongly instantiated - NO RESULTS TYPE SET!")

        #get current timestamp
        st = df.getDateFormat(self.config['app_start_date'])

        #biDir prefix (local renamed from 'dir' to avoid shadowing the builtin)
        direction = "uniDir"
        if int(self.config['biDir']) == 1:
            direction = "biDir"

        #create prefix for results/gnuplot files with lower granularity
        #vnf_name -> vnf_driver -> cpu -> virt -> port_type
        self.prefix = self.config['RES_PATH'] + "/" + \
                      self.config['vnf_name'] + "/" + \
                      self.config['vnf_driver'] + "/" + \
                      self.config['cpu_make'] + "/" + \
                      "virt_" + self.config['virtualization'] + "/" + \
                      self.config['port_type'] + "/"

        #remember the bare directory before the filename part is appended,
        #so the final log line can point the user at it
        res_files_location = copy.deepcopy(self.prefix)

        #check whether directory exists
        if not os.path.exists(self.prefix):
            os.makedirs(self.prefix)

        #TRAFFICTYPE will be replaced later to the actual traffic
        self.prefix += self.config['scenario_name'] + "_" + \
                       "TRAFFICTYPE." + direction + "_" + str(st) + ".data"

        #if any of the variables above are non-existing, we need to TERMINATE
        if self.config is None or self.results is None:
            self.log.error("config and results dictionaries are EMPTY")
            self.log.error("Something went wrong during initialization")
            self.log.error("EXITING...")
            #BUGFIX: os.exit() does not exist (it raised AttributeError
            #instead of terminating); SystemExit actually aborts with a
            #non-zero status
            raise SystemExit(-1)

        #create gnuplot readable file
        self.createGnuplotDataFile()

        #ready
        self.log.info("[DONE]")
        self.log.info("Results could be found in " + res_files_location)
Example #10
0
File: logger.py  Project: levaitamas/cbt
def getLogger(class_name, level, timestamp, path):
    '''
    Create (or fetch the already-configured) colorized logger for a class.

    The logger logs to stdout honoring the given logging level, and also
    into a file with loglevel DEBUG to print out everything.

    class_name String - the class name that asks for a logger object
    level String - the desired console logging level (DEBUG, INFO, WARNING,
    ERROR, CRITICAL)
    timestamp - time stamp for the name of the log file
    path - the path the log file should be saved
    '''
    logger = logging.getLogger(class_name)

    #if logger already has handlers, it means that it is already configured,
    #so we just pass back the reference
    if logger.handlers:
        return logger

    timestamp = df.getDateFormat(timestamp)

    #remove log/ from the path, and check the parent directory's existence
    #NOTE(review): assumes path always ends in a 4-char 'log/' suffix — confirm
    path_parent_dir = path[:-4]

    if not (os.path.isdir(path_parent_dir)):
        print("Path to create log/ directory (%s) does not exist!" %
              path_parent_dir)
        print("EXITING...")
        exit(-1)

    #create the log directory
    if not os.path.exists(path):
        os.makedirs(path)

    # create file handler which logs even debug messages
    fh = logging.FileHandler(path + '/log_' + timestamp + ".log")
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()

    #map the textual level onto the logging constant; only the console
    #handler is filtered by it
    level_map = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    level = level.upper()
    if level in level_map:
        ch.setLevel(level_map[level])
    else:
        print("Log level was not set properly...set to default DEBUG")
        ch.setLevel(logging.DEBUG)
    #BUGFIX: the logger itself must stay at DEBUG; previously it was set to
    #the console level, so records below that level never reached the file
    #handler and the log file did not contain "everything" as documented
    logger.setLevel(logging.DEBUG)

    #colorize the level names (applies process-wide)
    logging.addLevelName(
        logging.INFO,
        str("%s%s%s" %
            (colors['info'], logging.getLevelName(logging.INFO), no_color)))
    logging.addLevelName(
        logging.DEBUG,
        str("%s%s%s" %
            (colors['debug'], logging.getLevelName(logging.DEBUG), no_color)))
    logging.addLevelName(
        logging.WARNING,
        str("%s%s%s" % (colors['warning'], logging.getLevelName(
            logging.WARNING), no_color)))
    logging.addLevelName(
        logging.ERROR,
        str("%s%s%s" %
            (colors['error'], logging.getLevelName(logging.ERROR), no_color)))
    logging.addLevelName(
        logging.CRITICAL,
        str("%s%s%s" % (colors['critical'],
                        logging.getLevelName(logging.CRITICAL), no_color)))

    # create formatter and add it to the handlers
    formatter = logging.Formatter('[%(name)s] - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)

    # add the handlers to logger
    logger.addHandler(ch)
    logger.addHandler(fh)

    return logger