Example no. 1
0
 def __init__(self, *args):
     '''
     Initialize the multicast connectivity tester plugin.

     Reads the multicast group IP, port and retry count from the
     [Multicast Tester] config section, then creates a UDP socket
     bound to the group port and joined to the multicast group.

     :param args: forwarded unchanged to the parent constructor
     '''
     super().__init__(*args)
     self.dataitems = [DataItem('IP_MULTICAST_CONN',2,600,1200)]
     self.cost = 5000
     self.wait = 5 # waiting time in seconds
     config = configuration.get_configparser()
     # dict for receivers: ip -> timestamp
     self.receiver = {}
     # Missing config options are tolerated (best-effort).
     # NOTE(review): if 'groupport' is absent, self.mcport is never set
     # and the bind() below raises AttributeError -- confirm the config
     # always provides it, or add a default.
     try:
         self.mcip = config.get('Multicast Tester', 'groupip')
     except Exception:
         pass
     try:
         self.mcport = config.getint('Multicast Tester', 'groupport')
     except Exception:
         pass
     try:
         self.retries = config.getint('Multicast Tester', 'maxretries')
     except Exception:
         pass
     # prepare socket ('any_addr' instead of 'any': avoid shadowing the builtin)
     any_addr = "0.0.0.0"
     self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                 socket.IPPROTO_UDP)
     # BUG FIX: SO_REUSEADDR must be set *before* bind() to take effect;
     # the original set it afterwards, where it is a no-op.
     self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
     self.socket.bind((any_addr, self.mcport))
     self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 7)
     # join the multicast group: group address + local interface address
     self.socket.setsockopt(socket.IPPROTO_IP,
                            socket.IP_ADD_MEMBERSHIP,
                            socket.inet_aton(self.mcip) +
                            socket.inet_aton(any_addr))
Example no. 2
0
    def __init__(self):
        '''
        Set up the dispatcher core.

        Creates the event queue and mission control, then starts the
        scheduler, plugins, cache database and the XML-RPC server plus
        its reply handler, and finally memorizes the local IP addresses.
        Initialization order matters: plugins and the database must be
        ready before the XML-RPC server starts accepting requests.
        '''
        # one logger per concrete class; DEBUG so all records pass, the
        # handlers (configured elsewhere) decide what is actually emitted
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.DEBUG)

        self.config = configuration.get_configparser()
        
        # orders issued but not yet answered -- presumably keyed by order
        # id; verify against the reply handler
        self.pending_orders = {}
        self.eventq = Queue()
        # RB
        # if mc should filter the receivers the list of plugins should be parameters
        self.mc = MissionControl(self.eventq)
        #
        
        # TODO: add bogus task to help signal handling / cache garbage collection
        self.scheduler = Scheduler(self)
        self.init_plugins()
        self.init_database()
        self.start_xmlrpcserver()
        self.start_xmlrpcreplyhandler()

        # memorize local IPs
        self.ips = getIpAddresses()
Example no. 3
0
def init_logging(daemon=False):
    '''
    Configure the root logger from the [Logging] config section.

    Installs an optional file handler and, unless running as a daemon
    (or daemon-mode logging is forced by config), a console handler.
    Formats and loglevel fall back to built-in defaults when the config
    is missing or broken.

    :param daemon: True suppresses the console handler
    '''
    config = configuration.get_configparser()
    # configure the root logger; it accepts every level, the individual
    # handlers decide what actually gets logged
    logger = logging.getLogger()
    logger.setLevel(logging.NOTSET)

    # default formats, overridable via config
    logformat = logging.Formatter('%(asctime)s - %(name)-32s - %(levelname)-8s - %(message)s')
    try:
        logformat = logging.Formatter(config.get('Logging', 'log_format'))
    except Exception:
        print('using default logformat')
    debugformat = logging.Formatter('%(asctime)s - %(threadName)-10s - %(name)-32s - %(levelname)-8s - %(filename)-15s - %(lineno)4d - %(message)s')
    try:
        debugformat = logging.Formatter(config.get('Logging', 'debugformat'))
    except Exception:
        print('using default debugformat')
    # global loglevel, e.g. 'info' -> logging.INFO
    try:
        loglevel = getattr(logging, config.get('Logging', 'loglevel').upper())
    except Exception:
        # config doesn't exist, option missing, or unknown level name
        print('using default loglevel INFO')
        loglevel = logging.INFO
    # BUG FIX: the original used config.get(), whose non-empty string
    # return value is always truthy, so 'uselogfile = false' still
    # enabled the logfile; getboolean parses it properly. A missing
    # option now disables the logfile instead of raising.
    try:
        uselogfile = config.getboolean('Logging', 'uselogfile')
    except Exception:
        uselogfile = False
    if uselogfile:
        # file handler logs at the configured global level
        fh = logging.FileHandler(expanduser(config.get('Logging','logfile')))
        fh.setLevel(loglevel)
        fh.setFormatter(logformat)
        logger.addHandler(fh)

    # console handler: only when neither daemon-mode logging nor the
    # daemon flag is set
    if not(config.getboolean('Logging','daemonmodelogging') or daemon):
        print('we enable console logging')
        ch = logging.StreamHandler()
        if (config.getboolean('General','debug')):
            # debug mode: verbose format, everything down to DEBUG
            ch.setLevel(logging.DEBUG)
            ch.setFormatter(debugformat)
        else:
            ch.setLevel(loglevel)
            ch.setFormatter(logformat)
        logger.addHandler(ch)
    else:
        print('we disable console logging')
    logger = logging.getLogger(__name__)
    logger.info('UNISONO logging started')
Example no. 4
0
 def measure(self):
     '''
     Fill self.request with the statically configured values.

     For every data item of this plugin, look up a value in the
     [cValues] config section. On success the item value is stored and
     error 0 is set; for a missing value error 312 is recorded. Each
     item overwrites error/errortext, so the last item decides the
     overall status (behavior kept from the original).
     '''
     config = configuration.get_configparser()
     options = config.options('cValues')
     self.logger.debug('cValues options: %s', options)
     for di in self.dataitems:
         # narrow try: only the config lookup can fail; the bare
         # 'except:' of the original is tightened to Exception
         try:
             value = config.get('cValues', di.name)
         except Exception:
             self.request['error'] = 312
             self.request['errortext'] = 'No value configured'
         else:
             self.request[di.name] = value
             self.request['error'] = 0
             self.request['errortext'] = 'Measurement successful'
     self.logger.debug('the values are: %s', self.request)
Example no. 5
0
File: db.py Project: e110c0/unisono
def restoreDataBase(diprops):
    '''
    restoreDataBase handles the first initialization of the cache database in UNISONO.

    It takes care of creating, restoring and initial cleanup of the database.
    All instances of DataBase connect to this existing database. This works also
    for in-memory databases (the preferred db type for unisono).

    Restore only happens for an in-memory db in 'persistent' storage
    mode with a configured dump file; all other configurations return
    without touching the database.

    :param diprops: data item properties forwarded to DataBase()
    '''
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    config = configuration.get_configparser()
    db = DataBase(diprops)
    dbcon = db.dbcon
    c = dbcon.cursor()

    # BUG FIX: the original read 'self.config' inside this module-level
    # function; the resulting NameError was swallowed by the bare
    # excepts, so the restore path could never run. Use the local
    # 'config' instead.
    try:
        dbfile = config.get('Cache', 'dbfile')
        logger.info('Connecting to DB: %s', dbfile)
    except Exception:
        logger.info('Connecting to DB at default location')
        dbfile = ':memory:'
    # guard clauses: nothing to restore for a file-backed db
    if dbfile != ':memory:':
        logger.info('Already working with persistent database, no need for restore.')
        return
    try:
        storagemode = config.get('Cache', 'storagemode')
        logger.info('Storage mode: %s', storagemode)
    except Exception:
        logger.info('No storage mode specified, working with transient cache')
        return
    if storagemode != 'persistent':
        logger.info('Working with transient database, no need for restore.')
        return
    try:
        persistentfile = config.get('Cache', 'persistentfile')
        logger.info('Storage file: %s', persistentfile)
    except Exception:
        logger.info('No storage file specified, working with transient cache')
        return
    try:
        # 'with' closes the dump file even if executescript raises
        with open(persistentfile, mode='r') as dump:
            c.executescript(dump.read())
        dbcon.commit()
        c.close()
    except IOError:
        logger.error('Could not open file: %s', persistentfile)
    except sqlite3.OperationalError:
        logger.error('Corrupted dump file: %s', persistentfile)
        dbcon.rollback()
        c.close()
        dbcon.close()
Example no. 6
0
File: db.py Project: e110c0/unisono
 def __init__(self, diprops):
     '''
     The first instance of DataBase creates a sqlite database at the
     configured location. All further instances connect to this existing
     database. This works also for in-memory databases (the preferred db
     type for unisono).

     :param diprops: data item properties, stored for later use
     '''
     self.config = configuration.get_configparser()
     # NOTE(review): self.logger is not assigned in this __init__;
     # presumably it is a class attribute set elsewhere -- confirm,
     # otherwise both branches below raise AttributeError.
     try:
         dbfile = self.config.get('Cache', 'dbfile')
         self.logger.debug('Connecting to DB: %s', dbfile)
     except Exception:
         # missing [Cache]/dbfile option: fall back to an in-memory db
         # (bare 'except:' of the original tightened to Exception)
         self.logger.debug('Connecting to DB at default location')
         dbfile = ':memory:'
     self.dbcon = sqlite3.connect(dbfile)
     self.dataitemprops = diprops
Example no. 7
0
    def __init__(self, outq):
        '''
        Set up the mission-control server and its worker threads.

        Binds a socketserver on the configured port (with address reuse),
        then starts two daemon threads: one draining the internal send
        queue and one receiving.

        :param outq: event queue of the dispatcher; received events are
                     pushed here
        '''
        self.config = configuration.get_configparser()
        # listening port from the [MissionControl] config section
        self.__port = self.config.getint('MissionControl', 'port');

        # empty host means: listen on all interfaces
        host = ""
        # bind_and_activate=False so allow_reuse_address can be set
        # before the explicit server_bind() below takes effect
        super().__init__((host,self.__port),MissionControlRequestHandler,False)
        self.allow_reuse_address = True
        self.server_bind()
        self.server_activate()
        self.__send_queue = queue.Queue() # outqueue filled by the dispatcher
        self.__receive_queue = outq # eventqueue of the dispatcher
        self.lock = Lock()
        # cooperative shutdown flag checked by the worker threads --
        # presumably in triggerSendQueue/receive; verify there
        self.do_quit = False
        self.trigger_wait_time = 3 # time the trigger threads wait, after rescanning a queue
        # daemon threads: they must not keep the process alive on exit
        t_triggerSendQueue = Thread(target = self.triggerSendQueue, args = ())
        t_triggerSendQueue.daemon = True
        t_triggerSendQueue.start()
        t_Recv = Thread(target = self.receive, args = ())
        t_Recv.daemon = True
        t_Recv.start()