class unittest_DiskReader(unittest.TestCase):

    def setUp(self, logFile='log/DiskReader.log'):
        self.logger = Logger(logFile, 'DEBUG', 'Sub')
        self.logger = self.logger.getLogger()
        self.reader = DiskReader('txq/test/', 20, False, 5, False, True, self.logger)

    def test_DiskReader(self):
        self.reader.read()
        #print self.reader.files
        self.assertEqual(self.reader.files,
                         ['txq/test//0/2012053108/file-F',
                          'txq/test//1/2012053108/file-E',
                          'txq/test//2/2012053108/file-D',
                          'txq/test//3/2012053108/file-A',
                          'txq/test//3/2012053108/file-C',
                          'txq/test//4/2012053108/file-B',
                          'txq/test//5/2012053108/file-A'])
        #print self.reader.getFilenamesAndContent()
        self.assertEqual(self.reader.getFilenamesAndContent(),
                         [('', 'txq/test//0/2012053108/file-F'),
                          ('file E', 'txq/test//1/2012053108/file-E'),
                          ('', 'txq/test//2/2012053108/file-D'),
                          ('file A', 'txq/test//3/2012053108/file-A'),
                          ('file C', 'txq/test//3/2012053108/file-C'),
                          ('', 'txq/test//4/2012053108/file-B'),
                          ('', 'txq/test//5/2012053108/file-A')])
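# A minimal sketch of how this test case could be driven, assuming the Logger
# and DiskReader modules above are importable and the txq/test/ fixture tree
# exists on disk; the runner itself is plain standard-library unittest.
if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(unittest_DiskReader)
    unittest.TextTestRunner(verbosity=2).run(suite)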
def __init__(self, client, logger):
    self.client = client                         # Client object (gives access to all configuration options)
    self.remoteHost = client.host                # Remote host (name or ip)
    self.port = int(client.port)                 # Port (int) to which the receiver is bound
    self.address = (self.remoteHost, self.port)  # Socket address
    self.timeout = client.timeout                # No timeout for now
    self.logger = logger                         # Logger object
    self.socketAMIS = None                       # The socket
    self.igniter = None

    self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                             self.client.batch,
                             self.client.validation,
                             self.client.patternMatching,
                             self.client.mtime,
                             True,
                             self.logger,
                             eval(self.client.sorter),
                             self.client)

    self.preamble = chr(curses.ascii.SOH) + "\r\n"
    self.endOfLineSep = "\r\r\n"
    self.endOfMessage = self.endOfLineSep + chr(curses.ascii.ETX) + "\r\n\n" + chr(curses.ascii.EOT)
    self.debugFile = False

    self.cacheManager = CacheManager(maxEntries=self.client.cache_size, timeout=8*3600)

    # AMIS's maximum bulletin size is 14000
    self.set_maxLength(self.client.maxLength)

    # statistics
    self.totBytes = 0
    self.initialTime = time.time()
    self.finalTime = None

    self._connect()
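# Hedged illustration of the AMIS framing built in the __init__ above: a
# bulletin body is wrapped between the SOH preamble and the ETX/EOT trailer
# before hitting the wire. 'bulletin' and 'frame' are illustrative names;
# the real send path lives elsewhere in the sender.
import curses.ascii

preamble = chr(curses.ascii.SOH) + "\r\n"
endOfLineSep = "\r\r\n"
endOfMessage = endOfLineSep + chr(curses.ascii.ETX) + "\r\n\n" + chr(curses.ascii.EOT)

bulletin = "SACN31 CWAO 311200" + endOfLineSep + "METAR CYOW ..."
frame = preamble + bulletin + endOfMessage   # what is actually written to the socket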
def ingestCollection(self, igniter):

    from DiskReader import DiskReader
    import bulletinManager
    import CollectionManager

    bullManager = bulletinManager.bulletinManager(
                      PXPaths.RXQ + self.source.name,
                      self.logger,
                      PXPaths.RXQ + self.source.name,
                      99999,
                      '\n',
                      self.source.extension,
                      self.source.routingTable,
                      self.source.mapEnteteDelai,
                      self.source)

    reader = DiskReader(bullManager.pathSource,
                        self.source.batch,
                        self.source.validation,
                        self.source.patternMatching,
                        self.source.mtime,
                        False,
                        self.source.logger,
                        self.source.sorter,
                        self.source)

    collect = CollectionManager.CollectionManager(self, bullManager, reader)

    while True:
        # If a SIGHUP signal is received ...
        if igniter.reloadMode == True:
            # We assign the defaults, reread the configuration file for the source
            # and reread all configuration files for the clients (all this in __init__)
            self.source.__init__(self.source.name, self.source.logger)

            bullManager = bulletinManager.bulletinManager(
                              PXPaths.RXQ + self.source.name,
                              self.logger,
                              PXPaths.RXQ + self.source.name,
                              99999,
                              '\n',
                              self.source.extension,
                              self.source.routingTable,
                              self.source.mapEnteteDelai,
                              self.source)

            reader = DiskReader(bullManager.pathSource,
                                self.source.batch,
                                self.source.validation,
                                self.source.patternMatching,
                                self.source.mtime,
                                False,
                                self.source.logger,
                                self.source.sorter,
                                self.source)

            collect = CollectionManager.CollectionManager(self, bullManager, reader)

            self.logger.info("Receiver has been reloaded")
            igniter.reloadMode = False

        collect.process()
        time.sleep(20)
def resetReader(self):
    self.reader = DiskReader(PXPaths.TXQ + self.flow.name,
                             self.flow.batch,            # Number of files we read each time
                             self.flow.validation,       # name validation
                             self.flow.patternMatching,  # pattern matching
                             self.flow.mtime,            # we don't check modification time
                             True,                       # priority tree
                             self.logger,
                             eval(self.flow.sorter),
                             self.flow)
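# The DiskReader constructor is always called positionally in this code base.
# A hedged sketch of the signature those call sites imply (parameter names
# here are inferred for illustration, not taken from DiskReader.py):
#
#   DiskReader(path,             # queue directory to scan (e.g. PXPaths.TXQ + name)
#              batch,            # number of files to read per pass
#              validation,       # file-name validation setting
#              patternMatching,  # pattern-matching setting
#              mtime,            # modification-time check setting
#              prioTree,         # True for a priority tree (txq), False for rxq
#              logger,           # Logger object
#              sorter,           # sorting function (senders pass eval(client.sorter))
#              flow)             # the Client/Source/Flow object itself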
def __init__(self, sourlient):

    AFTNPaths.normalPaths(sourlient.name)
    PXPaths.normalPaths()

    self.sysman = SystemManager()           # General system manager

    self.sourlient = sourlient              # Sourlient (Source/Client) object containing configuration infos.
    self.logger = sourlient.logger          # Logger object
    self.subscriber = sourlient.subscriber  # Determines if it will act like a subscriber or a provider (MHS)
    self.host = sourlient.host              # Remote host (name or ip)
    self.portR = sourlient.portR            # Receiving port
    self.portS = sourlient.portS            # Sending port

    self.batch = sourlient.batch            # Number of files we read in a pass (20)
    self.timeout = sourlient.timeout        # Timeout time in seconds (default = 10 seconds)
    self.sleepBetweenConnect = 10           # Time (in seconds) between connection trials
    self.slow = sourlient.slow              # Sleeps are added when we want to be able to decrypt log entries

    self.igniter = None                     # Igniter object (link to pid)

    self.writePath = AFTNPaths.RECEIVED                 # Where we write messages we receive
    self.archivePath = AFTNPaths.SENT                   # Where we put sent messages
    self.specialOrdersPath = AFTNPaths.SPECIAL_ORDERS   # Where we put special orders

    # Paths creation
    self.sysman.createDir(PXPaths.TXQ + self.sourlient.name)
    self.sysman.createDir(self.writePath)
    self.sysman.createDir(self.archivePath)
    self.sysman.createDir(self.specialOrdersPath)

    self.mm = MessageManager(self.logger, self.sourlient)  # The AFTN protocol is implemented in the MessageManager object

    self.remoteAddress = None   # Remote address (where we will connect())
    self.socket = None          # Socket object
    self.dataFromFiles = []     # A list of tuples (content, filename) obtained from a DiskReader

    self.reader = DiskReader(PXPaths.TXQ + self.sourlient.name,
                             self.sourlient.batch,
                             self.sourlient.validation,
                             self.sourlient.diskReaderPatternMatching,
                             self.sourlient.mtime,
                             True,
                             self.logger,
                             eval(self.sourlient.sorter),
                             self.sourlient)

    self.debug = True           # Debugging switch
    self.justConnect = False    # Boolean indicating that a connection has just occurred

    self.totBytes = 0

    #self.printInitInfos()
    self.makeConnection()
def doSpecialOrders(self, path):
    # Stop, restart, reload, disconnect, connect could be put here?
    reader = DiskReader(path)
    reader.read()
    dataFromFiles = reader.getFilenamesAndContent()

    for index in range(len(dataFromFiles)):
        words = dataFromFiles[index][0].strip().split()
        self.logger.info("Special Order: %s" % (dataFromFiles[index][0].strip()))

        if words[0] == 'outCSN':
            if words[1] == '+':
                self.nextCSN()
                self.logger.info("CSN = %s" % self.CSN)
            elif words[1] == '-':
                # This case is only done for testing purposes. It is not complete
                # and not correct when the CSN value is 0 or 1
                self.nextCSN(str(int(self.CSN) - 2))
                self.logger.info("CSN = %s" % self.CSN)
            elif words[1] == 'print':
                self.logger.info("CSN = %s" % self.CSN)
            else:
                # We assume it's a number, we don't verify!!
                self.nextCSN(words[1])
                self.logger.info("CSN = %s" % self.CSN)

        elif words[0] == 'inCSN':
            if words[1] == '+':
                self.calcWaitedTID(self.waitedTID)
                self.logger.info("Waited TID = %s" % self.waitedTID)
            elif words[1] == '-':
                # This case is only done for testing purposes. It is not complete
                # and not correct when the waited TID value is 0 or 1
                self.calcWaitedTID(self.otherStationID + "%04d" % (int(self.waitedTID[3:]) - 2))
                self.logger.info("Waited TID = %s" % self.waitedTID)
            elif words[1] == 'print':
                self.logger.info("Waited TID = %s" % self.waitedTID)
            else:
                # We assume it's a number, we don't verify!!
                self.calcWaitedTID(self.otherStationID + "%04d" % int(words[1]))
                self.logger.info("Waited TID = %s" % self.waitedTID)

        elif words[0] == 'ackWaited':
            if words[1] == 'print':
                self.logger.info("Waiting for ack: %s" % self.getWaitingForAck())
            else:
                self.setWaitingForAck(words[1])
                self.incrementSendingInfos()

        elif words[0] == 'ackNotWaited':
            self.setWaitingForAck(None)
            self.resetSendingInfos()
            self.updatePartsToSend()

        elif words[0] == 'ackUsed':
            self.ackUsed = words[1] == 'True' or words[1] == 'true'

        elif words[0] == 'printState':
            self.logger.info(self.state.infos())

        else:
            pass

        try:
            # Erase the order file we just processed (not always the first one)
            os.unlink(dataFromFiles[index][1])
            self.logger.debug("%s has been erased", os.path.basename(dataFromFiles[index][1]))
        except OSError, e:
            (type, value, tb) = sys.exc_info()
            self.logger.error("Unable to unlink %s ! Type: %s, Value: %s"
                              % (dataFromFiles[index][1], type, value))
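# Hedged example of a special order: each file dropped in the special-orders
# directory holds one command parsed by doSpecialOrders() above. The file name
# is illustrative, and 'mm' stands for a MessageManager instance.
fp = open(AFTNPaths.SPECIAL_ORDERS + '/order.txt', 'w')
fp.write('outCSN print')   # other recognized orders: 'outCSN +', 'outCSN -',
fp.close()                 # 'inCSN ...', 'ackWaited <id>', 'ackNotWaited',
                           # 'ackUsed true', 'printState'

mm.doSpecialOrders(AFTNPaths.SPECIAL_ORDERS)   # reads, applies, then erases the order files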
class senderAMQP:

    def __init__(self, client, logger):
        self.client = client           # Client object (gives access to all configuration options)
        self.timeout = client.timeout  # No timeout for now
        self.logger = logger           # Logger object
        self.connection = None         # The connection
        self.igniter = None
        self.ssl = False

        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,
                                 self.client.validation,
                                 self.client.patternMatching,
                                 self.client.mtime,
                                 True,
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

        self.debugFile = False

        self.cacheManager = CacheManager(maxEntries=self.client.cache_size, timeout=8*3600)

        # AMQP: is there a max for message size?
        # self.set_maxLength(self.client.maxLength)

        # statistics
        self.totBytes = 0
        self.initialTime = time.time()
        self.finalTime = None

        self._connect()

    def printSpeed(self):
        elapsedTime = time.time() - self.initialTime
        speed = self.totBytes / elapsedTime
        self.totBytes = 0
        self.initialTime = time.time()
        return "Speed = %i" % int(speed)

    def setIgniter(self, igniter):
        self.igniter = igniter

    def resetReader(self):
        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,
                                 self.client.validation,
                                 self.client.patternMatching,
                                 self.client.mtime,
                                 True,
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

    def _connect(self):
        self.connection = None
        self.channel = None

        while True:
            try:
                host = self.client.host
                if self.client.port != None:
                    host = host + ':' + self.client.port

                # connect
                self.connection = amqp.Connection(host, userid=self.client.user,
                                                  password=self.client.passwd, ssl=self.ssl)
                self.channel = self.connection.channel()

                # what kind of exchange
                self.channel.access_request(self.client.exchange_realm, active=True, write=True)
                self.channel.exchange_declare(self.client.exchange_name, self.client.exchange_type,
                                              auto_delete=False)

                self.logger.info("AMQP Sender is now connected to: %s" % str(self.client.host))
                break
            except:
                (type, value, tb) = sys.exc_info()
                self.logger.error("AMQP Sender cannot connect to: %s" % str(self.client.host))
                self.logger.error("Type: %s, Value: %s, Sleeping 5 seconds ..." % (type, value))
                time.sleep(5)

    def shutdown(self):
        pass

    def read(self):
        if self.igniter.reloadMode == True:
            # We assign the defaults and reread the configuration file (in __init__)
            if self.channel != None:
                self.channel.close()
            if self.connection != None:
                self.connection.close()
            self.client.__init__(self.client.name, self.client.logger)
            self.resetReader()
            self.cacheManager.clear()
            self.logger.info("Cache has been cleared")
            self.logger.info("Sender AMQP has been reloaded")
            self.igniter.reloadMode = False
        self.reader.read()
        return self.reader.getFilesContent(self.client.batch)

    def write(self, data):
        if len(data) >= 1:
            self.logger.info("%d new messages will be sent", len(data))

            for index in range(len(data)):
                self.logger.start_timer()

                # data info
                msg_body = data[index]
                nbBytesSent = len(msg_body)

                # if in cache then it was already sent... nothing to do
                # priority 0 is retransmission and is never suppressed
                path = self.reader.sortedFiles[index]
                priority = path.split('/')[-3]
                if self.client.nodups and priority != '0' and self.in_cache(data[index], True, path):
                    #PS... same bug as in Senders AM, AMIS & WMO.
                    #self.unlink_file( self.reader.sortedFiles[index] )
                    continue

                # get/check destination name
                basename = os.path.basename(path)
                destName, destDir = self.client.getDestInfos(basename)
                if not destName:
                    os.unlink(path)
                    self.logger.info('No destination name: %s has been erased' % path)
                    continue

                # build message
                parts = basename.split(':')
                if parts[-1][0:2] == '20':
                    parts = parts[:-1]
                hdr = {'filename': ':'.join(parts)}
                msg = amqp.Message(msg_body, content_type=self.client.exchange_content,
                                   application_headers=hdr)

                # exchange_key pattern
                exchange_key = self.client.exchange_key
                if '$' in self.client.exchange_key:
                    exchange_key = self.keyPattern(basename, self.client.exchange_key)
                self.logger.debug("exchange key = %s" % exchange_key)

                # publish message
                self.channel.basic_publish(msg, self.client.exchange_name, exchange_key)

                self.logger.delivered("(%i Bytes) Message %s delivered"
                                      % (nbBytesSent, basename), path, nbBytesSent)
                self.unlink_file(path)
                self.totBytes += nbBytesSent
        else:
            time.sleep(1)

    def run(self):
        while True:
            data = self.read()
            try:
                self.write(data)
            except:
                (type, value, tb) = sys.exc_info()
                self.logger.error("Sender error! Type: %s, Value: %s" % (type, value))
                # We close the connection
                try:
                    self.channel.close()
                    self.connection.close()
                except:
                    (type, value, tb) = sys.exc_info()
                    self.logger.error("Problem in closing socket! Type: %s, Value: %s" % (type, value))
                # We try to reconnect.
                self._connect()
            #time.sleep(0.2)

    # check if data is in cache... if not it is added automatically
    def in_cache(self, data, unlink_it, path):
        already_in = False

        # If data is already in cache, we don't send it
        if self.cacheManager.find(data, 'md5') is not None:
            already_in = True
            if unlink_it:
                try:
                    os.unlink(path)
                    self.logger.info("suppressed duplicate send %s", os.path.basename(path))
                except OSError, e:
                    (type, value, tb) = sys.exc_info()
                    self.logger.info("in_cache unable to unlink %s ! Type: %s, Value: %s"
                                     % (path, type, value))

        return already_in
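# A minimal usage sketch for the class above, assuming a configured Client
# object, a logger and an igniter are built elsewhere (as the other senders
# in this code base do); the names below are illustrative.
sender = senderAMQP(client, logger)   # connects and builds the DiskReader on PXPaths.TXQ + name
sender.setIgniter(igniter)            # igniter.reloadMode drives SIGHUP reloads inside read()
sender.run()                          # loops forever: read() a batch, write() (publish), reconnect on error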
def ingestBulletinFile(self, igniter):

    from DiskReader import DiskReader
    import bulletinManager
    import bulletinManagerAm
    from PullFTP import PullFTP

    sleep_sec = 1
    if self.source.type == 'pull-bulletin' or self.source.pull_script != None:
        sleep_sec = self.source.pull_sleep

    bullManager = bulletinManager.bulletinManager(
                      self.ingestDir,
                      self.logger,
                      self.ingestDir,
                      99999,
                      '\n',
                      self.source.extension,
                      self.source.routingTable,
                      self.source.mapEnteteDelai,
                      self.source,
                      self.source.addStationInFilename)

    if self.source.bulletin_type == 'am':
        bullManager = bulletinManagerAm.bulletinManagerAm(
                          self.ingestDir,
                          self.logger,
                          self.ingestDir,
                          99999,
                          '\n',
                          self.source.extension,
                          self.source.routingTable,
                          self.source.addSMHeader,
                          PXPaths.STATION_TABLE,
                          self.source.mapEnteteDelai,
                          self.source,
                          self.source.addStationInFilename)

    if self.source.nodups:
        self.fileCache = CacheManager(maxEntries=self.source.cache_size, timeout=8*3600)

    reader = DiskReader(bullManager.pathSource,
                        self.source.batch,
                        self.source.validation,
                        self.source.patternMatching,
                        self.source.mtime,
                        False,
                        self.source.logger,
                        self.source.sorter,
                        self.source)

    while True:
        # If a SIGHUP signal is received ...
        if igniter.reloadMode == True:
            # We assign the defaults, reread the configuration file for the source
            # and reread all configuration files for the clients (all this in __init__)
            if self.source.type == 'filter-bulletin':
                self.source.__init__(self.source.name, self.source.logger, True, True)
            else:
                self.source.__init__(self.source.name, self.source.logger)

            bullManager = bulletinManager.bulletinManager(
                              self.ingestDir,
                              self.logger,
                              self.ingestDir,
                              99999,
                              '\n',
                              self.source.extension,
                              self.source.routingTable,
                              self.source.mapEnteteDelai,
                              self.source,
                              self.source.addStationInFilename)

            if self.source.bulletin_type == 'am':
                bullManager = bulletinManagerAm.bulletinManagerAm(
                                  self.ingestDir,
                                  self.logger,
                                  self.ingestDir,
                                  99999,
                                  '\n',
                                  self.source.extension,
                                  self.source.routingTable,
                                  self.source.addSMHeader,
                                  PXPaths.STATION_TABLE,
                                  self.source.mapEnteteDelai,
                                  self.source,
                                  self.source.addStationInFilename)

            if self.source.nodups:
                self.fileCache = CacheManager(maxEntries=self.source.cache_size, timeout=8*3600)

            reader = DiskReader(bullManager.pathSource,
                                self.source.batch,
                                self.source.validation,
                                self.source.patternMatching,
                                self.source.mtime,
                                False,
                                self.source.logger,
                                self.source.sorter,
                                self.source)

            self.logger.info("Receiver has been reloaded")
            igniter.reloadMode = False

        # pull files into the rxq directory if in pull mode
        if self.source.type == 'pull-bulletin' or self.source.pull_script != None:
            files = []
            sleeping = os.path.isfile(PXPaths.RXQ + self.source.name + '/.sleep')

            if self.source.type == 'pull-bulletin':
                puller = PullFTP(self.source, self.logger, sleeping)
                files = puller.get()
                puller.close()
            elif self.source.pull_script != None:
                files = self.source.pull_script(self.source, self.logger, sleeping)

            if not sleeping:
                self.logger.debug("Number of files pulled = %s" % len(files))
            else:
                self.logger.info("This pull is sleeping")

        # normal diskreader call for files
        reader.read()

        # processing the list if necessary...
        if self.source.lx_execfile != None and len(reader.sortedFiles) > 0:
            sfiles = []
            sfiles.extend(reader.sortedFiles)
            self.logger.info("%d files processed with lx_script" % len(sfiles))
            sortedFiles = self.source.run_lx_script(sfiles, self.source.logger)
            reader.sortedFiles = sortedFiles

        # continue normally
        data = reader.getFilesContent(reader.batch)

        if len(data) == 0:
            time.sleep(sleep_sec)
            continue
        else:
            self.logger.info("%d bulletins will be ingested", len(data))

        # Write (and name correctly) the bulletins to disk, erase them after
        for index in range(len(data)):

            # ignore duplicates if required
            duplicate = self.source.nodups and self.fileCache.find(data[index], 'md5') is not None

            #nb_bytes = len(data[index])
            #self.logger.info("Reading %s: %d bytes" % (reader.sortedFiles[index], nb_bytes))

            if not duplicate:

                # converting the file if necessary
                if self.source.fx_execfile != None:

                    file = reader.sortedFiles[index]
                    fxfile = self.source.run_fx_script(file, self.source.logger)

                    # conversion did not work
                    if fxfile == None:
                        self.logger.warning("FX script ignored the file : %s" % os.path.basename(file))
                        os.unlink(file)
                        continue

                    # file already in proper format
                    elif fxfile == file:
                        self.logger.warning("FX script kept the file as is : %s" % os.path.basename(file))

                    # file converted...
                    else:
                        self.logger.info("FX script modified %s to %s "
                                         % (os.path.basename(file), os.path.basename(fxfile)))
                        os.unlink(file)
                        fp = open(fxfile, 'r')
                        dx = fp.read()
                        fp.close()
                        reader.sortedFiles[index] = fxfile
                        data[index] = dx

                # writing/ingesting the bulletin
                if isinstance(bullManager, bulletinManagerAm.bulletinManagerAm):
                    bullManager.writeBulletinToDisk(data[index], True)
                else:
                    bullManager.writeBulletinToDisk(data[index], True, True)

            try:
                file = reader.sortedFiles[index]
                os.unlink(file)
                if duplicate:
                    self.logger.info("suppressed duplicate file %s", os.path.basename(file))
                self.logger.debug("%s has been erased", os.path.basename(file))
            except OSError, e:
                (type, value, tb) = sys.exc_info()
                self.logger.error("Unable to unlink %s ! Type: %s, Value: %s"
                                  % (reader.sortedFiles[index], type, value))
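# Hedged sketch of the contract the fx_execfile hook above implies: the
# configured script receives one file path plus a logger, and its return
# value selects one of the three branches in ingestBulletinFile().
# Everything below (name, extensions, toy conversion) is illustrative,
# not part of the code base.
def fx_script(path, logger):
    # None -> "FX script ignored the file": caller unlinks it and skips it
    if path.endswith('.tmp'):
        return None
    # same path -> "FX script kept the file as is": ingested unchanged
    if path.endswith('.bul'):
        return path
    # new path -> caller unlinks the original and ingests the converted file
    newpath = path + '.bul'
    open(newpath, 'w').write(open(path, 'r').read().upper())  # toy conversion
    return newpath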
def ingestSingleFile(self, igniter):

    from DiskReader import DiskReader
    from DirectRoutingParser import DirectRoutingParser
    from PullFTP import PullFTP

    if self.source.routemask:
        self.drp = DirectRoutingParser(self.source.routingTable, self.allNames,
                                       self.logger, self.source.routing_version)
        self.drp.parse()

    if self.source.nodups:
        self.fileCache = CacheManager(maxEntries=self.source.cache_size, timeout=8*3600)

    reader = DiskReader(self.ingestDir,
                        self.source.batch,
                        self.source.validation,
                        self.source.patternMatching,
                        self.source.mtime,
                        False,
                        self.source.logger,
                        self.source.sorter,
                        self.source)

    sleep_sec = 1
    if self.source.type == 'pull-file' or self.source.pull_script != None:
        sleep_sec = self.source.pull_sleep

    while True:
        if igniter.reloadMode == True:
            # We assign the defaults, reread the configuration file for the source
            # and reread all configuration files for the clients (all this in __init__)
            if self.source.type == 'filter':
                self.source.__init__(self.source.name, self.source.logger, True, True)
            else:
                self.source.__init__(self.source.name, self.source.logger)

            if self.source.routemask:
                self.drp = DirectRoutingParser(self.source.routingTable, self.allNames,
                                               self.logger, self.source.routing_version)
                self.drp.parse()

            if self.source.nodups:
                self.fileCache = CacheManager(maxEntries=self.source.cache_size, timeout=8*3600)

            reader = DiskReader(self.ingestDir,
                                self.source.batch,
                                self.source.validation,
                                self.source.patternMatching,
                                self.source.mtime,
                                False,
                                self.source.logger,
                                self.source.sorter,
                                self.source)

            self.logger.info("Receiver has been reloaded")
            igniter.reloadMode = False

        # pull files into the rxq directory if in pull mode
        if self.source.type == 'pull-file' or self.source.pull_script != None:
            files = []
            sleeping = os.path.isfile(PXPaths.RXQ + self.source.name + '/.sleep')

            if self.source.type == 'pull-file':
                puller = PullFTP(self.source, self.logger, sleeping)
                files = puller.get()
                puller.close()
            elif self.source.pull_script != None:
                files = self.source.pull_script(self.source, self.logger, sleeping)

            if not sleeping:
                self.logger.debug("Number of files pulled = %s" % len(files))
            else:
                self.logger.info("This pull is sleeping")

        # normal diskreader call for files
        reader.read()

        if len(reader.sortedFiles) <= 0:
            time.sleep(sleep_sec)
            continue

        sortedFiles = reader.sortedFiles[:self.source.batch]

        # processing the list if necessary...
        if self.source.lx_execfile != None:
            sfiles = []
            sfiles.extend(sortedFiles)
            self.logger.info("%d files processed with lx_script" % len(sfiles))
            sortedFiles = self.source.run_lx_script(sfiles, self.source.logger)

        self.logger.info("%d files will be ingested" % len(sortedFiles))

        for file in sortedFiles:
            self.ingestFile(file)
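# Hedged note on the '.sleep' sentinel tested above: an operator can put a
# pull source to sleep by creating an empty .sleep file in its RXQ directory
# (the loop keeps running but logs "This pull is sleeping"), and resume it by
# removing the file. 'source' stands for a configured Source object.
sleepFile = PXPaths.RXQ + source.name + '/.sleep'
open(sleepFile, 'w').close()   # put the pull to sleep
os.unlink(sleepFile)           # resume it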
from Logger import *
from MessageAFTN import MessageAFTN
from DiskReader import DiskReader
from MessageParser import MessageParser
from Sourlient import Sourlient

logger = Logger('/apps/px/aftn/log/mm.log', 'DEBUG', 'mm')
logger = logger.getLogger()

sourlient = Sourlient('aftn', logger)

print "Max length = %d" % MessageAFTN.MAX_TEXT_SIZE

mm = MessageManager(logger, sourlient)
reader = DiskReader("/apps/px/bulletins", 8)
reader.read()
reader.sort()

"""
for file in reader.getFilesContent(8):
    print file
    mm.setInfos(MessageParser(file).getHeader())
    mm.printInfos()
    if mm.header:
        myMessage = MessageAFTN(logger, file, mm.stationID, mm.originatorAddress,
                                mm.priority, mm.destAddress, mm.CSN,
                                mm.filingTime, mm.dateTime)
        myMessage.printInfos()
"""

for file in reader.getFilesContent(8):
class senderWmo(gateway.gateway):

    def __init__(self, path, client, logger):
        gateway.gateway.__init__(self, path, client, logger)

        self.client = client

        self.establishConnection()

        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,            # Number of files we read each time
                                 self.client.validation,       # name validation
                                 self.client.patternMatching,  # pattern matching
                                 self.client.mtime,            # we don't check modification time
                                 True,                         # priority tree
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

        # Mechanism to eliminate multiple copies of a bulletin
        self.cacheManager = CacheManager(maxEntries=self.client.cache_size, timeout=8*3600)

        # WMO's maximum bulletin size is 500 000 bytes
        self.set_maxLength(self.client.maxLength)

    def set_maxLength(self, value):
        if value <= 0:
            value = 500000
        self.maxLength = value

    def shutdown(self):
        gateway.gateway.shutdown(self)

        resteDuBuffer, nbBullEnv = self.unSocketManagerWmo.closeProperly()
        self.write(resteDuBuffer)

        self.logger.info("The senderWmo has stopped. In-progress processing completed successfully.")

    def establishConnection(self):
        # Instantiation of the socketManagerWmo
        self.logger.debug("Instantiation of the socketManagerWmo")
        self.unSocketManagerWmo = \
            socketManagerWmo.socketManagerWmo(self.logger, type='master',
                                              port=self.client.port,
                                              remoteHost=self.client.host,
                                              timeout=self.client.timeout,
                                              flow=self.client)

    def read(self):
        if self.igniter.reloadMode == True:
            # We assign the defaults and reread the configuration file (in __init__)
            self.client.__init__(self.client.name, self.client.logger)
            self.set_maxLength(self.client.maxLength)
            self.resetReader()
            self.cacheManager.clear()
            self.logger.info("Cache has been cleared")
            self.logger.info("Sender WMO has been reloaded")
            self.igniter.reloadMode = False
        self.reader.read()
        return self.reader.getFilesContent(self.client.batch)

    def write(self, data):
        self.logger.info("%d new bulletins will be sent", len(data))

        for index in range(len(data)):
            self.logger.start_timer()
            path = self.reader.sortedFiles[index]
            basename = os.path.basename(path)

            try:
                tosplit = self.need_split(data[index])

                # needs to be segmented...
                if tosplit:
                    succes, nbBytesSent = self.write_segmented_data(data[index], path)
                    # all parts were cached... nothing to do
                    if succes and nbBytesSent == 0:
                        self.logger.delivered("(%i Bytes) Bulletin %s delivered"
                                              % (len(data[index]), basename), path)
                        self.unlink_file(path)
                        continue

                # send the entire bulletin
                else:
                    # if in cache then it was already sent... nothing to do
                    # priority 0 is retransmission, no check for duplicates
                    priority = path.split('/')[-3]
                    if self.client.nodups and priority != '0' and self.in_cache(data[index], True, path):
                        #PS... same extra unlink as in the AM sender. The call above uses True, should it be False?
                        #self.unlink_file( self.reader.sortedFiles[index] )
                        continue

                    succes, nbBytesSent = self.write_data(data[index])

                # If the bulletin was sent successfully, erase the file.
                if succes:
                    self.logger.delivered("(%i Bytes) Bulletin %s delivered"
                                          % (nbBytesSent, basename), path, nbBytesSent)
                    self.unlink_file(path)
                else:
                    self.logger.info("%s: Sending problem" % path)

            except Exception, e:
                # e==104 or e==110 or e==32 or e==107 => connection broken
                (type, value, tb) = sys.exc_info()
                self.logger.error("Type: %s, Value: %s" % (type, value))

        # Log infos about tx speed
        if (self.totBytes > 1000000):
            self.logger.info(self.printSpeed() + " Bytes/sec")

        # Log infos about caching
        (stats, cached, total) = self.cacheManager.getStats()
        if total:
            percentage = "%2.2f %% of the last %i requests were cached (implied %i files were deleted)" \
                         % (float(cached) / total * 100, total, cached)
        else:
            percentage = "No entries in the cache"
        self.logger.info("Caching stats: %s => %s" % (str(stats), percentage))
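# Hedged sketch of the dedup pattern shared by the senders above: outgoing
# payloads are checksummed into the CacheManager, and a find() hit means the
# same data was seen within the timeout window (a miss registers the entry
# automatically, per the in_cache() comment in senderAMQP). maxEntries and
# the payload are illustrative values.
cache = CacheManager(maxEntries=60000, timeout=8*3600)

payload = "SACN31 CWAO 311200 ..."
if cache.find(payload, 'md5') is not None:
    print "duplicate: suppress the send and unlink the queue file"
else:
    print "first sighting: entry registered, send the bulletin"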