def ingestBulletinFile(self, igniter):
    """Main ingestion loop for bulletin files.

    Builds a bulletin manager (AM-specific when the source is of type
    'am'), an optional duplicate-suppression cache, and a DiskReader,
    then loops forever: honouring SIGHUP reload requests, optionally
    pulling remote files (FTP or user script) into the rxq directory,
    reading batches from disk, skipping duplicates, running optional
    fx (per-file conversion) and lx (list-processing) scripts, writing
    each bulletin to disk via the manager and erasing the source file.

    igniter -- controller object whose reloadMode flag is set by the
               SIGHUP handler; it is reset here after reloading.
    This method never returns (infinite loop).
    """
    # Local imports -- presumably deferred to avoid import cycles at
    # module load time; TODO confirm.
    from DiskReader import DiskReader
    import bulletinManager
    import bulletinManagerAm
    from PullFTP import PullFTP

    # Idle delay (seconds) when a read yields no data; pull sources
    # use their configured pull_sleep instead of the 1s default.
    sleep_sec = 1
    if self.source.type == 'pull-bulletin' or self.source.pull_script != None :
        sleep_sec = self.source.pull_sleep

    # Default bulletin manager.
    # NOTE(review): 99999 and '\n' are positional arguments whose
    # meaning (size limit? line separator?) is not visible here --
    # confirm against the bulletinManager constructor.
    bullManager = bulletinManager.bulletinManager(
        self.ingestDir,
        self.logger,
        self.ingestDir,
        99999,
        '\n',
        self.source.extension,
        self.source.routingTable,
        self.source.mapEnteteDelai,
        self.source,
        self.source.addStationInFilename)

    # AM sources get the AM-specific manager (extra SM-header flag and
    # station-table arguments); it replaces the default one.
    if self.source.bulletin_type == 'am' :
        bullManager = bulletinManagerAm.bulletinManagerAm(
            self.ingestDir,
            self.logger,
            self.ingestDir,
            99999,
            '\n',
            self.source.extension,
            self.source.routingTable,
            self.source.addSMHeader,
            PXPaths.STATION_TABLE,
            self.source.mapEnteteDelai,
            self.source,
            self.source.addStationInFilename)

    # Duplicate suppression: md5-keyed cache with an 8-hour timeout.
    if self.source.nodups :
        self.fileCache = CacheManager(maxEntries=self.source.cache_size, timeout=8*3600)

    reader = DiskReader(bullManager.pathSource,
                        self.source.batch,
                        self.source.validation,
                        self.source.patternMatching,
                        self.source.mtime,
                        False,
                        self.source.logger,
                        self.source.sorter,
                        self.source)

    while True:

        # If a SIGHUP signal is received: rebuild manager, cache and
        # reader exactly as at startup, from the re-read configuration.
        if igniter.reloadMode == True:
            # We assign the defaults, reread configuration file for the source
            # and reread all configuration file for the clients (all this in __init__)
            # NOTE(review): filter-bulletin sources take two extra
            # boolean __init__ arguments -- meaning not visible here.
            if self.source.type == 'filter-bulletin' :
                self.source.__init__(self.source.name, self.source.logger, True, True)
            else :
                self.source.__init__(self.source.name, self.source.logger)

            bullManager = bulletinManager.bulletinManager(
                self.ingestDir,
                self.logger,
                self.ingestDir,
                99999,
                '\n',
                self.source.extension,
                self.source.routingTable,
                self.source.mapEnteteDelai,
                self.source,
                self.source.addStationInFilename)

            if self.source.bulletin_type == 'am' :
                bullManager = bulletinManagerAm.bulletinManagerAm(
                    self.ingestDir,
                    self.logger,
                    self.ingestDir,
                    99999,
                    '\n',
                    self.source.extension,
                    self.source.routingTable,
                    self.source.addSMHeader,
                    PXPaths.STATION_TABLE,
                    self.source.mapEnteteDelai,
                    self.source,
                    self.source.addStationInFilename)

            if self.source.nodups :
                self.fileCache = CacheManager(maxEntries=self.source.cache_size, timeout=8*3600)

            reader = DiskReader(bullManager.pathSource,
                                self.source.batch,
                                self.source.validation,
                                self.source.patternMatching,
                                self.source.mtime,
                                False,
                                self.source.logger,
                                self.source.sorter,
                                self.source)

            self.logger.info("Receiver has been reloaded")
            igniter.reloadMode = False

        # pull files in rxq directory if in pull mode
        if self.source.type == 'pull-bulletin' or self.source.pull_script != None :
            files = []
            # A '.sleep' marker file in the source's RXQ directory
            # suspends the pull (puller still runs, but quietly).
            sleeping = os.path.isfile(PXPaths.RXQ + self.source.name + '/.sleep')

            if self.source.type == 'pull-bulletin' :
                puller = PullFTP(self.source,self.logger,sleeping)
                files = puller.get()
                puller.close()
            elif self.source.pull_script != None :
                files = self.source.pull_script(self.source,self.logger,sleeping)

            if not sleeping :
                self.logger.debug("Number of files pulled = %s" % len(files) )
            else :
                self.logger.info("This pull is sleeping")

        # normal diskreader call for files
        reader.read()

        # processing the list if necessary: the lx script may filter,
        # reorder or replace the sorted file list.
        if self.source.lx_execfile != None and len(reader.sortedFiles) > 0:
            sfiles = []
            sfiles.extend(reader.sortedFiles)
            self.logger.info("%d files process with lx_script" % len(sfiles))
            sortedFiles = self.source.run_lx_script(sfiles,self.source.logger)
            reader.sortedFiles = sortedFiles

        # continue normally
        data = reader.getFilesContent(reader.batch)

        # Nothing to do: wait and poll again.
        if len(data) == 0:
            time.sleep(sleep_sec)
            continue
        else:
            self.logger.info("%d bulletins will be ingested", len(data))

        # Write (and name correctly) the bulletins to disk, erase them after
        for index in range(len(data)):

            # ignore duplicate if requiered (md5 of the file content;
            # find() also inserts the entry when absent -- presumably,
            # as no explicit add is done here; TODO confirm).
            duplicate = self.source.nodups and self.fileCache.find(data[index], 'md5') is not None

            #nb_bytes = len(data[index])
            #self.logger.info("Lecture de %s: %d bytes" % (reader.sortedFiles[index], nb_bytes))

            if not duplicate :

                # converting the file if necessary
                if self.source.fx_execfile != None :

                    file = reader.sortedFiles[index]
                    fxfile = self.source.run_fx_script(file,self.source.logger)

                    # convertion did not work: drop the file entirely.
                    if fxfile == None :
                        self.logger.warning("FX script ignored the file : %s" % os.path.basename(file) )
                        os.unlink(file)
                        continue

                    # file already in proper format
                    elif fxfile == file :
                        self.logger.warning("FX script kept the file as is : %s" % os.path.basename(file) )

                    # file converted: switch both the path and the
                    # in-memory content to the converted file.
                    else :
                        self.logger.info("FX script modified %s to %s " % (os.path.basename(file),os.path.basename(fxfile)) )
                        os.unlink(file)
                        fp = open(fxfile,'r')
                        dx = fp.read()
                        fp.close()
                        reader.sortedFiles[index] = fxfile
                        data[index] = dx

                # writing/ingesting the bulletin (the AM manager takes
                # one fewer positional flag than the generic one).
                if isinstance(bullManager,bulletinManagerAm.bulletinManagerAm):
                    bullManager.writeBulletinToDisk(data[index], True)
                else :
                    bullManager.writeBulletinToDisk(data[index], True, True)

            # Erase the source file in all cases (ingested or duplicate).
            try:
                file = reader.sortedFiles[index]
                os.unlink(file)
                if duplicate :
                    self.logger.info("suppressed duplicate file %s", os.path.basename(file))
                self.logger.debug("%s has been erased", os.path.basename(file))
            except OSError, e:
                (type, value, tb) = sys.exc_info()
                self.logger.error("Unable to unlink %s ! Type: %s, Value: %s" % (reader.sortedFiles[index], type, value))
class senderWmo(gateway.gateway): def __init__(self,path,client,logger): gateway.gateway.__init__(self, path, client, logger) self.client = client self.establishConnection() self.reader = DiskReader(PXPaths.TXQ + self.client.name, self.client.batch, # Number of files we read each time self.client.validation, # name validation self.client.patternMatching, # pattern matching self.client.mtime, # we don't check modification time True, # priority tree self.logger, eval(self.client.sorter), self.client) # Mechanism to eliminate multiple copies of a bulletin self.cacheManager = CacheManager(maxEntries=self.client.cache_size, timeout=8*3600) # WMO's maximum bulletin size is 500 000 bytes self.set_maxLength( self.client.maxLength ) def set_maxLength(self,value): if value <= 0 : value = 500000 self.maxLength = value def shutdown(self): gateway.gateway.shutdown(self) resteDuBuffer, nbBullEnv = self.unSocketManagerWmo.closeProperly() self.write(resteDuBuffer) self.logger.info("Le senderWmo est mort. 
Traitement en cours reussi.") def establishConnection(self): # Instanciation du socketManagerWmo self.logger.debug("Instanciation du socketManagerWmo") self.unSocketManagerWmo = \ socketManagerWmo.socketManagerWmo( self.logger,type='master', \ port=self.client.port,\ remoteHost=self.client.host, timeout=self.client.timeout, flow=self.client) def read(self): if self.igniter.reloadMode == True: # We assign the defaults and reread the configuration file (in __init__) self.client.__init__(self.client.name, self.client.logger) self.set_maxLength( self.client.maxLength ) self.resetReader() self.cacheManager.clear() self.logger.info("Cache has been cleared") self.logger.info("Sender WMO has been reloaded") self.igniter.reloadMode = False self.reader.read() return self.reader.getFilesContent(self.client.batch) def write(self,data): #self.logger.info("%d nouveaux bulletins sont envoyes",len(data)) self.logger.info("%d new bulletins will be sent", len(data)) for index in range(len(data)): self.logger.start_timer() path = self.reader.sortedFiles[index] basename = os.path.basename( path ) try: tosplit = self.need_split( data[index] ) # need to be segmented... if tosplit : succes, nbBytesSent = self.write_segmented_data( data[index], path ) # all parts were cached... nothing to do if succes and nbBytesSent == 0 : self.logger.delivered("(%i Bytes) Bulletin %s delivered" % (len(data[index]), basename),path) self.unlink_file( path ) continue # send the entire bulletin else : # if in cache than it was already sent... nothing to do # priority 0 are retransmission and no check for duplicate priority = path.split('/')[-3] if self.client.nodups and priority != '0' and self.in_cache( data[index], True, path ) : #PS... same extra unlink as in AM sender call above is true, should it be false? #self.unlink_file( self.reader.sortedFiles[index] ) continue succes, nbBytesSent = self.write_data( data[index] ) #If the bulletin was sent successfully, erase the file. 
if succes: self.logger.delivered("(%i Bytes) Bulletin %s delivered" % (nbBytesSent, basename),path,nbBytesSent) self.unlink_file( path ) else: self.logger.info("%s: Sending problem" % path ) except Exception, e: # e==104 or e==110 or e==32 or e==107 => connection broken (type, value, tb) = sys.exc_info() self.logger.error("Type: %s, Value: %s" % (type, value)) # Log infos about tx speed if (self.totBytes > 1000000): self.logger.info(self.printSpeed() + " Bytes/sec") # Log infos about caching (stats, cached, total) = self.cacheManager.getStats() if total: percentage = "%2.2f %% of the last %i requests were cached (implied %i files were deleted)" % (cached/total * 100, total, cached) else: percentage = "No entries in the cache" self.logger.info("Caching stats: %s => %s" % (str(stats), percentage))
print "Longueur Max = %d" % MessageAFTN.MAX_TEXT_SIZE mm = MessageManager(logger, sourlient) reader = DiskReader("/apps/px/bulletins", 8) reader.read() reader.sort() """ for file in reader.getFilesContent(8): print file mm.setInfos(MessageParser(file).getHeader()) mm.printInfos() if mm.header: myMessage = MessageAFTN(logger, file, mm.stationID, mm.originatorAddress,mm.priority, mm.destAddress, mm.CSN, mm.filingTime, mm.dateTime) myMessage.printInfos() """ for file in reader.getFilesContent(8): #mm.setInfos('AACN02 CWAO13') myMessage = MessageAFTN(logger, file, mm.stationID, mm.address, 'GG', ['BIRDZQZZ', 'CYYZOWAC'], mm.CSN, '121800' , '12180001') #myMessage = MessageAFTN(logger, file, mm.stationID, mm.address, mm.priority, # mm.destAddress, mm.CSN, mm.filingTime, mm.dateTime) myMessage.printInfos() print mm.addHeaderToMessage(myMessage)
print "Longueur Max = %d" % MessageAFTN.MAX_TEXT_SIZE mm = MessageManager(logger, sourlient) reader = DiskReader("/apps/px/bulletins", 8) reader.read() reader.sort() """ for file in reader.getFilesContent(8): print file mm.setInfos(MessageParser(file).getHeader()) mm.printInfos() if mm.header: myMessage = MessageAFTN(logger, file, mm.stationID, mm.originatorAddress,mm.priority, mm.destAddress, mm.CSN, mm.filingTime, mm.dateTime) myMessage.printInfos() """ for file in reader.getFilesContent(8): #mm.setInfos('AACN02 CWAO13') myMessage = MessageAFTN(logger, file, mm.stationID, mm.address, 'GG', ['BIRDZQZZ', 'CYYZOWAC'], mm.CSN, '121800', '12180001') #myMessage = MessageAFTN(logger, file, mm.stationID, mm.address, mm.priority, # mm.destAddress, mm.CSN, mm.filingTime, mm.dateTime) myMessage.printInfos() print mm.addHeaderToMessage(myMessage)
def ingestBulletinFile(self, igniter):
    """Ingest bulletin files in an endless loop.

    Sets up a bulletin manager (AM variant for 'am' sources), an
    optional duplicate cache and a DiskReader, then repeatedly:
    handles SIGHUP reloads, pulls remote files when configured,
    reads a batch, drops duplicates, applies optional fx/lx scripts,
    writes bulletins via the manager and removes the source files.

    igniter -- holds the reloadMode flag set by the SIGHUP handler.
    Does not return.
    """
    # Deferred imports -- presumably to dodge circular imports; confirm.
    from DiskReader import DiskReader
    import bulletinManager
    import bulletinManagerAm
    from PullFTP import PullFTP

    # Sleep between empty polls; pull sources override the 1s default.
    sleep_sec = 1
    if self.source.type == 'pull-bulletin' or self.source.pull_script != None:
        sleep_sec = self.source.pull_sleep

    # NOTE(review): the 99999 and '\n' positional arguments are opaque
    # here -- check the bulletinManager constructor for their meaning.
    bullManager = bulletinManager.bulletinManager(
        self.ingestDir,
        self.logger,
        self.ingestDir,
        99999,
        '\n',
        self.source.extension,
        self.source.routingTable,
        self.source.mapEnteteDelai,
        self.source,
        self.source.addStationInFilename)

    # AM bulletins use the specialised manager (SM header + station table).
    if self.source.bulletin_type == 'am':
        bullManager = bulletinManagerAm.bulletinManagerAm(
            self.ingestDir,
            self.logger,
            self.ingestDir,
            99999,
            '\n',
            self.source.extension,
            self.source.routingTable,
            self.source.addSMHeader,
            PXPaths.STATION_TABLE,
            self.source.mapEnteteDelai,
            self.source,
            self.source.addStationInFilename)

    # md5-based duplicate suppression, 8-hour expiry.
    if self.source.nodups:
        self.fileCache = CacheManager(maxEntries=self.source.cache_size, timeout=8 * 3600)

    reader = DiskReader(bullManager.pathSource,
                        self.source.batch,
                        self.source.validation,
                        self.source.patternMatching,
                        self.source.mtime,
                        False,
                        self.source.logger,
                        self.source.sorter,
                        self.source)

    while True:

        # If a SIGHUP signal is received: rebuild everything from the
        # freshly re-read configuration.
        if igniter.reloadMode == True:
            # We assign the defaults, reread configuration file for the source
            # and reread all configuration file for the clients (all this in __init__)
            if self.source.type == 'filter-bulletin':
                self.source.__init__(self.source.name, self.source.logger, True, True)
            else:
                self.source.__init__(self.source.name, self.source.logger)

            bullManager = bulletinManager.bulletinManager(
                self.ingestDir,
                self.logger,
                self.ingestDir,
                99999,
                '\n',
                self.source.extension,
                self.source.routingTable,
                self.source.mapEnteteDelai,
                self.source,
                self.source.addStationInFilename)

            if self.source.bulletin_type == 'am':
                bullManager = bulletinManagerAm.bulletinManagerAm(
                    self.ingestDir,
                    self.logger,
                    self.ingestDir,
                    99999,
                    '\n',
                    self.source.extension,
                    self.source.routingTable,
                    self.source.addSMHeader,
                    PXPaths.STATION_TABLE,
                    self.source.mapEnteteDelai,
                    self.source,
                    self.source.addStationInFilename)

            if self.source.nodups:
                self.fileCache = CacheManager(
                    maxEntries=self.source.cache_size, timeout=8 * 3600)

            reader = DiskReader(bullManager.pathSource,
                                self.source.batch,
                                self.source.validation,
                                self.source.patternMatching,
                                self.source.mtime,
                                False,
                                self.source.logger,
                                self.source.sorter,
                                self.source)

            self.logger.info("Receiver has been reloaded")
            igniter.reloadMode = False

        # pull files in rxq directory if in pull mode; a '.sleep'
        # marker file suspends the pull.
        if self.source.type == 'pull-bulletin' or self.source.pull_script != None:
            files = []
            sleeping = os.path.isfile(PXPaths.RXQ + self.source.name + '/.sleep')

            if self.source.type == 'pull-bulletin':
                puller = PullFTP(self.source, self.logger, sleeping)
                files = puller.get()
                puller.close()
            elif self.source.pull_script != None:
                files = self.source.pull_script(self.source, self.logger, sleeping)

            if not sleeping:
                self.logger.debug("Number of files pulled = %s" % len(files))
            else:
                self.logger.info("This pull is sleeping")

        # normal diskreader call for files
        reader.read()

        # processing the list if necessary: the lx script may rewrite
        # the sorted file list wholesale.
        if self.source.lx_execfile != None and len(reader.sortedFiles) > 0:
            sfiles = []
            sfiles.extend(reader.sortedFiles)
            self.logger.info("%d files process with lx_script" % len(sfiles))
            sortedFiles = self.source.run_lx_script(
                sfiles, self.source.logger)
            reader.sortedFiles = sortedFiles

        # continue normally
        data = reader.getFilesContent(reader.batch)

        # Empty batch: idle, then poll again.
        if len(data) == 0:
            time.sleep(sleep_sec)
            continue
        else:
            self.logger.info("%d bulletins will be ingested", len(data))

        # Write (and name correctly) the bulletins to disk, erase them after
        for index in range(len(data)):

            # ignore duplicate if requiered (md5 over file content).
            duplicate = self.source.nodups and self.fileCache.find(
                data[index], 'md5') is not None

            #nb_bytes = len(data[index])
            #self.logger.info("Lecture de %s: %d bytes" % (reader.sortedFiles[index], nb_bytes))

            if not duplicate:

                # converting the file if necessary
                if self.source.fx_execfile != None:

                    file = reader.sortedFiles[index]
                    fxfile = self.source.run_fx_script(
                        file, self.source.logger)

                    # convertion did not work: discard the file.
                    if fxfile == None:
                        self.logger.warning(
                            "FX script ignored the file : %s" %
                            os.path.basename(file))
                        os.unlink(file)
                        continue

                    # file already in proper format
                    elif fxfile == file:
                        self.logger.warning(
                            "FX script kept the file as is : %s" %
                            os.path.basename(file))

                    # file converted: re-point the entry at the new
                    # file and reload its content into data[index].
                    else:
                        self.logger.info("FX script modified %s to %s " %
                                         (os.path.basename(file), os.path.basename(fxfile)))
                        os.unlink(file)
                        fp = open(fxfile, 'r')
                        dx = fp.read()
                        fp.close()
                        reader.sortedFiles[index] = fxfile
                        data[index] = dx

                # writing/ingesting the bulletin (AM manager takes one
                # fewer positional flag).
                if isinstance(bullManager, bulletinManagerAm.bulletinManagerAm):
                    bullManager.writeBulletinToDisk(data[index], True)
                else:
                    bullManager.writeBulletinToDisk(
                        data[index], True, True)

            # Remove the queue file whether ingested or duplicate.
            try:
                file = reader.sortedFiles[index]
                os.unlink(file)
                if duplicate:
                    self.logger.info("suppressed duplicate file %s",
                                     os.path.basename(file))
                self.logger.debug("%s has been erased",
                                  os.path.basename(file))
            except OSError, e:
                (type, value, tb) = sys.exc_info()
                self.logger.error(
                    "Unable to unlink %s ! Type: %s, Value: %s" %
                    (reader.sortedFiles[index], type, value))
class senderAMQP:
    """Sender that publishes queued files as AMQP messages.

    Reads files from the client's TXQ directory, suppresses duplicates
    through an md5 cache (except priority '0' retransmissions), builds
    an amqp.Message per file and publishes it to the configured
    exchange, erasing the file once delivered.
    """

    def __init__(self, client, logger):
        self.client = client          # Client object (give access to all configuration options)
        self.timeout = client.timeout # No timeout for now
        self.logger = logger          # Logger object
        self.connection = None        # The connection
        self.igniter = None           # set later via setIgniter()
        self.ssl = False              # plain TCP by default

        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,
                                 self.client.validation,
                                 self.client.patternMatching,
                                 self.client.mtime,
                                 True,
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

        self.debugFile = False

        # md5 duplicate-suppression cache, 8-hour expiry.
        self.cacheManager = CacheManager(maxEntries=self.client.cache_size, timeout=8*3600)

        # AMQP is there a max for message size
        # self.set_maxLength(self.client.maxLength)

        # statistics.
        self.totBytes = 0
        self.initialTime = time.time()
        self.finalTime = None

        self._connect()

    def printSpeed(self):
        """Return average throughput since the last call, resetting counters."""
        elapsedTime = time.time() - self.initialTime
        speed = self.totBytes/elapsedTime
        self.totBytes = 0
        self.initialTime = time.time()
        return "Speed = %i" % int(speed)

    def setIgniter(self, igniter):
        # Late injection of the controller that carries reloadMode.
        self.igniter = igniter

    def resetReader(self):
        """Rebuild the DiskReader from the (possibly reloaded) client config."""
        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,
                                 self.client.validation,
                                 self.client.patternMatching,
                                 self.client.mtime,
                                 True,
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

    def _connect(self):
        """Connect and declare the exchange, retrying forever every 5 s."""
        self.connection = None
        self.channel = None
        while True:
            try:
                host = self.client.host
                # NOTE(review): string concatenation -- assumes port is
                # already a string; confirm against client config parsing.
                if self.client.port != None :
                    host = host + ':' + self.client.port
                # connect
                self.connection = amqp.Connection(host, userid=self.client.user, password=self.client.passwd, ssl=self.ssl)
                self.channel = self.connection.channel()
                # what kind of exchange
                self.channel.access_request(self.client.exchange_realm, active=True, write=True)
                self.channel.exchange_declare(self.client.exchange_name, self.client.exchange_type, auto_delete=False)
                self.logger.info("AMQP Sender is now connected to: %s" % str(self.client.host))
                break
            except:
                # Broad catch is deliberate: keep retrying regardless of
                # the failure mode.  (Log text "cannot connected" is a
                # pre-existing typo left untouched.)
                (type, value, tb) = sys.exc_info()
                self.logger.error("AMQP Sender cannot connected to: %s" % str(self.client.host))
                self.logger.error("Type: %s, Value: %s, Sleeping 5 seconds ..." % (type, value))
                time.sleep(5)

    def shutdown(self):
        # No teardown required; connection closing is handled in run().
        pass

    def read(self):
        """Return the next batch of file contents, honouring SIGHUP reloads."""
        if self.igniter.reloadMode == True:
            # We assign the defaults and reread the configuration file (in __init__)
            if self.channel != None :
                self.channel.close()
            if self.connection != None :
                self.connection.close()
            self.client.__init__(self.client.name, self.client.logger)
            self.resetReader()
            self.cacheManager.clear()
            self.logger.info("Cache has been cleared")
            self.logger.info("Sender AMQP has been reloaded")
            self.igniter.reloadMode = False
        self.reader.read()
        return self.reader.getFilesContent(self.client.batch)

    def write(self, data):
        """Publish each message in *data*; sleep 1 s when the batch is empty.

        Skips cached duplicates and files with no destination name;
        NOTE(review): unlink_file and keyPattern are not defined in this
        class -- presumably provided elsewhere; confirm.
        """
        if len(data) >= 1:
            self.logger.info("%d new messages will be sent", len(data) )

            for index in range(len(data)):

                self.logger.start_timer()

                # data info
                msg_body = data[index]
                nbBytesSent = len(msg_body)

                # if in cache than it was already sent... nothing to do
                # priority 0 is retransmission and is never suppressed
                path = self.reader.sortedFiles[index]
                priority = path.split('/')[-3]

                if self.client.nodups and priority != '0' and self.in_cache( data[index], True, path ) :
                    #PS... same bug as in Senders AM, AMIS & WMO.
                    #self.unlink_file( self.reader.sortedFiles[index] )
                    continue

                # get/check destination Name
                basename = os.path.basename(path)
                destName, destDir = self.client.getDestInfos(basename)
                if not destName :
                    os.unlink(path)
                    self.logger.info('No destination name: %s has been erased' % path)
                    continue

                # build message: strip a trailing ':20...' timestamp
                # part from the queue filename to recover the original.
                parts = basename.split(':')
                if parts[-1][0:2] == '20' : parts = parts[:-1]
                hdr = {'filename': ':'.join(parts) }
                msg = amqp.Message(msg_body, content_type= self.client.exchange_content,application_headers=hdr)

                # exchange_key pattern: '$' marks a substitution pattern.
                exchange_key = self.client.exchange_key
                if '$' in self.client.exchange_key :
                    exchange_key = self.keyPattern(basename,self.client.exchange_key)
                self.logger.debug("exchange key = %s" % exchange_key)

                # publish message
                self.channel.basic_publish(msg, self.client.exchange_name, exchange_key )

                self.logger.delivered("(%i Bytes) Message %s delivered" % (nbBytesSent, basename),path,nbBytesSent)
                self.unlink_file( path )

                self.totBytes += nbBytesSent

        else:
            time.sleep(1)

    def run(self):
        """Endless read/write loop; on any send error, reconnect and continue."""
        while True:
            data = self.read()
            try:
                self.write(data)
            except:
                (type, value, tb) = sys.exc_info()
                self.logger.error("Sender error! Type: %s, Value: %s" % (type, value))
                # We close the connection
                try:
                    self.channel.close()
                    self.connection.close()
                except:
                    (type, value, tb) = sys.exc_info()
                    self.logger.error("Problem in closing socket! Type: %s, Value: %s" % (type, value))
                # We try to reconnect.
                self._connect()
            #time.sleep(0.2)

    # check if data in cache... if not it is added automatically
    def in_cache(self,data,unlink_it,path):
        """Return True when *data* is already cached; optionally unlink *path*."""
        already_in = False

        # If data is already in cache, we don't send it
        if self.cacheManager.find(data, 'md5') is not None:
            already_in = True
            if unlink_it :
                try:
                    os.unlink(path)
                    self.logger.info("suppressed duplicate send %s", os.path.basename(path))
                except OSError, e:
                    (type, value, tb) = sys.exc_info()
                    self.logger.info("in_cache unable to unlink %s ! Type: %s, Value: %s" % (path, type, value))

        return already_in
class senderAMQP:
    """AMQP publisher for queued files.

    Pulls files from the client TXQ directory, drops md5-cached
    duplicates (priority '0' retransmissions are always sent), wraps
    each file in an amqp.Message with a 'filename' header, publishes
    to the configured exchange and deletes the file after delivery.
    """

    def __init__(self, client, logger):
        self.client = client          # Client object (give access to all configuration options)
        self.timeout = client.timeout # No timeout for now
        self.logger = logger          # Logger object
        self.connection = None        # The connection
        self.igniter = None           # injected later through setIgniter()
        self.ssl = False

        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,
                                 self.client.validation,
                                 self.client.patternMatching,
                                 self.client.mtime,
                                 True,
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

        self.debugFile = False

        # Duplicate elimination: md5 cache with an 8-hour timeout.
        self.cacheManager = CacheManager(
            maxEntries=self.client.cache_size, timeout=8 * 3600)

        # AMQP is there a max for message size
        # self.set_maxLength(self.client.maxLength)

        # statistics.
        self.totBytes = 0
        self.initialTime = time.time()
        self.finalTime = None

        self._connect()

    def printSpeed(self):
        """Return bytes/sec since the previous call and reset the counters."""
        elapsedTime = time.time() - self.initialTime
        speed = self.totBytes / elapsedTime
        self.totBytes = 0
        self.initialTime = time.time()
        return "Speed = %i" % int(speed)

    def setIgniter(self, igniter):
        # Controller object carrying the SIGHUP reloadMode flag.
        self.igniter = igniter

    def resetReader(self):
        """Recreate the DiskReader after a configuration reload."""
        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,
                                 self.client.validation,
                                 self.client.patternMatching,
                                 self.client.mtime,
                                 True,
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

    def _connect(self):
        """Open connection + channel and declare the exchange; retry every 5 s."""
        self.connection = None
        self.channel = None
        while True:
            try:
                host = self.client.host
                # NOTE(review): '+' assumes port is a string -- confirm.
                if self.client.port != None:
                    host = host + ':' + self.client.port
                # connect
                self.connection = amqp.Connection(host, userid=self.client.user,
                                                  password=self.client.passwd, ssl=self.ssl)
                self.channel = self.connection.channel()
                # what kind of exchange
                self.channel.access_request(self.client.exchange_realm,
                                            active=True, write=True)
                self.channel.exchange_declare(self.client.exchange_name,
                                              self.client.exchange_type,
                                              auto_delete=False)
                self.logger.info("AMQP Sender is now connected to: %s" %
                                 str(self.client.host))
                break
            except:
                # Intentional blanket catch: connection setup must retry
                # on any failure.  ("cannot connected" typo is original.)
                (type, value, tb) = sys.exc_info()
                self.logger.error("AMQP Sender cannot connected to: %s" %
                                  str(self.client.host))
                self.logger.error(
                    "Type: %s, Value: %s, Sleeping 5 seconds ..." %
                    (type, value))
                time.sleep(5)

    def shutdown(self):
        # Nothing to release here; run() closes channel/connection itself.
        pass

    def read(self):
        """Fetch the next batch of file contents; apply pending SIGHUP reload."""
        if self.igniter.reloadMode == True:
            # We assign the defaults and reread the configuration file (in __init__)
            if self.channel != None:
                self.channel.close()
            if self.connection != None:
                self.connection.close()
            self.client.__init__(self.client.name, self.client.logger)
            self.resetReader()
            self.cacheManager.clear()
            self.logger.info("Cache has been cleared")
            self.logger.info("Sender AMQP has been reloaded")
            self.igniter.reloadMode = False
        self.reader.read()
        return self.reader.getFilesContent(self.client.batch)

    def write(self, data):
        """Publish every message in *data*; idle one second on an empty batch.

        NOTE(review): unlink_file and keyPattern are referenced but not
        defined in this class -- presumably mixed in elsewhere; confirm.
        """
        if len(data) >= 1:
            self.logger.info("%d new messages will be sent", len(data))

            for index in range(len(data)):

                self.logger.start_timer()

                # data info
                msg_body = data[index]
                nbBytesSent = len(msg_body)

                # if in cache than it was already sent... nothing to do
                # priority 0 is retransmission and is never suppressed
                path = self.reader.sortedFiles[index]
                priority = path.split('/')[-3]

                if self.client.nodups and priority != '0' and self.in_cache(
                        data[index], True, path):
                    #PS... same bug as in Senders AM, AMIS & WMO.
                    #self.unlink_file( self.reader.sortedFiles[index] )
                    continue

                # get/check destination Name
                basename = os.path.basename(path)
                destName, destDir = self.client.getDestInfos(basename)
                if not destName:
                    os.unlink(path)
                    self.logger.info(
                        'No destination name: %s has been erased' % path)
                    continue

                # build message; drop a trailing ':20...' timestamp
                # segment from the queue filename first.
                parts = basename.split(':')
                if parts[-1][0:2] == '20':
                    parts = parts[:-1]
                hdr = {'filename': ':'.join(parts)}
                msg = amqp.Message(msg_body,
                                   content_type=self.client.exchange_content,
                                   application_headers=hdr)

                # exchange_key pattern ('$' triggers substitution).
                exchange_key = self.client.exchange_key
                if '$' in self.client.exchange_key:
                    exchange_key = self.keyPattern(basename,
                                                   self.client.exchange_key)
                self.logger.debug("exchange key = %s" % exchange_key)

                # publish message
                self.channel.basic_publish(msg, self.client.exchange_name,
                                           exchange_key)

                self.logger.delivered(
                    "(%i Bytes) Message %s delivered" %
                    (nbBytesSent, basename), path, nbBytesSent)
                self.unlink_file(path)

                self.totBytes += nbBytesSent

        else:
            time.sleep(1)

    def run(self):
        """Read/send forever; on failure, close and re-establish the connection."""
        while True:
            data = self.read()
            try:
                self.write(data)
            except:
                (type, value, tb) = sys.exc_info()
                self.logger.error("Sender error! Type: %s, Value: %s" %
                                  (type, value))
                # We close the connection
                try:
                    self.channel.close()
                    self.connection.close()
                except:
                    (type, value, tb) = sys.exc_info()
                    self.logger.error(
                        "Problem in closing socket! Type: %s, Value: %s" %
                        (type, value))
                # We try to reconnect.
                self._connect()
            #time.sleep(0.2)

    # check if data in cache... if not it is added automatically
    def in_cache(self, data, unlink_it, path):
        """True when *data*'s md5 is cached; may unlink *path* on a hit."""
        already_in = False

        # If data is already in cache, we don't send it
        if self.cacheManager.find(data, 'md5') is not None:
            already_in = True
            if unlink_it:
                try:
                    os.unlink(path)
                    self.logger.info("suppressed duplicate send %s",
                                     os.path.basename(path))
                except OSError, e:
                    (type, value, tb) = sys.exc_info()
                    self.logger.info(
                        "in_cache unable to unlink %s ! Type: %s, Value: %s" %
                        (path, type, value))

        return already_in