class senderAMQP:
    """
    Publish queued files as AMQP messages to a configured exchange.

    Files are read from the PX transmission queue (PXPaths.TXQ + client
    name) via a DiskReader; duplicates may be suppressed through a
    CacheManager; each file's content is published on the configured
    AMQP exchange.

    NOTE(review): self.unlink_file() and self.keyPattern() are called
    below but not defined in this class and there is no base class --
    confirm where they are provided.
    """

    def __init__(self, client, logger):
        self.client = client           # Client object (gives access to all configuration options)
        self.timeout = client.timeout  # No timeout for now
        self.logger = logger           # Logger object
        self.connection = None         # The AMQP connection
        self.igniter = None            # Set later via setIgniter()
        self.ssl = False

        # NOTE(review): eval() of a configured sorter expression -- assumes
        # the configuration file is trusted input.
        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,
                                 self.client.validation,
                                 self.client.patternMatching,
                                 self.client.mtime,
                                 True,
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

        self.debugFile = False

        # Duplicate-suppression cache; entries expire after 8 hours.
        self.cacheManager = CacheManager(maxEntries=self.client.cache_size, timeout=8*3600)

        # AMQP is there a max for message size
        # self.set_maxLength(self.client.maxLength)

        # statistics.
        self.totBytes = 0
        self.initialTime = time.time()
        self.finalTime = None

        self._connect()

    def printSpeed(self):
        """Return the transfer speed since the last call and reset the counters."""
        elapsedTime = time.time() - self.initialTime
        speed = self.totBytes / elapsedTime
        self.totBytes = 0
        self.initialTime = time.time()
        return "Speed = %i" % int(speed)

    def setIgniter(self, igniter):
        """Attach the igniter (process controller) used for reload signalling."""
        self.igniter = igniter

    def resetReader(self):
        """Recreate the DiskReader (used after a configuration reload)."""
        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,
                                 self.client.validation,
                                 self.client.patternMatching,
                                 self.client.mtime,
                                 True,
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

    def _connect(self):
        """
        Connect to the AMQP broker and declare the exchange.

        Retries forever, sleeping 5 seconds between attempts.
        """
        self.connection = None
        self.channel = None
        while True:
            try:
                host = self.client.host
                if self.client.port is not None:
                    # str() so an integer port from the configuration also works
                    host = host + ':' + str(self.client.port)
                # connect
                self.connection = amqp.Connection(host, userid=self.client.user,
                                                  password=self.client.passwd, ssl=self.ssl)
                self.channel = self.connection.channel()
                # what kind of exchange
                self.channel.access_request(self.client.exchange_realm, active=True, write=True)
                self.channel.exchange_declare(self.client.exchange_name,
                                              self.client.exchange_type, auto_delete=False)
                self.logger.info("AMQP Sender is now connected to: %s" % str(self.client.host))
                break
            except Exception:
                # "except Exception" (not bare except) so KeyboardInterrupt
                # can still break out of this endless retry loop.
                exc_type, exc_value, _ = sys.exc_info()
                self.logger.error("AMQP Sender cannot connect to: %s" % str(self.client.host))
                self.logger.error("Type: %s, Value: %s, Sleeping 5 seconds ..." % (exc_type, exc_value))
                time.sleep(5)

    def shutdown(self):
        pass

    def read(self):
        """
        Return the next batch of file contents to send.

        Honours igniter.reloadMode: closes channel/connection, re-reads the
        client configuration, resets the reader and clears the cache.
        """
        if self.igniter.reloadMode == True:
            # We assign the defaults and reread the configuration file (in __init__)
            if self.channel is not None:
                self.channel.close()
            if self.connection is not None:
                self.connection.close()
            self.client.__init__(self.client.name, self.client.logger)
            self.resetReader()
            self.cacheManager.clear()
            self.logger.info("Cache has been cleared")
            self.logger.info("Sender AMQP has been reloaded")
            self.igniter.reloadMode = False
        self.reader.read()
        return self.reader.getFilesContent(self.client.batch)

    def write(self, data):
        """Publish each entry of data as one AMQP message; sleep 1s when idle."""
        if len(data) >= 1:
            self.logger.info("%d new messages will be sent", len(data))
            for index in range(len(data)):
                self.logger.start_timer()

                # data info
                msg_body = data[index]
                nbBytesSent = len(msg_body)

                # if in cache then it was already sent... nothing to do
                # priority 0 is retransmission and is never suppressed
                path = self.reader.sortedFiles[index]
                priority = path.split('/')[-3]
                if self.client.nodups and priority != '0' and self.in_cache(data[index], True, path):
                    #PS... same bug as in Senders AM, AMIS & WMO.
                    #self.unlink_file( self.reader.sortedFiles[index] )
                    continue

                # get/check destination Name
                basename = os.path.basename(path)
                destName, destDir = self.client.getDestInfos(basename)
                if not destName:
                    os.unlink(path)
                    self.logger.info('No destination name: %s has been erased' % path)
                    continue

                # build message: drop a trailing ':20...' part of the name
                # (presumably a timestamp component -- TODO confirm)
                parts = basename.split(':')
                if parts[-1][0:2] == '20':
                    parts = parts[:-1]
                hdr = {'filename': ':'.join(parts)}
                msg = amqp.Message(msg_body, content_type=self.client.exchange_content,
                                   application_headers=hdr)

                # exchange_key pattern ('$' triggers keyPattern substitution)
                exchange_key = self.client.exchange_key
                if '$' in self.client.exchange_key:
                    exchange_key = self.keyPattern(basename, self.client.exchange_key)
                self.logger.debug("exchange key = %s" % exchange_key)

                # publish message
                self.channel.basic_publish(msg, self.client.exchange_name, exchange_key)

                self.logger.delivered("(%i Bytes) Message %s delivered" % (nbBytesSent, basename),
                                      path, nbBytesSent)
                self.unlink_file(path)
                self.totBytes += nbBytesSent
        else:
            time.sleep(1)

    def run(self):
        """Main loop: read batches and publish them, reconnecting on error."""
        while True:
            data = self.read()
            try:
                self.write(data)
            except Exception:
                exc_type, exc_value, _ = sys.exc_info()
                self.logger.error("Sender error! Type: %s, Value: %s" % (exc_type, exc_value))
                # We close the connection
                try:
                    self.channel.close()
                    self.connection.close()
                except Exception:
                    exc_type, exc_value, _ = sys.exc_info()
                    self.logger.error("Problem in closing socket! Type: %s, Value: %s" % (exc_type, exc_value))
                # We try to reconnect.
                self._connect()
            #time.sleep(0.2)

    # check if data is in cache... if not it is added automatically
    def in_cache(self, data, unlink_it, path):
        """Return True when data was already sent; optionally unlink the duplicate file."""
        already_in = False
        # If data is already in cache, we don't send it
        if self.cacheManager.find(data, 'md5') is not None:
            already_in = True
            if unlink_it:
                try:
                    os.unlink(path)
                    self.logger.info("suppressed duplicate send %s", os.path.basename(path))
                except OSError:
                    # was "except OSError, e" (Python-2-only syntax); the bound
                    # exception was unused -- sys.exc_info() is used instead
                    exc_type, exc_value, _ = sys.exc_info()
                    self.logger.info("in_cache unable to unlink %s ! Type: %s, Value: %s" %
                                     (path, exc_type, exc_value))
        return already_in
class senderWmo(gateway.gateway):
    """
    Send queued bulletins to a WMO (TCP socket) destination.

    Reads files from the PX transmission queue through a DiskReader,
    suppresses duplicates with a CacheManager, segments bulletins larger
    than maxLength, and writes them through a socketManagerWmo.

    NOTE(review): resetReader, need_split, write_data,
    write_segmented_data, in_cache, unlink_file, printSpeed, totBytes and
    igniter are presumably provided by gateway.gateway -- confirm.
    """

    def __init__(self, path, client, logger):
        gateway.gateway.__init__(self, path, client, logger)
        self.client = client
        self.establishConnection()

        # NOTE(review): eval() of a configured sorter expression -- assumes
        # the configuration file is trusted input.
        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,           # Number of files we read each time
                                 self.client.validation,      # name validation
                                 self.client.patternMatching, # pattern matching
                                 self.client.mtime,           # we don't check modification time
                                 True,                        # priority tree
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

        # Mechanism to eliminate multiple copies of a bulletin
        self.cacheManager = CacheManager(maxEntries=self.client.cache_size, timeout=8*3600)

        # WMO's maximum bulletin size is 500 000 bytes
        self.set_maxLength(self.client.maxLength)

    def set_maxLength(self, value):
        """Set the maximum bulletin size; non-positive values fall back to the WMO limit."""
        if value <= 0:
            value = 500000
        self.maxLength = value

    def shutdown(self):
        """Flush whatever remains in the socket manager's buffer, then die."""
        gateway.gateway.shutdown(self)

        # closeProperly() hands back the unsent buffer remainder and a count
        resteDuBuffer, nbBullEnv = self.unSocketManagerWmo.closeProperly()

        self.write(resteDuBuffer)

        self.logger.info("Le senderWmo est mort.  Traitement en cours reussi.")

    def establishConnection(self):
        """Open the master-mode WMO socket manager toward the remote host."""
        # Instantiate the socketManagerWmo
        self.logger.debug("Instanciation du socketManagerWmo")
        self.unSocketManagerWmo = \
                 socketManagerWmo.socketManagerWmo(self.logger, type='master',
                                                   port=self.client.port,
                                                   remoteHost=self.client.host,
                                                   timeout=self.client.timeout,
                                                   flow=self.client)

    def read(self):
        """
        Return the next batch of bulletin contents to send.

        Honours igniter.reloadMode: re-reads the client configuration,
        resets the reader and clears the duplicate cache.
        """
        if self.igniter.reloadMode == True:
            # We assign the defaults and reread the configuration file (in __init__)
            self.client.__init__(self.client.name, self.client.logger)
            self.set_maxLength(self.client.maxLength)
            self.resetReader()
            self.cacheManager.clear()
            self.logger.info("Cache has been cleared")
            self.logger.info("Sender WMO has been reloaded")
            self.igniter.reloadMode = False
        self.reader.read()
        return self.reader.getFilesContent(self.client.batch)

    def write(self, data):
        """Send each bulletin in data, segmenting oversized ones; log speed/cache stats."""
        #self.logger.info("%d nouveaux bulletins sont envoyes",len(data))
        self.logger.info("%d new bulletins will be sent", len(data))

        for index in range(len(data)):
            self.logger.start_timer()
            path = self.reader.sortedFiles[index]
            basename = os.path.basename(path)
            try:
                tosplit = self.need_split(data[index])

                # needs to be segmented...
                if tosplit:
                    succes, nbBytesSent = self.write_segmented_data(data[index], path)
                    # all parts were cached... nothing to do
                    if succes and nbBytesSent == 0:
                        self.logger.delivered("(%i Bytes) Bulletin %s delivered" %
                                              (len(data[index]), basename), path)
                        self.unlink_file(path)
                        continue

                # send the entire bulletin
                else:
                    # if in cache then it was already sent... nothing to do
                    # priority 0 are retransmission and no check for duplicate
                    priority = path.split('/')[-3]
                    if self.client.nodups and priority != '0' and self.in_cache(data[index], True, path):
                        #PS... same extra unlink as in AM sender call above is true, should it be false?
                        #self.unlink_file( self.reader.sortedFiles[index] )
                        continue

                    succes, nbBytesSent = self.write_data(data[index])

                # If the bulletin was sent successfully, erase the file.
                if succes:
                    self.logger.delivered("(%i Bytes) Bulletin %s delivered" %
                                          (nbBytesSent, basename), path, nbBytesSent)
                    self.unlink_file(path)
                else:
                    self.logger.info("%s: Sending problem" % path)

            except Exception:
                # e==104 or e==110 or e==32 or e==107 => connection broken
                exc_type, exc_value, _ = sys.exc_info()
                self.logger.error("Type: %s, Value: %s" % (exc_type, exc_value))

        # Log infos about tx speed
        if self.totBytes > 1000000:
            self.logger.info(self.printSpeed() + " Bytes/sec")

        # Log infos about caching.  Multiply by 100.0 BEFORE dividing:
        # under Python 2 the original cached/total was integer division,
        # so the percentage was always 0 whenever cached < total.
        (stats, cached, total) = self.cacheManager.getStats()
        if total:
            percentage = "%2.2f %% of the last %i requests were cached (implied %i files were deleted)" % \
                         (cached * 100.0 / total, total, cached)
        else:
            percentage = "No entries in the cache"
        self.logger.info("Caching stats: %s => %s" % (str(stats), percentage))
class senderAMQP:
    """
    Publish queued files as AMQP messages to a configured exchange.

    Files are read from the PX transmission queue (PXPaths.TXQ + client
    name) via a DiskReader; duplicates may be suppressed through a
    CacheManager; each file's content is published on the configured
    AMQP exchange.

    NOTE(review): this file defines senderAMQP twice; at import time this
    later definition shadows the earlier one -- confirm the duplication
    is intentional.  Also, self.unlink_file() and self.keyPattern() are
    called below but not defined in this class and there is no base
    class -- confirm where they are provided.
    """

    def __init__(self, client, logger):
        self.client = client           # Client object (gives access to all configuration options)
        self.timeout = client.timeout  # No timeout for now
        self.logger = logger           # Logger object
        self.connection = None         # The AMQP connection
        self.igniter = None            # Set later via setIgniter()
        self.ssl = False

        # NOTE(review): eval() of a configured sorter expression -- assumes
        # the configuration file is trusted input.
        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,
                                 self.client.validation,
                                 self.client.patternMatching,
                                 self.client.mtime,
                                 True,
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

        self.debugFile = False

        # Duplicate-suppression cache; entries expire after 8 hours.
        self.cacheManager = CacheManager(maxEntries=self.client.cache_size, timeout=8 * 3600)

        # AMQP is there a max for message size
        # self.set_maxLength(self.client.maxLength)

        # statistics.
        self.totBytes = 0
        self.initialTime = time.time()
        self.finalTime = None

        self._connect()

    def printSpeed(self):
        """Return the transfer speed since the last call and reset the counters."""
        elapsedTime = time.time() - self.initialTime
        speed = self.totBytes / elapsedTime
        self.totBytes = 0
        self.initialTime = time.time()
        return "Speed = %i" % int(speed)

    def setIgniter(self, igniter):
        """Attach the igniter (process controller) used for reload signalling."""
        self.igniter = igniter

    def resetReader(self):
        """Recreate the DiskReader (used after a configuration reload)."""
        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,
                                 self.client.validation,
                                 self.client.patternMatching,
                                 self.client.mtime,
                                 True,
                                 self.logger,
                                 eval(self.client.sorter),
                                 self.client)

    def _connect(self):
        """
        Connect to the AMQP broker and declare the exchange.

        Retries forever, sleeping 5 seconds between attempts.
        """
        self.connection = None
        self.channel = None
        while True:
            try:
                host = self.client.host
                if self.client.port is not None:
                    # str() so an integer port from the configuration also works
                    host = host + ':' + str(self.client.port)
                # connect
                self.connection = amqp.Connection(host, userid=self.client.user,
                                                  password=self.client.passwd, ssl=self.ssl)
                self.channel = self.connection.channel()
                # what kind of exchange
                self.channel.access_request(self.client.exchange_realm, active=True, write=True)
                self.channel.exchange_declare(self.client.exchange_name,
                                              self.client.exchange_type, auto_delete=False)
                self.logger.info("AMQP Sender is now connected to: %s" % str(self.client.host))
                break
            except Exception:
                # "except Exception" (not bare except) so KeyboardInterrupt
                # can still break out of this endless retry loop.
                exc_type, exc_value, _ = sys.exc_info()
                self.logger.error("AMQP Sender cannot connect to: %s" % str(self.client.host))
                self.logger.error("Type: %s, Value: %s, Sleeping 5 seconds ..." % (exc_type, exc_value))
                time.sleep(5)

    def shutdown(self):
        pass

    def read(self):
        """
        Return the next batch of file contents to send.

        Honours igniter.reloadMode: closes channel/connection, re-reads the
        client configuration, resets the reader and clears the cache.
        """
        if self.igniter.reloadMode == True:
            # We assign the defaults and reread the configuration file (in __init__)
            if self.channel is not None:
                self.channel.close()
            if self.connection is not None:
                self.connection.close()
            self.client.__init__(self.client.name, self.client.logger)
            self.resetReader()
            self.cacheManager.clear()
            self.logger.info("Cache has been cleared")
            self.logger.info("Sender AMQP has been reloaded")
            self.igniter.reloadMode = False
        self.reader.read()
        return self.reader.getFilesContent(self.client.batch)

    def write(self, data):
        """Publish each entry of data as one AMQP message; sleep 1s when idle."""
        if len(data) >= 1:
            self.logger.info("%d new messages will be sent", len(data))
            for index in range(len(data)):
                self.logger.start_timer()

                # data info
                msg_body = data[index]
                nbBytesSent = len(msg_body)

                # if in cache then it was already sent... nothing to do
                # priority 0 is retransmission and is never suppressed
                path = self.reader.sortedFiles[index]
                priority = path.split('/')[-3]
                if self.client.nodups and priority != '0' and self.in_cache(data[index], True, path):
                    #PS... same bug as in Senders AM, AMIS & WMO.
                    #self.unlink_file( self.reader.sortedFiles[index] )
                    continue

                # get/check destination Name
                basename = os.path.basename(path)
                destName, destDir = self.client.getDestInfos(basename)
                if not destName:
                    os.unlink(path)
                    self.logger.info('No destination name: %s has been erased' % path)
                    continue

                # build message: drop a trailing ':20...' part of the name
                # (presumably a timestamp component -- TODO confirm)
                parts = basename.split(':')
                if parts[-1][0:2] == '20':
                    parts = parts[:-1]
                hdr = {'filename': ':'.join(parts)}
                msg = amqp.Message(msg_body, content_type=self.client.exchange_content,
                                   application_headers=hdr)

                # exchange_key pattern ('$' triggers keyPattern substitution)
                exchange_key = self.client.exchange_key
                if '$' in self.client.exchange_key:
                    exchange_key = self.keyPattern(basename, self.client.exchange_key)
                self.logger.debug("exchange key = %s" % exchange_key)

                # publish message
                self.channel.basic_publish(msg, self.client.exchange_name, exchange_key)

                self.logger.delivered("(%i Bytes) Message %s delivered" % (nbBytesSent, basename),
                                      path, nbBytesSent)
                self.unlink_file(path)
                self.totBytes += nbBytesSent
        else:
            time.sleep(1)

    def run(self):
        """Main loop: read batches and publish them, reconnecting on error."""
        while True:
            data = self.read()
            try:
                self.write(data)
            except Exception:
                exc_type, exc_value, _ = sys.exc_info()
                self.logger.error("Sender error! Type: %s, Value: %s" % (exc_type, exc_value))
                # We close the connection
                try:
                    self.channel.close()
                    self.connection.close()
                except Exception:
                    exc_type, exc_value, _ = sys.exc_info()
                    self.logger.error("Problem in closing socket! Type: %s, Value: %s" % (exc_type, exc_value))
                # We try to reconnect.
                self._connect()
            #time.sleep(0.2)

    # check if data is in cache... if not it is added automatically
    def in_cache(self, data, unlink_it, path):
        """Return True when data was already sent; optionally unlink the duplicate file."""
        already_in = False
        # If data is already in cache, we don't send it
        if self.cacheManager.find(data, 'md5') is not None:
            already_in = True
            if unlink_it:
                try:
                    os.unlink(path)
                    self.logger.info("suppressed duplicate send %s", os.path.basename(path))
                except OSError:
                    # was "except OSError, e" (Python-2-only syntax); the bound
                    # exception was unused -- sys.exc_info() is used instead
                    exc_type, exc_value, _ = sys.exc_info()
                    self.logger.info("in_cache unable to unlink %s ! Type: %s, Value: %s" %
                                     (path, exc_type, exc_value))
        return already_in