def __init__(self, logger=None, sourlient=None, reloadMode=False):
    AFTNPaths.normalPaths(sourlient.name)
    PXPaths.normalPaths()

    self.logger = logger                            # Logger object
    self.sourlient = sourlient                      # Sourlient object
    self.name = sourlient.name                      # Transceiver's name
    self.stationID = sourlient.stationID            # Value read from config. file
    self.otherStationID = sourlient.otherStationID  # Provider (MHS) Station ID
    self.address = sourlient.address                # 8-letter group identifying the message originator (CYHQUSER)
    self.otherAddress = sourlient.otherAddress      # 8-letter group identifying the provider's address (CYHQMHSN)
    self.routingTable = sourlient.routingTable      # Routing table name
    self.subscriber = sourlient.subscriber          # Boolean indicating if this is a subscriber or a provider

    self.bullManager = bulletinManager(PXPaths.RXQ + self.name,
                                       self.logger,
                                       PXPaths.RXQ + self.name,
                                       9999,
                                       '\n',
                                       self.sourlient.extension,
                                       self.routingTable,
                                       None,
                                       self.sourlient,
                                       True)
    self.drp = self.bullManager.drp

    self.sp = StationParser(PXPaths.STATION_TABLE, logger)
    self.sp.parse()

    self.priorities = {'1': 'FF', '2': 'FF', '3': 'GG', '4': 'GG', '5': 'GG'}

    if not reloadMode:
        self.afterInit()
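# For context, the priorities table above collapses the five numeric priority
# levels into the two indicators 'FF' and 'GG'. A minimal lookup sketch; the
# helper name and the fallback for unknown levels are assumptions, not project
# behaviour:
def priority_code(priorities, level, default='GG'):
    # Map a numeric priority level ('1'..'5') to its two-letter indicator.
    return priorities.get(str(level), default)

priorities = {'1': 'FF', '2': 'FF', '3': 'GG', '4': 'GG', '5': 'GG'}
assert priority_code(priorities, 1) == 'FF'
assert priority_code(priorities, 5) == 'GG'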
def __init__(self, rootPath=""): self.rootPath = rootPath PXPaths.normalPaths(self.rootPath) SystemManager.__init__(self) self.LOG = PXPaths.LOG # Will be used by DirCopier self.debug = 0
def test_PXPaths(self):
    PXPaths.normalPaths('')
    self.assertEqual(
        (PXPaths.ROOT, PXPaths.LIB, PXPaths.LOG, PXPaths.ETC,
         PXPaths.FXQ, PXPaths.RXQ, PXPaths.TXQ, PXPaths.DB,
         PXPaths.ROUTING_TABLE, PXPaths.STATION_TABLE,
         PXPaths.FX_CONF, PXPaths.RX_CONF, PXPaths.TX_CONF, PXPaths.TRX_CONF,
         PXPaths.SCRIPTS, PXPaths.LAT, PXPaths.LAT_RESULTS, PXPaths.LAT_TMP,
         PXPaths.SHELL_PARSER, PXPaths.PX_DATA),
        ('./', './lib/', './log/', './etc/', './fxq/', './rxq/', './txq/', './db/',
         './etc/pxRouting.conf', './etc/stations.conf',
         './etc/fx/', './etc/rx/', './etc/tx/', './etc/trx/', './etc/scripts/',
         './latencies/', './latencies/results/', './latencies/tmp/',
         './lib/shellParser.py', './data/'))
def __init__(self, latencier, imageName=None):
    PXPaths.normalPaths()
    self.text = ""                   # Results in plain text
    self.html = ""                   # Results in html
    self.latencier = latencier       # Latencier (PDS or PX)
    self.imageName = imageName       # Came from the plotter
    self.date = self.latencier.date  # Date in ISO Format
    self.setTextResults()
    self.saveResults(PXPaths.LAT_RESULTS)
def __init__(self, nopull=False, keep=False, date=None, xstats=False):
    PXPaths.normalPaths()
    self.manager = PXManager()
    #self.logger = logger.getLogger()

    # Date for which we want to obtain stats
    if date == None:
        self.date = dateLib.getYesterdayFormatted()  # ISO date
    else:
        self.date = date
    self.dateDashed = dateLib.getISODateDashed(self.date)

    self.machines = []          # Machines where the logs can be found
    self.sources = []           # Sources for which we will check arrival time of the products
    self.client = []            # Client for which we will check delivery time of the products (ONLY ONE ENTRY in the list)
    self.messages = []          # FIXME: Special messages coming from weird results

    self.nopull = nopull        # Do not pull the necessary files (we assume they are already downloaded)
    self.keep = keep            # Erase all the files present before downloading new files
    self.xstats = xstats        # Boolean that determines if we will use the xferlog in making stats

    self.goodRx = []            # Lines matching initial values
    self.goodTx = []            # Lines matching initial values
    self.goodXferlog = []       # Lines matching initial values

    self.receivingInfos = {}    # Dict. keyed by filename, each value a tuple (formatted date, date in seconds, machine)
    self.sendingInfos = {}      # Dict. keyed by filename, each value a tuple (formatted date, date in seconds, machine)
    self.xferlogInfos = {}      # Dict. keyed by filename, each value a tuple (formatted date, date in seconds, machine)

    self.stats = {}             # Final stats
    self.sortedStats = []       # Final sorted stats

    self.max = 0                # Maximum latency time in seconds
    self.min = sys.maxint       # Minimum latency time in seconds
    self.mean = 0               # Mean latency time in seconds
    self.latencyThreshold = 15  # We don't want to go over this threshold (in seconds)
    self.overThreshold = 0      # Number of files with latency over the threshold
    self.underThresholdP = 0    # Percentage of files for which the latency is equal to or under the threshold
    self.meanWaiting = 0        # Mean waiting time before being noticed by the PDS

    self.random = str(random.random())[2:]  # Unique identifier allowing the program to be run in parallel
    self.system = None          # 'PDS' or 'PX'
    self.rejected = 0           # Count of rejected files
    self.maxInfos = ['NO FILE', ('00:00:00', 'No machine', 0)]  # Information about the max.
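# To make the relationship between the summary fields above concrete, here is a
# minimal sketch; the helper name and the sample latencies are hypothetical, and
# only the attribute semantics come from the comments above:
def summarize(latencies, threshold=15):
    # latencies: list of per-file latency times in seconds
    stats = {}
    stats['max'] = max(latencies)
    stats['min'] = min(latencies)
    stats['mean'] = float(sum(latencies)) / len(latencies)
    over = len([l for l in latencies if l > threshold])
    stats['overThreshold'] = over
    # Percentage of files at or under the threshold
    stats['underThresholdP'] = 100.0 * (len(latencies) - over) / len(latencies)
    return stats

print summarize([3, 9, 12, 20, 45])  # -> underThresholdP = 60.0, overThreshold = 2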
def __init__(self, sourlient):
    AFTNPaths.normalPaths(sourlient.name)
    PXPaths.normalPaths()

    self.sysman = SystemManager()           # General system manager

    self.sourlient = sourlient              # Sourlient (Source/Client) object containing configuration infos.
    self.logger = sourlient.logger          # Logger object
    self.subscriber = sourlient.subscriber  # Determines if it will act like a subscriber or a provider (MHS)
    self.host = sourlient.host              # Remote host (name or IP)
    self.portR = sourlient.portR            # Receiving port
    self.portS = sourlient.portS            # Sending port

    self.batch = sourlient.batch            # Number of files we read in a pass (20)
    self.timeout = sourlient.timeout        # Timeout time in seconds (default = 10 seconds)
    self.sleepBetweenConnect = 10           # Time (in seconds) between connection attempts
    self.slow = sourlient.slow              # Sleeps are added when we want to be able to decrypt log entries
    self.igniter = None                     # Igniter object (link to pid)

    self.writePath = AFTNPaths.RECEIVED                # Where we write messages we receive
    self.archivePath = AFTNPaths.SENT                  # Where we put sent messages
    self.specialOrdersPath = AFTNPaths.SPECIAL_ORDERS  # Where we put special orders

    # Paths creation
    self.sysman.createDir(PXPaths.TXQ + self.sourlient.name)
    self.sysman.createDir(self.writePath)
    self.sysman.createDir(self.archivePath)
    self.sysman.createDir(self.specialOrdersPath)

    self.mm = MessageManager(self.logger, self.sourlient)  # The AFTN protocol is implemented in the MessageManager object

    self.remoteAddress = None   # Remote address (where we will connect())
    self.socket = None          # Socket object
    self.dataFromFiles = []     # A list of tuples (content, filename) obtained from a DiskReader

    self.reader = DiskReader(PXPaths.TXQ + self.sourlient.name,
                             self.sourlient.batch,
                             self.sourlient.validation,
                             self.sourlient.diskReaderPatternMatching,
                             self.sourlient.mtime,
                             True,
                             self.logger,
                             eval(self.sourlient.sorter),
                             self.sourlient)

    self.debug = True           # Debugging switch
    self.justConnect = False    # Boolean indicating that a connection has just occurred
    self.totBytes = 0

    #self.printInitInfos()
    self.makeConnection()
def findClient(self, clusters=None, ip="", name=""):
    """
    clusters: a list of clusters (ex: ['pds', 'px', 'pxatx'])
    ip: IP address (ex: '192.168.1.1')
    name: hostname (ex: 'metmgr')

    Only one argument in (ip, name) must be non null
    """
    import socket

    clusters = clusters or self.clusters
    cliClust = []

    if ip:
        try:
            # Get the first part of the fully qualified domain name
            name = socket.gethostbyaddr(ip)[0].split('.')[0]
        except:
            pass
    elif name:
        try:
            ip = socket.gethostbyname(name)
        except:
            pass

    for cluster in clusters:
        clusterRoot = self.rootPath + cluster
        PXPaths.normalPaths(clusterRoot)
        if ip and name:
            command = "grep -l -E '%s|%s' %s" % (ip, name, PXPaths.TX_CONF + "*.conf")
        elif ip:
            command = "grep -l -E '%s' %s" % (ip, PXPaths.TX_CONF + "*.conf")
        elif name:
            command = "grep -l -E '%s' %s" % (name, PXPaths.TX_CONF + "*.conf")
        #print "%s" % cluster.upper()
        output = commands.getoutput(command)
        clients = [(os.path.basename(cli)[:-5], cluster) for cli in output.split()]
        cliClust.extend(clients)

    PXPaths.normalPaths()  # Reset PXPaths variables
    return cliClust
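# A hedged usage sketch of findClient(): the instance name, clusters and IP are
# made up, but the return shape, a list of (clientName, cluster) tuples, follows
# from the list comprehension above:
matches = manager.findClient(clusters=['px', 'pxatx'], ip='192.168.1.1')
for clientName, cluster in matches:
    print "client %s found on cluster %s" % (clientName, cluster)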
def __init__(self):
    PXPaths.normalPaths()
    self.debug = 0
    self.lang = pxRetransLanguage.french

    # masterConfPath is a directory where configuration files for all clusters can be found.
    # With 2 clusters named cluster1 and cluster2, the config. files will be under:
    #     "masterConfPath"/cluster1/etc/
    #     "masterConfPath"/cluster2/etc/
    # When we in fact have no cluster (a single machine), the cluster name will be "." and
    # we thus have "masterConfPath"/./etc/
    # This "scheme" works well when px is installed under a root directory (ex: /apps/px/).
    # It works less well under a Debian installation (where all the config. files are under
    # /etc/px, not /etc/px/etc).
    # When installed with a Debian package, a symbolic link will be created:
    #     ln -s /etc/px $HOME/etc   ($HOME of the px user)
    try:
        self.masterConfPath = pwd.getpwnam('px')[5]  # Master configuration root path (1 machine, Debian way)
    except:
        self.masterConfPath = '/apps/px'

    self.clusters = ['.']  # 1 machine only (no "real" clusters)
    #self.clusters = ['px', 'pxatx', 'pds']

    self.feEnvVar = 'PX_FRONTEND'
    self.sfeEnvVar = 'PX_SIMILI_FRONTEND'

    # Used to execute commands (pxRetrans, .localRetrans) on the backend
    # via an ssh command run on the frontend
    self.fePrivKey = ''      # Used to connect as beUser from fe to be
    self.beUser = '******'   # Used to ssh from the frontend to the backend

    # Used to scp results from the backend to the frontend
    self.bePrivKey = ''
    self.feUser = '******'   # On the backend, results (listing) will be sent to this frontend user
    self.feTempDir = '/tmp'  # On the backend, results (listing) will be sent to this directory

    self.waitTimeForLogEntry = 2  # Time we wait in seconds for obtaining the log line indicating a successful transfer

    self.readConfig(PXPaths.ETC + "px.conf")
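# The pwd.getpwnam('px')[5] lookup above pulls the home-directory field (pw_dir)
# out of the system password database; a small standalone sketch (the 'px'
# account has to exist for the first branch to run):
import pwd

try:
    masterConfPath = pwd.getpwnam('px')[5]  # index 5 of a pwd entry is pw_dir
except KeyError:
    masterConfPath = '/apps/px'
print masterConfPath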
def normalPaths(name, rootPath=""): PXPaths.normalPaths(rootPath) global ROOT, LIB, TO_SEND, RECEIVED, SENT, SPECIAL_ORDERS, STATE if rootPath: if rootPath[-1] != '/': rootPath += '/' envVar = rootPath else: try: envVar = os.path.normpath(os.environ['PXROOT']) + '/' except KeyError: envVar = '/apps/px/' ROOT = envVar LIB = ROOT + 'lib/%s/' % name RECEIVED = PXPaths.TXQ + name + '/.receivedAFTN/' SENT = PXPaths.TXQ + name + '/.sentAFTN/' SPECIAL_ORDERS = PXPaths.TXQ + name + '/.specialOrders/' STATE = PXPaths.TXQ + name + '/.state.obj'
def __init__(self, type='impulses', interval=1, imageName=None):
    PXPaths.normalPaths()
    self.manager = PXManager()
    #self.logger = logger
    #self.manager.setLogger(self.logger)

    self.latenciers = []        # Infos about a particular "latencier"
    self.type = type            # Type of graph; must be in: ['linespoint', 'lines', 'boxes', 'impulses']
    self.interval = interval * dateLib.MINUTE  # Number of seconds between each point on the x-axis
    self.imageName = imageName  # Name of the image file
    self.color = None
    self.width = dateLib.DAY    # Width of the x-axis in seconds

    # With width=DAY and interval=MINUTE => len([60, 120, 180, ..., 86400]) = 1440
    self.separators = dateLib.getSeparators(self.width, self.interval)

    # '"0" 0, "1" 60, "2" 120, "3" 180, "4" 240, ... , "22" 1320, "23" 1380, "24" 1440'
    self.xtics = self.getXTics(len(self.separators), self.interval)

    self.graph = Gnuplot.Gnuplot()
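# The xtics comment above suggests one labelled tic per hour over a 1440-point
# day. A hypothetical re-creation of that string (this is NOT dateLib.getSeparators
# or getXTics, just a sketch that reproduces the commented output):
def xtics_sketch(width=86400, interval=60):
    # One label per hour; the position is the point index (seconds / interval)
    points_per_hour = 3600 / interval
    hours = width / 3600
    return ', '.join(['"%d" %d' % (h, h * points_per_hour) for h in range(hours + 1)])

# xtics_sketch() -> '"0" 0, "1" 60, "2" 120, ..., "24" 1440'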
def __init__(self, logger):
    self.logger = logger
    self.wmo_ids = []
    PXPaths.normalPaths()
    self.path = PXPaths.ETC + 'wmo_id.conf'
#############################################################################################
"""
import sys, os.path, time, string
import gateway
import socketManagerWmo
import bulletinManagerWmo
import bulletinWmo

from MultiKeysStringSorter import MultiKeysStringSorter
from DiskReader import DiskReader
from CacheManager import CacheManager
import PXPaths
from TextSplitter import TextSplitter

PXPaths.normalPaths()

class senderWmo(gateway.gateway):

    def __init__(self, path, client, logger):
        gateway.gateway.__init__(self, path, client, logger)
        self.client = client
        self.establishConnection()

        self.reader = DiskReader(PXPaths.TXQ + self.client.name,
                                 self.client.batch,            # Number of files we read each time
                                 self.client.validation,       # name validation
                                 self.client.patternMatching,  # pattern matching
                                 self.client.mtime,            # we don't check modification time
                                 True,                         # priority tree
                                 self.logger,
def setBasicPaths(self):
    """
    @summary : Sets basic paths which are not influenced by language.
               Useful for finding out what language to use and to call
               self.setPaths( language ) later on.

    @note : SETS THE FOLLOWING PATHS :
            STATSROOT, STATSBIN
            STATSETC, STATSLIB
            STATSDEV, STATSLANG
            And the paths under them.

    @note:
    """

    # Protected StatsPaths
    # Paths without _() are protected paths.
    # THEY, and the paths under them, MUST NOT BE TRANSLATED !
    self.STATSROOT = self.__getPXStatsRoot()
    self.STATSBIN = self.STATSROOT + 'bin/'    # Protected to ensure imports work !
    self.STATSDEV = self.STATSROOT + 'dev/'    # Protected to make sure dependencies work.
    self.STATSETC = self.STATSROOT + 'etc/'    # Protected so as to always find the config files.
    self.STATSLANG = self.STATSROOT + 'lang/'  # Protected so as to always be able to access language files.
    self.STATSLIB = self.STATSROOT + 'lib/'    # Protected to ensure imports work !

    # Paths under pxStats/bin/
    self.STATSTOOLS = self.STATSBIN + 'tools/'
    self.STATSDEBUGTOOLS = self.STATSBIN + 'debugTools/'
    self.STATSWEBPAGESGENERATORS = self.STATSBIN + "webPages/"

    # Paths under pxStats/etc/
    self.STATSPXCONFIGS = self.STATSETC + 'pxConfigFiles/'
    self.STATSPXRXCONFIGS = self.STATSPXCONFIGS + 'rx/'
    self.STATSPXTXCONFIGS = self.STATSPXCONFIGS + 'tx/'
    self.STATSPXTRXCONFIGS = self.STATSPXCONFIGS + 'trx/'

    # Paths under pxStats/dev/
    self.STATSDEVDEPENDENCIES = self.STATSDEV + 'fileDependencies/'
    self.STATSDEVDEPENDENCIESBIN = self.STATSDEVDEPENDENCIES + 'bin/'
    self.STATSDEVDEPENDENCIESBINTOOLS = self.STATSDEVDEPENDENCIESBIN + 'tools/'
    self.STATSDEVDEPENDENCIESBINDEBUGTOOLS = self.STATSDEVDEPENDENCIESBIN + 'debugTools/'
    self.STATSDEVDEPENDENCIESBINWEBPAGES = self.STATSDEVDEPENDENCIESBIN + 'webPages/'
    self.STATSDEVDEPENDENCIESLIB = self.STATSDEVDEPENDENCIES + 'lib/'

    # Paths under pxStats/lang/ (French paths)
    self.STATSLANGFR = self.STATSLANG + 'fr/'
    self.STATSLANGFRBIN = self.STATSLANGFR + 'bin/'
    self.STATSLANGFRBINTOOLS = self.STATSLANGFRBIN + 'tools/'
    self.STATSLANGFRBINDEBUGTOOLS = self.STATSLANGFRBIN + 'debugTools/'
    self.STATSLANGFRBINWEBPAGES = self.STATSLANGFRBIN + 'webPages/'
    self.STATSLANGFRLIB = self.STATSLANGFR + 'lib/'

    # Paths under pxStats/lang/ (English paths)
    self.STATSLANGEN = self.STATSLANG + 'en/'
    self.STATSLANGENBIN = self.STATSLANGEN + 'bin/'
    self.STATSLANGENBINTOOLS = self.STATSLANGENBIN + 'tools/'
    self.STATSLANGENBINDEBUGTOOLS = self.STATSLANGENBIN + 'debugTools/'
    self.STATSLANGENBINWEBPAGES = self.STATSLANGENBIN + 'webPages/'
    self.STATSLANGENLIB = self.STATSLANGEN + 'lib/'

    sys.path.append(PXPATHS.getPXLIBPath())
    #print PXPATHS.getPXLIBPath()
    import PXPaths

    self.COLROOT = COLPATHS.getColumbosRootPath()

    """ PDS' columbo related paths """
    self.PXPATHSPDSCOLGRAPHS = self.COLROOT + '/ColumboShow/graphs/'
    self.PDSCOLLOGS = self.COLROOT + '/ColumboShow/log/'
    self.PDSCOLETC = self.COLROOT + '/etc/'

    """ MetPX related paths """
    PXPaths.normalPaths(str(PXPATHS.getPXLIBPath()).replace("lib/", ""))
    self.PXROOT = PXPaths.ROOT
    self.PXLIB = PXPaths.LIB
    self.PXLOG = PXPaths.LOG
    self.PXETC = PXPaths.ETC
    self.PXETCRX = PXPaths.RX_CONF
    self.PXETCTX = PXPaths.TX_CONF
    self.PXETCTRX = PXPaths.TRX_CONF
#
#############################################################################################
"""
import bulletin
import os, string, sys, time
import PXPaths
import CollectionBuilder
import CollectionEntry
import CollectionState
import StationParser

from CacheManager import CacheManager

PXPaths.normalPaths()  # Access to PX paths

# CollectionManager
class CollectionManager(object):
    """
    The collectionManager reads the RXQ dir and classifies the bulletins:
    if they have to be ingested, do it; if they have to be collected, then
    collect and ingest. Unrelated files are removed. Files that are not
    ready to be collected stay in the RXQ dir.
    """

    def __init__(self, ingestor, bullManager, reader):

        # General Attributes
        self.bullManager = bullManager
class DBSearcher:
    """
    If we don't find what is requested in the current day of the DB,
    we will check in the previous one.
    """

    PXPaths.normalPaths()

    EXCLUDED_SOURCES = ['collecteur']
    #EXCLUDED_SOURCES = []

    TYPES = ['SA', 'FC', 'FT', 'TAF', 'FD', 'FD1', 'FD2', 'FD3']  # bulletin types for which a specialized search exists
    COUNTRIES = ['CA', 'US']
    INTERNATIONAL_SOURCES = ['nws-alpha', 'ukmetin', 'ukmet-bkp']  # sundew international sources
    CANADIAN_SOURCES = ['cmcin', 'ncp1', 'ncp2']                   # sundew canadian sources

    FD_COUNTRIES = ['can', 'usa', 'ala', 'bfr']
    FD_NUMBERS = [1, 2, 3]
    temp = dict(zip(FD_NUMBERS, ['', '', '']))
    temp = dict(zip(FD_COUNTRIES, [temp.copy() for x in range(4)]))
    FD = {'low': copy.deepcopy(temp), 'high': copy.deepcopy(temp)}

    # Note: The country is not used in the logic of the search.
    # The height (low or high) and number (1, 2, 3) are used.

    # Canada
    FD['low']['can'][1] = 'FDCN01 CWAO'
    FD['low']['can'][2] = 'FDCN02 CWAO'
    FD['low']['can'][3] = 'FDCN03 CWAO'
    FD['high']['can'][1] = 'FDCN01 KWBC'
    FD['high']['can'][2] = 'FDCN02 KWBC'
    FD['high']['can'][3] = 'FDCN03 KWBC'

    # Usa
    FD['low']['usa'][1] = 'FDUS11 KWBC'
    FD['low']['usa'][2] = 'FDUS13 KWBC'
    FD['low']['usa'][3] = 'FDUS15 KWBC'
    FD['high']['usa'][1] = 'FDUS08 KWBC'
    FD['high']['usa'][2] = 'FDUS09 KWBC'
    FD['high']['usa'][3] = 'FDUS10 KWBC'

    # Alaska
    FD['low']['ala'][1] = 'FDAK01 KWBC'
    FD['low']['ala'][2] = 'FDAK02 KWBC'
    FD['low']['ala'][3] = 'FDAK03 KWBC'
    #FD['high']['ala'][1] = ''
    #FD['high']['ala'][2] = ''
    #FD['high']['ala'][3] = ''

    # BFR and PWM ??
    FD['low']['bfr'][1] = 'FDUS12 KWBC'
    FD['low']['bfr'][2] = 'FDUS14 KWBC'
    FD['low']['bfr'][3] = 'FDUS16 KWBC'
    FD['high']['bfr'][1] = 'FDUE01 KWBC'
    FD['high']['bfr'][2] = 'FDUE03 KWBC'
    FD['high']['bfr'][3] = 'FDUE05 KWBC'

    LOW = []
    LOW1 = []
    LOW2 = []
    LOW3 = []
    HIGH = []
    HIGH1 = []
    HIGH2 = []
    HIGH3 = []

    for height in ['low', 'high']:
        for country in FD_COUNTRIES:
            for number in FD_NUMBERS:
                if height == 'low':
                    LOW.append(FD[height][country][number])
                    eval('LOW' + str(number)).append(FD[height][country][number])
                elif height == 'high':
                    HIGH.append(FD[height][country][number])
                    eval('HIGH' + str(number)).append(FD[height][country][number])

    for country in FD_COUNTRIES:
        exec(country + 'List = []')
        for height in ['low', 'high']:
            for number in FD_NUMBERS:
                if (FD[height][country][number]):
                    eval(country + 'List').append(FD[height][country][number])
        #eval(country + 'List').sort()

    def __init__(self, request, printout=True):
        self.today = dateLib.getTodayFormatted()
        self.yesterday = dateLib.getYesterdayFormatted()
        #self.today = '20060523'
        #self.yesterday = '20060522'

        self.printout = printout
        self.request = request     # Request before being parsed
        self.requestType = None    # 1 for a fully qualified header, 2 for type + station(s)

        # Ex. of a fully qualified header request: FPCN11 CWTO
        self.ttaaii = None         # First word of a fully qualified header
        self.tt = None             # Type extracted from ttaaii
        self.center = None         # Second word of a fully qualified header
        self.header = None         # Fully qualified header (ex: "FPCN11 CWTO")
        self.country = None        # Country is obtained from the center (only used for CA and US)

        # Ex. of a type + station(s) request: SA YUL YQB YZV
        self.type = None           # Must be in TYPES
        self.stations = []         # Between 1 and 5 stations
        self.stationParser = None  # Used to map a station to a fully qualified header

        self.debug = False

        self._parseRequest()       # Will determine the request's type
        #self.printInfos()
        self.results = self._search()  # Find what we are searching for

    def _parseRequest(self):
        words = self.request.split()
        words = [word.upper() for word in words]

        if len(words) < 2 or len(words) > 6:
            if self.debug:
                print "\nBad request"
        elif len(words) == 2 and 4 <= len(words[0]) <= 6 and len(words[1]) == 4:
            # ex: SACN31 CWAO
            # ex: FPCN31 CWAO
            #print "\nFully qualified header request\n"
            self.requestType = 1
            self.ttaaii = words[0]
            self.tt = words[0][:2]
            self.center = words[1]
            self.header = words[0] + " " + words[1]
            if self.center[0] == 'C':
                self.country = 'CA'
            elif self.center[0] == 'K':
                self.country = 'US'
        elif words[0] in DBSearcher.TYPES:
            # ex: SA YUL
            # ex: SA CYUL PATQ
            #print "\nPartial header request (Type + station(s))\n"
            self.requestType = 2
            self.type = words[0]
            for station in words[1:]:
                if self._validateStation(station):
                    self.stations.append(station)
                else:
                    if self.debug:
                        print("%s is not a valid station name" % station)
        else:
            if self.debug:
                print "\nBad request even if the word count is good"

    def _search(self):
        # FIXME: Must select best result from multiple machines
        if self.requestType == 1:
            # Fully qualified header request
            if self.debug:
                print self.ttaaii, self.center, self.country
            for date in [self.today, self.yesterday]:
                theFile = self._findFullHeader(date, True, self.ttaaii, self.center,
                                               self.country, DBSearcher.EXCLUDED_SOURCES)
                if theFile:
                    #print theFile
                    return theFile
            return None
        elif self.requestType == 2:
            allResults = []
            for station in self.stations:
                for date in [self.today, self.yesterday]:
                    #print 'DATE: %s' % date
                    if self.type == 'SA':
                        results = self._findSA([station], date)
                        if results[0][1]:
                            allResults.extend(results)
                            #self.printResults(results, self.type)
                            if self.printout:
                                print 80 * '-'
                                for result in results:
                                    print self.formatResult(result, self.type)
                            break
                    elif self.type in ['FC', 'FT', 'TAF']:
                        results = self._findTAF([station], date)
                        if results[0][1]:
                            allResults.extend(results)
                            #self.printResults(results, self.type)
                            if self.printout:
                                print 80 * '-'
                                for result in results:
                                    print self.formatResult(result, self.type)
                            break
                    elif self.type in ['FD', 'FD1', 'FD2', 'FD3']:
                        results = self._findFD([station], self.type, date)
                        if results[0][1]:
                            allResults.extend(results)
                            #self.printResults(results)
                            if self.printout:
                                print 80 * '-'
                                for result in results:
                                    print self.formatResult(result, self.type)
                            break
            #print allResults
            return allResults
            #file = open(PXPaths.REQUEST_REPLY + 'results.pickle', 'w')
            #pickle.dump(results, file)
            #file.close()

    def _getFilesToParse(self, root, headers, excludedSources=None):
        """
        Given a root path (ex: PXPaths.DB + date + '/SA/') and a list of headers
        (ex: ['SAAK31 KWBC', 'SAAK41 KNKA', 'SAUS20 KNKA', 'SAUS70 KWBC']),
        find the list of files matching these criteria.
        """
        filesToParse = []

        if headers == ['']:
            pass
        else:
            centers = FileParser.removeDuplicate([header.split()[1] for header in headers])

            # Request SA PATQ
            # => headers = ['SAAK31 KWBC', 'SAAK41 KNKA', 'SAUS20 KNKA', 'SAUS70 KWBC']
            # => ttaaiis = {'KNKA': ['SAAK41', 'SAUS20'], 'KWBC': ['SAAK31', 'SAUS70']}
            ttaaiis = {}
            for header in headers:
                ttaaiis.setdefault(header.split()[1], []).append(header.split()[0])

            try:
                if not excludedSources:
                    excludedSources = []
                sources = os.listdir(root)
                for source in excludedSources:
                    if source in sources:
                        sources.remove(source)
            except:
                (type, value, tb) = sys.exc_info()
                if self.printout:
                    print("Type: %s, Value: %s" % (type, value))
                return filesToParse

            #print("Headers: %s" % headers)
            #print("ttaaiis: %s" % ttaaiis)
            #print("centers: %s" % centers)
            #print("sources: %s\n" % sources)

            for source in sources:
                for center in centers:
                    pathToCenter = root + source + '/' + center
                    try:
                        for file in os.listdir(pathToCenter):
                            for ttaaii in ttaaiis[center]:
                                if file[:len(ttaaii)] == ttaaii:
                                    filesToParse.append(pathToCenter + '/' + file)
                                    break
                    except:
                        (type, value, tb) = sys.exc_info()
                        if self.printout:
                            if self.debug:
                                print("Type: %s, Value: %s" % (type, value))
                        continue

        #print("len(filesToParse) = %d\n" % len(filesToParse))
        return filesToParse

    def _findFD(self, stations, fdtype, date):
        from StationParser import StationParser
        from FDParser import FDParser

        # ex: [('CYOW', FD_LINE, FD_HEADER_TIME, FD_FILE, FD_FILE_TIME), ('CYUL', ...)]
        results = []

        sp = StationParser(PXPaths.ETC + 'stations_FD.conf')
        sp.parse()

        if fdtype in ['FD1', 'FD2', 'FD3']:
            number = fdtype[-1]
        else:
            number = ''

        for station in stations:
            headers = sp.headers.get(station, [])
            headers.sort()
            lowHeaders = []
            highHeaders = []

            for header in headers:
                if header in eval('DBSearcher.LOW' + number):
                    lowHeaders.append(header)
                elif header in eval('DBSearcher.HIGH' + number):
                    highHeaders.append(header)

            for header in lowHeaders + highHeaders:
                filesToParse = self._getFilesToParse(PXPaths.DB + date + '/FD/', [header])
                #print("In findFD, len(filesToParse) = %d" % len(filesToParse))
                theLine, bestHeaderTime, theFile, bestFileTime = \
                    self._findMoreRecentStation(FDParser(''), filesToParse, station)
                if theLine:
                    bigTitle = FDParser('').getFDTitle(theFile)
                    #print("BIG TITLE: \n%s" % bigTitle)
                    #print theFile
                    #print "theLine: %s" % theLine
                    theLine = bigTitle + theLine
                    results.append((station, theLine, bestHeaderTime, theFile, bestFileTime))

            if lowHeaders == highHeaders == []:
                results.append((station, None, 0, None, 0))

        return results

    def _findTAF(self, stations, date):
        from StationParser import StationParser
        from TAFParser import TAFParser

        # ex: [('CYOW', TAF_LINE, TAF_HEADER_TIME, TAF_FILE, TAF_FILE_TIME), ('CYUL', ...)]
        results = []

        sp = StationParser(PXPaths.ETC + 'stations_TAF.conf')
        sp.parse()

        for station in stations:
            headers = sp.headers.get(station, [])
            filesToParse = self._getFilesToParse(PXPaths.DB + date + '/FC/', headers)
            filesToParse.extend(self._getFilesToParse(PXPaths.DB + date + '/FT/', headers))
            #print("In findTAF, len(filesToParse) = %d" % len(filesToParse))
            theLine, bestHeaderTime, theFile, bestFileTime = \
                self._findMoreRecentStation(TAFParser(''), filesToParse, station)
            if theLine:
                theLine += '='
            results.append((station, theLine, bestHeaderTime, theFile, bestFileTime))

        return results

    def _findSA(self, stations, date):
        # Partial header request (Type + station(s))
        # ex: SA CYOW CYUL
        # Note: Once we find the best match, we take the header we found (ex: SACN31 CWAO), replace the
        # A by a P (ex: SPCN31 CWAO) and try to find a speci for the station. The speci must be between
        # the full hour of the SA and the request time.
        from StationParser import StationParser
        from SAParser import SAParser

        # ex: [('CYOW', SA_LINE, SA_HEADER_TIME, SA_FILE, SA_FILE_TIME, SP_LINE, SP_HEADER_TIME, SP_FILE, SP_FILE_TIME), ('CYUL', ...)]
        results = []

        sp = StationParser(PXPaths.ETC + 'stations_SA.conf')
        sp.parse()

        for station in stations:
            threeCharHeaders = []
            if len(station) == 3:
                #print("%s => we will search for %s first; if we obtain no results, we will search for %s" % (station, 'C' + station, station))
                threeCharHeaders = sp.headers.get(station, [])
                station = 'C' + station
                headers = sp.headers.get(station, [])
            elif station[0] == 'C':
                #print("%s is a canadian station" % station)
                headers = sp.headers.get(station, [])
            elif station[0] == 'K':
                #print("%s is an american station" % station)
                headers = sp.headers.get(station, [])
            else:
                #print("%s is an international station" % station)
                headers = sp.headers.get(station, [])

            filesToParse = self._getFilesToParse(PXPaths.DB + date + '/SA/', headers,
                                                 DBSearcher.EXCLUDED_SOURCES)
            theLine, bestHeaderTime, theFile, bestFileTime = \
                self._findMoreRecentStation(SAParser(''), filesToParse, station)

            if not theLine and threeCharHeaders:
                # If not successful at finding the 4-char station when the original request
                # was for a 3-char station, we try the 3-char case
                #print 'We are searching for the 3 chars station'
                station = station[1:]
                filesToParse = self._getFilesToParse(PXPaths.DB + date + '/SA/',
                                                     threeCharHeaders,
                                                     DBSearcher.EXCLUDED_SOURCES)
                theLine, bestHeaderTime, theFile, bestFileTime = \
                    self._findMoreRecentStation(SAParser(''), filesToParse, station)

            if theLine:
                theLine += '='
                parts = os.path.basename(theFile).split('_')
                header = parts[0] + ' ' + parts[1]
                speciLine, speciHeaderTime, speciFile, speciFileTime = \
                    self._findSpeci(station, header, bestHeaderTime, date)
                if speciHeaderTime and (speciHeaderTime < bestHeaderTime):
                    surround = 30 * '='
                    #print 'Speci found has been rejected (%s < %s)' % (speciHeaderTime, bestHeaderTime)
                    speciLine, speciHeaderTime, speciFile, speciFileTime = None, 0, None, 0
                    #print "%s END SPECI INFOS %s\n" % (surround, surround)
            else:
                speciLine, speciHeaderTime, speciFile, speciFileTime = None, 0, None, 0

            results.append((station, theLine, bestHeaderTime, theFile, bestFileTime,
                            speciLine, speciHeaderTime, speciFile, speciFileTime))

        return results

    def formatResult(self, result, type):
        saResult = ''
        speciResult = ''

        if type == 'SA':
            station, theLine, bestHeaderTime, theFile, bestFileTime, \
                speciLine, speciHeaderTime, speciFile, speciFileTime = result
        else:
            station, theLine, bestHeaderTime, theFile, bestFileTime = result

        if theFile:
            parts = os.path.basename(theFile).split('_')
            header = parts[0] + ' ' + parts[1]
            saResult = header + ' ' + bestHeaderTime + '\n' + theLine.strip() + '\n'
            #print repr(theLine)

        if type == 'SA' and speciLine:
            speciHeader = header[0] + 'P' + header[2:]
            speciResult = speciHeader + ' ' + speciHeaderTime + '\n' + speciLine.strip() + '\n\n'

        banner = 80 * '-'
        return speciResult + saResult + banner

    def printResults(self, results, type=None):
        print "%s RESULTS %s" % (30 * '=', 30 * '=')
        for result in results:
            if type == 'SA':
                station, theLine, bestHeaderTime, theFile, bestFileTime, \
                    speciLine, speciHeaderTime, speciFile, speciFileTime = result
            else:
                station, theLine, bestHeaderTime, theFile, bestFileTime = result

            print "Station: %s" % station
            print "Line:\n%s" % theLine
            print "HeaderTime: %s" % bestHeaderTime
            print "File: %s" % theFile
            print "FileTime: %s" % bestFileTime

            if type == 'SA':
                print "SP_Line: %s" % speciLine
                print "SP_HeaderTime: %s" % speciHeaderTime
                print "SP_File: %s" % speciFile
                print "SP_FileTime: %s" % speciFileTime
            print "\n"

    def _findSpeci(self, station, header, headerTime, DBDate):
        from SAParser import SAParser

        now = time.mktime(time.gmtime())
        ttaaii, center = header.split()
        ttaaii = ttaaii[0] + 'P' + ttaaii[2:]

        filesToParse = self._findFullHeader(DBDate, False, ttaaii, center, 'INT',
                                            DBSearcher.EXCLUDED_SOURCES)
        theLine, bestHeaderTime, theFile, bestFileTime = \
            self._findMoreRecentStation(SAParser(''), filesToParse or [], station)
        if theLine:
            theLine += '='

        #for file in files:
        #    print file
        """
        surround = 30*'='
        print "%s SPECI INFOS %s" % (surround, surround)
        print "Number of files: %d" % len(filesToParse)
        print "Station: %s" % station
        print "Header: %s" % header
        print "Time: %s" % headerTime
        print "ttaaii: %s" % ttaaii
        print "center: %s" % center
        print "Now: %s\n" % now
        print "%s%s%s" % (surround, len(' SPECI INFOS ') * '=', surround)
        """
        return (theLine, bestHeaderTime, theFile, bestFileTime)

    def _findMoreRecentStation(self, parser, files, station, startDate=None):
        bestHeaderTime = 0
        bestFileTime = 0
        theLine = None
        theFile = None

        for file in files:
            parser.filename = file
            stationLine, headerTime = parser.findStationLine(station)
            if headerTime > bestHeaderTime:
                bestHeaderTime = headerTime
                bestFileTime = os.path.getmtime(file)
                theLine = stationLine
                theFile = file
            elif headerTime == bestHeaderTime:
                if os.path.getmtime(file) > bestFileTime:
                    bestFileTime = os.path.getmtime(file)
                    theLine = stationLine
                    theFile = file

        return (theLine, bestHeaderTime, theFile, bestFileTime)

    def _findFullHeader(self, date, unique=True, ttaaii='SACN31', center='CWAO',
                        country='CA', excludedSources=None):
        self.theFile = None      # The filename of the more recent header in a fully qualified header search
        self.bestFileTime = 0    # More recent file
        self.bestHeaderTime = 0  # More recent header
        allGoodFiles = []

        try:
            iterator = os.walk(PXPaths.DB + date)
            path, dirs, files = iterator.next()
            if self.debug:
                print path, dirs, files
        except:
            (type, value, tb) = sys.exc_info()
            if self.debug:
                print("In _findFullHeader: Type = %s, Value = %s" % (type, value))
                print("The request (%s) has been stopped at the date (%s) level" % (self.request, date))
            return self.theFile

        # We select only the "tt" directory
        for dir in dirs[:]:
            if dir != ttaaii[:2]:
                dirs.remove(dir)

        # We select the "sources" if possible (CAN or US)
        # This is considered an optimization. We should be able to turn this off.
        try:
            pathBeforeSource, dirs, files = iterator.next()
            if self.debug:
                print pathBeforeSource, dirs, files
        except:
            (type, value, tb) = sys.exc_info()
            if self.debug:
                print("Type: %s, Value: %s" % (type, value))
                print("The request (%s) has been stopped at the source(s) level" % (self.request))
            return self.theFile

        if country == 'CA':
            for dir in dirs[:]:
                if dir in DBSearcher.INTERNATIONAL_SOURCES:
                    dirs.remove(dir)
            dirs.sort()
        elif country == 'US':
            for dir in dirs[:]:
                if dir in DBSearcher.CANADIAN_SOURCES:
                    dirs.remove(dir)
            dirs.sort()

        if not excludedSources:
            excludedSources = []
        for source in excludedSources:
            if source in dirs:
                dirs.remove(source)

        for source in dirs:
            iterator = os.walk(pathBeforeSource + "/" + source)

            # We select only the "center" directory
            try:
                path, dirs, files = iterator.next()
                if self.debug:
                    print path, dirs, files
            except:
                (type, value, tb) = sys.exc_info()
                if self.debug:
                    print("Type: %s, Value: %s" % (type, value))
                    print("The request (%s) has been stopped at the center (%s) level for the source %s" % (self.request, center, source))
                continue

            for dir in dirs[:]:
                if dir != center:
                    dirs.remove(dir)
            #print "Dirs: %s" % dirs

            # We select the "good bulletins"
            try:
                path, dirs, files = iterator.next()
                if self.debug > 10:
                    print path, dirs, files
            except:
                (type, value, tb) = sys.exc_info()
                if self.debug:
                    print("Type: %s, Value: %s" % (type, value))
                    print("The request (%s) has been stopped at the ttaaii (%s) level for the source %s and the center %s" % (self.request, ttaaii, source, center))
                continue

            length = len(ttaaii)
            goodFiles = [path + '/' + file for file in files if file[:length] == ttaaii]
            #self._findMoreRecentFile(goodFiles)
            if unique:
                self._findMoreRecentHeader(goodFiles)
            else:
                allGoodFiles.extend(goodFiles)

        if unique:
            return self.theFile
        else:
            return allGoodFiles

    def _findMoreRecentFile(self, files):
        """
        The search is based on the time when the file was written on disk.
        """
        for file in files:
            mtime = os.path.getmtime(file)
            if mtime > self.bestFileTime:
                self.bestFileTime = mtime
                self.theFile = file

    def _findMoreRecentHeader(self, files):
        """
        The search is based on the time when the header was written in the bulletin
        and, in case of equality, the time when the file was written on disk.
        """
        for file in files:
            try:
                handle = open(file, 'r')
            except:
                (type, value, tb) = sys.exc_info()
                if self.debug:
                    print("Type: %s, Value: %s" % (type, value))
                    print("Cannot open %s" % file)
                continue

            parts = handle.readline().split()
            if len(parts) < 3:
                if self.debug:
                    print("Not enough parts (%s) in the header" % parts)
                continue
            elif len(parts[2]) == 7:
                if parts[2][:6].isdigit() and parts[2][-1].upper() == 'Z':
                    pass
                else:
                    if self.debug:
                        print("(CASE 1) Third part is not a valid time (%s)" % parts)
                    continue
            elif len(parts[2]) == 6:
                if parts[2][:6].isdigit():
                    pass
                else:
                    if self.debug:
                        print("(CASE 2) Third part is not a valid time (%s)" % parts)
                    continue
            else:
                if self.debug:
                    print("(CASE 3) Third part is not a valid time (%s)" % parts)
                continue

            ddhhmm = parts[2][:6]
            if ddhhmm > self.bestHeaderTime:
                self.theFile = file
                self.bestFileTime = os.path.getmtime(file)
                self.bestHeaderTime = ddhhmm
            elif ddhhmm == self.bestHeaderTime:
                mtime = os.path.getmtime(file)
                if mtime > self.bestFileTime:
                    self.theFile = file
                    self.bestFileTime = mtime
                    self.bestHeaderTime = ddhhmm
            handle.close()

    def printInfos(self):
        print("Request: %s" % self.request)
        print("Request Type: %s" % self.requestType)
        print("Header: %s" % self.header)
        print("tt: %s" % self.tt)
        print("Centre: %s" % self.center)
        print("Type: %s" % self.type)
        print("Stations: %s" % self.stations)
        print("Country: %s" % self.country)

    def _validateStation(self, station):
        return 3 <= len(station) <= 4
# Date: 2005-01-10 (Initial version by PS)
#       2005-08-21 (OO version by DL)
#       2005-11-01 (Path stuff by MG)
#
# Description:
#
#############################################################################################
"""
import sys, os, re, time, fnmatch
import PXPaths
from URLParser import URLParser
from Logger import Logger
#from Flow import Flow

PXPaths.normalPaths()  # Access to PX paths

class Client(object):

    def __init__(self, name='toto', logger=None):
        #Flow.__init__(self, name, 'sender', type, batch)  # Parent constructor

        # General Attributes
        self.name = name  # Client's name
        if logger is None:
            self.logger = Logger(PXPaths.LOG + 'tx_' + name + '.log', 'INFO', 'TX' + name)  # Enable logging
            self.logger = self.logger.getLogger()
        else:
            self.logger = logger
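# A minimal usage sketch; the client name 'amis' is made up. With no logger
# supplied, the constructor above opens its own log file under PXPaths.LOG
# (here tx_amis.log), so that directory must be writable:
client = Client(name='amis')
client.logger.info("client %s initialized" % client.name)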
#
# Description: Resend a PX file.
#
# Date: 2006-07-07
#
###########################################################
"""
# Python API imports
import sys
import commands
from optparse import OptionParser

# Local imports
sys.path.insert(1, sys.path[0] + '/../')
import PXPaths; PXPaths.normalPaths()
from ResendObject import *

def parseRawLine(line):
    lineParts = line.split(":")
    header = ":".join(lineParts[2:])
    machine = lineParts[0]
    return machine, header

def validateUserInput(options, args):
    if len(args) > 1:
        if len(args.split(":")) < 3:
            sys.exit("Input was not formatted correctly.\nIt should be machine:log:header\n'log' can be anything, it is a field returned by pxSearch")

def updateResendObject(ro, options, args):
    ro.setPrompt(options.prompt)
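# For illustration, here is how parseRawLine() splits a machine:log:header line.
# The sample values are invented, but the behaviour follows from the code:
# everything after the second colon is rejoined, so headers containing colons survive.
machine, header = parseRawLine("px1:tx_amis.log:SACN31_CWAO_041200")
# machine -> 'px1'
# header  -> 'SACN31_CWAO_041200'  (the 'tx_amis.log' field is discarded)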
LOG = '/apps/px/' + 'log/'
FXQ = ROOT + 'fxq/'
RXQ = ROOT + 'rxq/'
TXQ = ROOT + 'txq/'
DB = ROOT + 'db/'

FX_CONF = ETC + 'fx/'
RX_CONF = ETC + 'rx/'
TX_CONF = ETC + 'tx/'
TRX_CONF = ETC + 'trx/'

# TEST normalPaths...
if __name__ == '__main__':
    import PXPaths

    PXPaths.normalPaths("")
    print("ROOT          %s" % PXPaths.ROOT)
    print("LIB           %s" % PXPaths.LIB)
    print("LOG           %s" % PXPaths.LOG)
    print("ETC           %s" % PXPaths.ETC)
    print("FXQ           %s" % PXPaths.FXQ)
    print("RXQ           %s" % PXPaths.RXQ)
    print("TXQ           %s" % PXPaths.TXQ)
    print("DB            %s" % PXPaths.DB)
    print("ROUTING_TABLE %s" % PXPaths.ROUTING_TABLE)
    print("STATION_TABLE %s" % PXPaths.STATION_TABLE)
    print("FX_CONF       %s" % PXPaths.FX_CONF)
#############################################################################################

import cgi
import cgitb; cgitb.enable()
import sys, os, pwd, time, re, pickle, commands

sys.path.append(sys.path[0] + "/../../lib"); sys.path.append("../../lib")
sys.path.append("/apps/px/lib")
sys.path.append("/apps/px/lib/importedLibs")

import template
from PDSPath import *
from ColumboPaths import *
from types import *
from myTime import *

import PXPaths; PXPaths.normalPaths()
from ConfReader import ConfReader

cr = ConfReader("%spx.conf" % (PXPaths.ETC))
user = cr.getConfigValues("user")[0]
backends = cr.getConfigValues("backend")

form = cgi.FieldStorage()

def menuContent():
    """
    Creates the menu options dynamically

    Returns: a string
    """
    flows = []
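# For context, the ConfReader lookups above expect "user" and "backend" entries
# in px.conf; "backend" can appear several times since getConfigValues returns a
# list. A hypothetical excerpt (the key names come from the calls above; the
# values and the keyword-value layout are assumptions):
#
#     user px
#     backend px1.example.com
#     backend px2.example.com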
def _reload(self, sig, stack):
    """
    Do the real work here. Depends on the type of sender/receiver.
    """
    if self.gateway is None:
        # Because we don't have a gateway object, it means that we can only reread the configuration
        # file of the source/client, not particular files like Circuit and Stations, because
        # they haven't been read at this time anyway.
        # If we are here, it is because we don't have a gateway object, meaning that we are
        # waiting for a connection. The easiest way to reread the configuration file of
        # the sources/clients AND the value of the variables in the configuration file of this
        # particular source/client is by restarting it!
        #self.logger.info("ppid=%s, pid=%s, pgid=%s, sid=%s, cterm=%s" % (os.getppid(), os.getpid(), os.getpgrp(), os.getsid(os.getpid()), os.ctermid()))
        #output = commands.getoutput('ls -alrt /proc/%s/fd' % os.getpid())
        #self.logger.info(output)
        if os.fork() == 0:
            for fd in range(3, 10):
                try:
                    os.close(fd)
                except OSError:
                    pass
            import PXPaths
            PXPaths.normalPaths()
            appName = 'px' + self.direction.capitalize()
            os.execl(PXPaths.BIN + appName, appName, self.flowName, 'restart')
        else:
            pass
    else:
        #print self.gateway
        if self.direction == 'sender':
            self.reloadMode = True
        elif self.direction == 'filter':
            self.reloadMode = True
        elif self.direction == 'receiver':
            if self.type == 'am':
                # FIXME: Should be put in amReceiver code
                # We assign the defaults, reread the configuration file for the source
                # and reread all configuration files for the clients (all this in __init__)
                self.flow.__init__(self.flow.name, self.flow.logger)
                self.gateway.unBulletinManager.extension = self.flow.extension
                self.gateway.unBulletinManager.addSMHeader = self.flow.addSMHeader
                #print self.flow
                #print "ext: %s" % (self.flow.extension)
                #print "addSM: %s" % (self.flow.addSMHeader)

                # Reset all the clients + sourlients names to which px can link (independently of the routing table)
                self.gateway.unBulletinManager.drp.pxLinkables = self.flow.ingestor.allNames
                # Reparse the ROUTING_TABLE
                self.gateway.unBulletinManager.drp.reparse()
                self.gateway.unBulletinManager.reloadMapEntetes(self.gateway.pathFichierStations)
                self.logger.info("%s has been reloaded" % self.direction.capitalize())

            if self.type == 'wmo':
                # FIXME: Should be put in wmoReceiver code
                # We assign the defaults, reread the configuration file for the source
                # and reread all configuration files for the clients (all this in __init__)
                self.flow.__init__(self.flow.name, self.flow.logger)
                self.gateway.unBulletinManager.extension = self.flow.extension

                # Reset all the clients + sourlients names to which px can link (independently of the routing table)
                self.gateway.unBulletinManager.drp.pxLinkables = self.flow.ingestor.allNames
                # Reparse the ROUTING_TABLE
                self.gateway.unBulletinManager.drp.reparse()
                self.logger.info("%s has been reloaded" % self.direction.capitalize())

            if self.type == 'collector':
                self.reloadMode = True
            if self.type == 'single-file' or self.type == 'bulletin-file':
                self.reloadMode = True
            if self.type == 'pull-bulletin' or self.type == 'pull-file':
                self.reloadMode = True

        elif self.direction == 'transceiver':
            if self.type == 'aftn':
                #if os.fork() == 0:
                #    self.restart()
                #    self.logger.info("%s %s has been reloaded by restarting it" % (self.direction.capitalize(), self.flow.name))
                #else:
                #    pass

                # FIXME: Should be put in TransceiverAFTN code
                # We assign the defaults, reread the configuration file for the sourlient
                # and reread all configuration files for the clients (all this in __init__)
                self.flow.__init__(self.flow.name, self.flow.logger)
                self.gateway.mm.reloadMode = True
                self.gateway.mm.__init__(self.flow.logger, self.flow, True)
                #self.gateway.mm.bullManager.extension = self.flow.extension

                # Reset all the clients + sourlients names to which px can link (independently of the routing table)
                #self.gateway.mm.bullManager.drp.pxLinkables = self.flow.ingestor.allNames
                #self.gateway.mm.drp = self.gateway.mm.bullManager.drp
                # Reparse the ROUTING_TABLE
                #self.gateway.unBulletinManager.drp.reparse()
                self.logger.info("%s has been reloaded" % self.direction.capitalize())
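# The (self, sig, stack) signature above matches Python's signal-handler
# convention, so a sketch of how an instance might register it. The SIGHUP
# choice and the 'igniter' instance name are assumptions, not confirmed by
# this snippet:
import signal

# Hypothetical wiring: ask the OS to call igniter._reload on SIGHUP,
# so 'kill -HUP <pid>' triggers the reload logic above.
signal.signal(signal.SIGHUP, igniter._reload)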