def parseArgs(self, argv):
    """Parse command-line test options and configure the runner.

    Recognizes -v/--verbose, -x/--xunit=<file>, --log-level=<name> and
    --log-config=<file>; any remaining arguments are treated as test names.
    """
    import getopt
    options, args = getopt.getopt(
        argv, 'vx:', ['xunit=', 'log-level=', 'log-config=', 'verbose'])

    use_xunit = False
    xunit_output = None
    log_level = None
    log_config = None
    for option, value in options:
        if option in ('-v', '--verbose'):
            self.verbosity = 2
        elif option in ('-x', '--xunit'):
            use_xunit = True
            xunit_output = value
        elif option == '--log-level':
            # Translate a level name to a Python logging level; log4py keeps
            # its own mapping because logging does not expose one directly
            log_level = ossie.utils.log4py.config._LEVEL_TRANS.get(value.upper(), None)
        elif option == '--log-config':
            log_config = value

    # If requested, use XML output (but the module is non-standard, so it
    # may not be available).
    if use_xunit:
        try:
            import xmlrunner
            if xunit_output:
                self.testRunner = xmlrunner.XMLTestRunner(output=xunit_output, verbosity=self.verbosity)
            else:
                self.testRunner = xmlrunner.XMLTestRunner(verbosity=self.verbosity)
        except ImportError:
            sys.stderr.write('WARNING: XML test runner module is not installed\n')
        except TypeError:
            # Maybe it didn't like the verbosity argument
            self.testRunner = xmlrunner.XMLTestRunner()

    if log_config:
        # A log4j configuration file was given; read it
        ossie.utils.log4py.config.fileConfig(log_config)
    else:
        # Set up a simple configuration that logs on the console
        logging.basicConfig()

    if log_level:
        # The command-line level overrides any configuration file setting
        logging.getLogger().setLevel(log_level)

    # Any additional arguments are test names
    self.testNames = args
def __call__(self, *args, **kwargs):
    """Construct a sandbox helper, register it, and optionally start it.

    The 'sandbox' and 'autoStart' keyword arguments are consumed here and
    never reach the helper class __init__.
    """
    # Pull out the sandbox-only keyword arguments before construction
    sandbox = kwargs.pop('sandbox', None)
    auto_start = kwargs.pop('autoStart', True)
    if sandbox is None:
        sandbox = default_sandbox()

    # Create/initialize the helper instance itself
    helper = super(SandboxMeta, self).__call__(*args, **kwargs)

    # Register under a sandbox-unique instance name
    instance_name = sandbox._createInstanceName(helper.__class__.__name__)
    helper._registerWithSandbox(sandbox, instance_name)

    # Default logger combines the defining module and the instance name
    helper.log = logging.getLogger(helper.__module__).getChild(instance_name)

    # Post-registration initialization hook
    helper._initializeHelper()

    # Start immediately when the sandbox is already running
    if auto_start and sandbox.started:
        helper.start()
    return helper
def __init__(self, name, PortTypeClass, PortTransferType, logger=None, dataType=list, bits=0):
    """Initialize the output port.

    name              -- port name (also used for stats and default logger)
    PortTypeClass     -- CORBA port type class
    PortTransferType  -- element type string (used with struct.calcsize when
                         'bits' is not given)
    logger            -- optional logger; defaults to a bulkio.outport logger
    dataType          -- container type for outgoing data (default: list)
    bits              -- element size in bits; 0 means derive from
                         PortTransferType
    """
    # Backwards-compatibility: accept an element type string for use with
    # struct.calcsize
    if bits == 0:
        bits = struct.calcsize(PortTransferType) * 8
    self.name = name
    self._portLog = logger
    self.PortType = PortTypeClass
    self.PortTransferType = PortTransferType
    self.outConnections = {}  # key=connectionId, value=port
    self.stats = OutStats(self.name, bits=bits)
    self.port_lock = threading.Lock()
    self.sriDict = {}  # key=streamID, value=SriMapStruct
    self.filterTable = []
    # Data type class
    self._dataType = dataType
    # Retain noData member for backwards-compatibility
    self.noData = dataType()
    # Determine maximum transfer size in advance
    self._bitSize = bits
    # Multiply by some number < 1 to leave some margin for the CORBA header.
    # Floor division keeps the sample count an integer on both Python 2 and 3
    # (plain '/' would silently become a float under Python 3).
    self.maxSamplesPerPush = 8 * int(MAX_TRANSFER_BYTES * .9) // self._bitSize
    # Retain byte size for backwards-compatibility
    self.byteSize = self._bitSize // 8
    self._streams = {}
    # Use identity comparison with None rather than '==' (idiomatic and
    # safe against overloaded equality)
    if self._portLog is None:
        self._portLog = logging.getLogger("redhawk.bulkio.outport." + name)
def __init__(self, name, bits, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize):
    """Set up the input port's queue, locks, statistics and stream tracking.

    name              -- port name
    bits              -- element size in bits (for statistics)
    logger            -- optional logger; defaults to a bulkio.input logger
    sriCompare        -- SRI comparison function
    newSriCallback    -- callback invoked for previously unseen SRIs
    sriChangeCallback -- callback invoked when an SRI changes
    maxsize           -- maximum queue depth
    """
    self.name = name
    self._portLog = logger
    self.queue = collections.deque()
    self._maxSize = maxsize
    self._breakBlock = False
    self.stats = InStats(name, bits=bits)
    self.blocking = False
    self.sri_cmp = sriCompare
    self.newSriCallback = newSriCallback
    self.sriChangeCallback = sriChangeCallback
    self.sriDict = {}  # key=streamID, value=StreamSRI

    # One lock guards the data queue; two conditions signal data arrival
    # and queue space becoming available
    self._dataBufferLock = threading.Lock()
    self._dataAvailable = threading.Condition(self._dataBufferLock)
    self._queueAvailable = threading.Condition(self._dataBufferLock)
    # Backwards-compatibility alias
    self.port_lock = self._dataBufferLock

    # Synchronizes access to the SRIs
    self._sriUpdateLock = threading.Lock()

    # Streams that are currently active (map of streamID to stream objects)
    self._streams = {}
    self._streamsMutex = threading.Lock()

    # Streams that have the same stream ID as an active stream, when an
    # end-of-stream has been queued but not yet read (each entry in the map
    # is a list of stream objects)
    self._pendingStreams = {}

    if self._portLog is None:
        self._portLog = logging.getLogger("redhawk.bulkio.input." + name)

    if self._portLog:
        cmp_tag = "DEFAULT" if sriCompare == bulkio.sri.compare else "USER_DEFINED"
        new_sri_tag = "USER_DEFINED" if newSriCallback else "EMPTY"
        sri_change_tag = "USER_DEFINED" if sriChangeCallback else "EMPTY"
        self._portLog.debug(
            "bulkio::InPort CTOR port:" + str(name) +
            " Blocking/MaxInputQueueSize " + str(self.blocking) + "/" + str(maxsize) +
            " SriCompare/NewSriCallback/SriChangeCallback " +
            cmp_tag + "/" + new_sri_tag + "/" + sri_change_tag)
def __init__(self, name, traits):
    """Initialize the uses-side burst port.

    'traits' supplies the port/burst type metadata (PortType, BurstType and
    element size).
    """
    UsesPort.__init__(self, name, traits.PortType)

    # Type metadata taken from the traits object
    self.BurstType = traits.BurstType
    self._bytesPerElement = traits.size()

    # Default logger is named after the concrete class
    self._log = logging.getLogger(self.__class__.__name__)

    # Latency monitoring and deferred pushes happen on a separate thread
    self._monitor = ExecutorService()

    # Queue management; the default queue targets ~90% of a 2MB transfer
    self._queueMutex = threading.Lock()
    payload_bytes = 0.9 * 2 * (1024 ** 2)
    self._defaultQueue = Queue(self, self.DEFAULT_MAX_BURSTS, payload_bytes,
                               self.DEFAULT_LATENCY_THRESHOLD)
    self._streamQueues = {}

    # By default, every connection receives all streams, interleaved
    self._routingMode = ROUTE_ALL_INTERLEAVED
    self._routes = {}
def __init__(self, name, traits):
    """Construct the output burst port.

    Configures type metadata, logging, the background monitor thread,
    queue management and stream routing defaults.
    """
    UsesPort.__init__(self, name, traits.PortType)
    # Metadata describing the transported burst type
    self.BurstType = traits.BurstType
    self._bytesPerElement = traits.size()
    # Class-named logger by default
    self._log = logging.getLogger(self.__class__.__name__)
    # Background executor handles latency monitoring and deferred pushes
    self._monitor = ExecutorService()
    # Queues: one default queue plus optional per-stream queues; the default
    # byte threshold leaves ~10% headroom below a 2MB transfer
    self._queueMutex = threading.Lock()
    queue_bytes = 0.9 * 2 * (1024 ** 2)
    self._defaultQueue = Queue(self, self.DEFAULT_MAX_BURSTS, queue_bytes, self.DEFAULT_LATENCY_THRESHOLD)
    self._streamQueues = {}
    # Routing defaults: all connections get all streams, interleaved
    self._routes = {}
    self._routingMode = ROUTE_ALL_INTERLEAVED
class IDLLibrary(object):
    """Loads and caches CORBA interface definitions from IDL files.

    Interfaces are looked up by repository ID; IDL files are parsed lazily
    and each file is parsed at most once.
    """
    __log = logging.getLogger(__name__).getChild('IDLLibrary')

    def __init__(self):
        self._interfaces = {}
        self._searchPaths = []
        self._includePaths = []
        self._lock = threading.Lock()

        # Track parsed files to avoid repeatedly parsing the same files
        self._parsed = set()

        # The omniORB and omniCOS IDL locations are always on the include path
        self._omniPath = _getIDLDir('omniORB4', 'omniORB')
        self.addIncludePath(self._omniPath)
        self._cosPath = _getIDLDir('omniCOS4', 'omniORB/COS')
        self.addIncludePath(self._cosPath)

    def addIncludePath(self, path):
        """Add a directory to the IDL include path (no duplicates)."""
        if path not in self._includePaths:
            self._includePaths.append(path)

    def addSearchPath(self, path):
        """Add a directory to search for IDL files; it is also used as an
        include path."""
        # 'x not in y' is the idiomatic form of 'not x in y'
        if path not in self._searchPaths:
            self._searchPaths.append(path)
            self.addIncludePath(path)

    def _importCosModule(self, name):
        # Cos interfaces follow a predictable file naming scheme
        filename = name.replace('omg.org/', '') + '.idl'
        fullpath = os.path.join(self._cosPath, filename)
        self._importFile(fullpath)

    def _importFile(self, filename):
        # Skip files already parsed or that do not exist
        if filename in self._parsed:
            return
        elif not os.path.exists(filename):
            return
        # Parse file and save the interfaces; this may return interfaces
        # from other files as well.
        for interface in importIDL.getInterfacesFromFile(filename, self._includePaths):
            # Only add new, unseen interfaces.
            if interface.repoId not in self._interfaces:
                self._interfaces[interface.repoId] = interface
            # Mark the file that provided this interface as parsed
            self._parsed.add(interface.fullpath)
        # Mark the file as parsed in case it didn't contain any interfaces
        self._parsed.add(filename)

    def _importAllFromPath(self, path):
        # Parse every IDL file directly in 'path'; no-op for an empty path
        if not path:
            return
        for filename in glob.glob(os.path.join(path, '*.idl')):
            self._importFile(filename)

    def _importAllRecursive(self, path):
        # Parse every IDL file in 'path' and all of its subdirectories
        self._importAllFromPath(path)
        for root, dirs, files in os.walk(path):
            for dirname in dirs:
                self._importAllFromPath(os.path.join(root, dirname))

    def _importBulkioModule(self, name):
        bulkio_path = self._findPath('ossie/BULKIO')
        if not bulkio_path:
            # BULKIO is not installed
            return
        if name.startswith('data'):
            # Data ports have a predictable per-type file name
            filename = os.path.join(bulkio_path, 'bio_' + name + '.idl')
            if os.path.exists(filename):
                self.__log.trace("Importing file '%s'", filename)
                self._importFile(filename)
                return
        self.__log.trace('Importing all BULKIO IDL')
        self._importAllFromPath(bulkio_path)

    def _importOmniPath(self):
        # Import the omniORB IDL path non-recursively, as COS is typically
        # installed as a subdirectory, and many of its modules dump errors to
        # the console.
        self.__log.trace("Importing all from '%s'", self._omniPath)
        for filename in glob.glob(os.path.join(self._omniPath, '*.idl')):
            if os.path.basename(filename) in ('poa.idl', 'poa_include.idl'):
                # Exclude the POA IDL, which is unlikely to appear in real use
                # but will cause parser warnings
                continue
            self._importFile(filename)

    def _parseInterface(self, repoid):
        # Parse the IDL file(s) likely to define 'repoid' based on its
        # namespace; raises InvalidRepoIDError for a malformed repository ID
        try:
            idl, name, version = repoid.split(':')
        except ValueError:
            raise InvalidRepoIDError(repoid)
        if '/' in name:
            namespace, name = name.rsplit('/', 1)
        else:
            namespace = ''
        if namespace.startswith('omg.org/'):
            self._importCosModule(namespace)
        elif namespace == 'BULKIO':
            self._importBulkioModule(name)
        elif namespace:
            if namespace in ('CF', 'ExtendedCF', 'StandardEvent', 'ExtendedEvent', 'PortTypes'):
                search_path = self._findPath('ossie/CF')
            else:
                search_path = self._findPath(os.path.join('redhawk', namespace))
            self.__log.trace("Importing all from '%s'", search_path)
            self._importAllFromPath(search_path)

    def _findInterface(self, repoid):
        # Progressively widen the search until the interface is found or all
        # known locations are exhausted
        self.__log.trace("Searching for interface '%s'", repoid)
        self._parseInterface(repoid)
        if repoid in self._interfaces:
            self.__log.trace("Found '%s' in standard location", repoid)
            return
        self.__log.trace("Doing exhaustive search")
        for path in self._searchPaths:
            self.__log.trace("Importing all recursively from '%s'", path)
            self._importAllRecursive(path)
            if repoid in self._interfaces:
                self.__log.trace("Found '%s' in search path '%s'", repoid, path)
                return
        # If all else fails, search the omniORB IDL
        self._importOmniPath()
        if repoid in self._interfaces:
            self.__log.trace("Found '%s' in search path '%s'", repoid, self._omniPath)
            return
        self.__log.trace("No definition found for '%s'", repoid)

    def _findPath(self, dirname):
        # Return the first search path that contains 'dirname', or None
        for path in self._searchPaths:
            fullpath = os.path.join(path, dirname)
            if os.path.exists(fullpath):
                return fullpath
        return None

    def getInterface(self, repoid):
        """Return the interface for 'repoid', parsing IDL files on demand.

        Raises UnknownInterfaceError if no definition can be found.
        """
        # 'with' guarantees the lock is released even if parsing raises,
        # replacing the manual acquire()/try/finally/release() pattern
        with self._lock:
            if repoid not in self._interfaces:
                self._findInterface(repoid)
            interface = self._interfaces.get(repoid, None)
            if interface is None:
                raise UnknownInterfaceError(repoid)
            return interface
"matplotlib-based plots are not available by default on Red Hat Enterprise Linux 5 (missing PyQt4 dependency)" ) else: raise RuntimeError( "Missing required package for sandbox plots: '%s'" % e) from ossie.utils.model import PortSupplier from ossie.utils.model.connect import PortEndpoint from ossie.utils.sb import domainless from ossie.utils.sandbox.helper import ThreadedSandboxHelper, ThreadStatus __all__ = ('LinePlot', 'LinePSD', 'RasterPlot', 'RasterPSD', 'XYPlot') log = logging.getLogger(__name__) class PlotBase(ThreadedSandboxHelper): """ Abstract base class for all matplotlib-based plots. Manages the provides port dictionary, the matplotlib figure, and the plot update thread. """ def __init__(self): _deferred_imports() ThreadedSandboxHelper.__init__(self) # Use 1/10th of a second for sleeping when there are no updates self.setThreadDelay(0.1) # Create provides port dictionary.
def __init__(self):
    """Start with empty capture state and a logger named for the class."""
    # Data packets and SRI headers received so far, in arrival order
    self.packets = []
    self.H = []
    # Per-class logger
    self.logger = logging.getLogger(self.__class__.__name__)
mode="readonly", kinds=("allocation",))

    def allocate_BogoMipsCapacity (self, value):
        """Allocate 'value' BogoMips; return False if not enough remain."""
        if self.BogoMipsCapacity < value:
            return False
        self.BogoMipsCapacity -= value
        return True

    def deallocate_BogoMipsCapacity (self, value):
        """Return previously allocated BogoMips capacity to the pool."""
        self.BogoMipsCapacity += value

    def updateUsageState (self):
        # Update usage state
        # NOTE(review): 100000000 presumably matches the property's initial
        # (full) capacity -- confirm against the property default
        if self.BogoMipsCapacity == 0:
            self._usageState = CF.Device.BUSY
        elif self.BogoMipsCapacity == 100000000:
            self._usageState = CF.Device.IDLE
        else:
            self._usageState = CF.Device.ACTIVE

    def execute (self, *args):
        """Run a normal execute, or kill the whole process if crash-testing
        is enabled (simulates a device crash mid-execute)."""
        if self.crashEnabled:
            os.kill(os.getpid(), signal.SIGKILL)
        return ExecutableDevice.execute(self, *args)

if __name__ == "__main__":
    logging.getLogger().setLevel(logging.DEBUG)
    logging.debug("Starting Device")
    start_device(CrashableExecutableDevice)
kinds=("allocation", ))

    def allocate_BogoMipsCapacity(self, value):
        """Allocate 'value' BogoMips; return False if not enough remain."""
        if self.BogoMipsCapacity < value:
            return False
        self.BogoMipsCapacity -= value
        return True

    def deallocate_BogoMipsCapacity(self, value):
        """Return previously allocated BogoMips capacity to the pool."""
        self.BogoMipsCapacity += value

    def updateUsageState(self):
        # Update usage state
        # NOTE(review): 100000000 presumably matches the property's initial
        # (full) capacity -- confirm against the property default
        if self.BogoMipsCapacity == 0:
            self._usageState = CF.Device.BUSY
        elif self.BogoMipsCapacity == 100000000:
            self._usageState = CF.Device.IDLE
        else:
            self._usageState = CF.Device.ACTIVE

    def execute(self, *args):
        """Run a normal execute, or kill the whole process if crash-testing
        is enabled (simulates a device crash mid-execute)."""
        if self.crashEnabled:
            os.kill(os.getpid(), signal.SIGKILL)
        return ExecutableDevice.execute(self, *args)

if __name__ == "__main__":
    logging.getLogger().setLevel(logging.DEBUG)
    logging.debug("Starting Device")
    start_device(CrashableExecutableDevice)
# You should have received a copy of the GNU Lesser General Public License
# along with this program.  If not, see http://www.gnu.org/licenses/.
#

import signal

from ossie.cf import CF__POA
from ossie.resource import Resource, start_component
from ossie.utils.log4py import logging


class orphaned_child(CF__POA.Resource, Resource):
    """Test component that deliberately outlives its parent process."""

    def __init__(self, *args, **kwargs):
        Resource.__init__(self, *args, **kwargs)
        # start_component() normally installs handlers for these signals
        # before creating the component instance; ignoring them here ensures
        # the process is orphaned rather than terminated with its parent
        for signum in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):
            signal.signal(signum, signal.SIG_IGN)

    def releaseObject(self):
        # Overridden to prevent the normal automatic process exit that occurs
        # after releaseObject() in 1.9+
        pass


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.WARN)
    logging.debug("Starting Component")
    start_component(orphaned_child)
# import weakref from omniORB import CORBA import CosEventComm import CosEventChannelAdmin import CosEventChannelAdmin__POA from ossie.utils.log4py import logging from ossie.utils.notify import notification # Prepare the ORB orb = CORBA.ORB_init() poa = orb.resolve_initial_references("RootPOA") log = logging.getLogger(__name__) class EventChannel(CosEventChannelAdmin__POA.EventChannel): class ProxyPushConsumer(CosEventChannelAdmin__POA.ProxyPushConsumer): __slots__ = ('_channel', '_admin', '_supplier', '_connected') def __init__(self, channel, admin): self._channel = channel self._admin = admin self._supplier = None self._connected = False def push(self, data): if not self._connected: raise CosEventComm.Disconnected self._channel.push(data)