def registerPlugin(formats, plugin):
    if not hasattr(plugin, 'getInstance'):
        # Note: report the whole formats list here; no single format is selected yet.
        ProbeImplementationManager.getLogger().warning("ConfigFile plugin candidate for %s has no getInstance() entry point. Discarding." % formats)
    else:
        for format in formats:
            RegisteredPlugins[format] = plugin
            ProbeImplementationManager.getLogger().info("ConfigFile plugin module registered for format %s" % format)

def deployProbe(self, type_, name):
    """
    Instantiates and registers a new probe.
    Raises an exception in case of any error.
    """
    self.getLogger().info("Deploying probe %s, type %s..." % (name, type_))
    if not ProbeImplementationManager.getProbeImplementationClasses().has_key(type_):
        raise Exception("No factory registered for probe type %s" % type_)

    if self.probes.has_key(name):
        raise Exception("A probe with this name is already deployed on this agent")

    probeImplementation = ProbeImplementationManager.getProbeImplementationClasses()[type_]()
    probe = ProbeImplementationAdapter(self, name, type_, probeImplementation)

    # We reference the probe as deployed, though the registration may fail...
    self.probes[name] = probe

    # We should raise exception in case of duplicated names, ...
    if self.registered:
        self.registerProbe(probe)
    else:
        self.getLogger().info("Deferred probe registration: agent not registered yet.")

def onTriSend(self, message, sutAddress):
    try:
        # FIXME:
        # Should go to a configured codec instance instead.
        # (since we modify the message here...)
        if not message.has_key('version'):
            message['version'] = self['version']

        try:
            (encodedMessage, summary) = CodecManager.encode('http.request', message)
        except Exception as e:
            raise ProbeImplementationManager.ProbeException('Invalid request message format: cannot encode HTTP request:\n%s' % ProbeImplementationManager.getBacktrace())

        # Connect if needed
        if not self.isConnected():
            self.connect()

        # Send our payload
        self._httpConnection.send(encodedMessage)
        self.logSentPayload(summary, encodedMessage, "%s:%s" % self._httpConnection.getpeername())
        # Now wait for a response asynchronously
        self.waitResponse()
    except Exception as e:
        raise ProbeImplementationManager.ProbeException('Unable to send HTTP request: %s' % str(e))

def initialize(tacsAddress):
    """
    Initializes the AgentController proxy (client).
    """
    ProbeImplementationManager.setLogger(TliLogger())
    TACC.initialize("TE", tacsAddress)
    TACC.instance().setReceivedNotificationCallback(onTriEnqueueMsgNotification)
    TACC.instance().setLogNotificationCallback(onLogNotification)

def initialize(probePaths = ["../plugins/probes"], codecPaths = ["../plugins/codecs"]):
    # CodecManager logging diversion
    CodecManager.instance().setLogCallback(logging.getLogger("Agent.Codec").debug)
    # ProbeImplementationManager logging diversion
    ProbeImplementationManager.setLogger(logging.getLogger("Agent.Probe"))
    # Loading plugins: probes & codecs
    localPath = os.path.normpath(os.path.realpath(os.path.dirname(sys.modules[globals()['__name__']].__file__)))
    scanPlugins([ ((os.path.isabs(x) and x) or os.path.normpath(os.path.realpath('%s/%s' % (os.getcwd(), x)))) for x in codecPaths ], label = "codec")
    scanPlugins([ ((os.path.isabs(x) and x) or os.path.normpath(os.path.realpath('%s/%s' % (os.getcwd(), x)))) for x in probePaths ], label = "probe")

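# A minimal usage sketch (paths are hypothetical): relative plugin directories
# are resolved against the current working directory before being scanned.
# initialize(probePaths = ["plugins/probes", "/opt/agent/probes"],
#            codecPaths = ["plugins/codecs"])
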
def createProbe(uri, type_, transient = False):
    """
    Instantiates a new Test Adapter (i.e. a Probe instance) with its adapter, from type_ and uri.

    If the uri is of the form name@agent, we look for a remote type, prefixing the provided
    type with "remote." to look for the actually implemented type.
    If it is not, this is a local implementation, and we prefix the type with "local."
    (uri form: probe:name)

    @type  uri: string
    @param uri: a valid probe uri (probe:name or probe:name@agent)
    @type  type_: string
    @param type_: the test adapter implementation type. Should not start with "remote." or
    "local.", as the prefix is automatically added based on the uri format.

    @rtype: Probe
    @returns: a new probe instance, unconfigured, or None if no implementation factory was found.
    """
    # Derive the actual implementation identifier from the uri + given type
    u = Messages.Uri(uri)
    adapter = None
    if u.getUser():
        # The probe is remote.
        # We may look for additional stubs (interceptors) here.
        adapter = RemoteProbeAdapter() # No need for a local implementation.
    else:
        # We're looking for a local probe only.
        # Search for an implementation in the local plugin space.
        if ProbeImplementationManager.getProbeImplementationClasses().has_key(type_):
            probeImplementation = ProbeImplementationManager.getProbeImplementationClasses()[type_]()
            adapter = LocalProbeAdapter(probeImplementation)

    if adapter:
        # May raise an exception if the attachment is not feasible (stubs and remote probes)
        adapter.attachToUri(uri, type_)
        adapter.setTransient(transient)
        return adapter
    else:
        # Otherwise, nothing to do.
        raise TestermanSAException("No registered factory for test adapter/probe type %s" % type_)
    return None

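# A minimal usage sketch (probe names and type are hypothetical): a probe:name
# uri is resolved against the local plugin space, while probe:name@agent is
# wrapped in a RemoteProbeAdapter.
# localProbe  = createProbe("probe:p01", "tcp")
# remoteProbe = createProbe("probe:p02@agent01", "tcp", transient = True)
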
def _send(self, conn, data):
    encoder = self['default_encoder']
    if encoder:
        try:
            (data, summary) = CodecManager.encode(encoder, data)
        except Exception:
            raise ProbeImplementationManager.ProbeException('Cannot encode outgoing message using default encoder:\n%s' % ProbeImplementationManager.getBacktrace())
        self.logSentPayload(summary, data, "%s:%s" % conn.socket.getpeername())
    else:
        self.logSentPayload("TCP data", data, "%s:%s" % conn.socket.getpeername())
    conn.socket.send(data)

def onTriSend(self, message, sutAddress):
    """
    Internal SSH probe message:

    { 'cmd': 'execute', 'command': string, 'host': string, 'username': string, 'password': string, [ 'timeout': float in s, 5.0 by default ] }
    { 'cmd': 'cancel' }

    The timeout is the maximum amount of time allowed to connect and start executing the command.
    The command itself may last forever.
    """
    self.getLogger().debug("onTriSend(%s, %s)" % (unicode(message), unicode(sutAddress)))

    # The message must be a (cmd, value) couple.
    if not (isinstance(message, tuple) or isinstance(message, list)) or not len(message) == 2:
        raise Exception("Invalid message format")

    cmd, value = message
    if cmd == 'execute':
        m = {'cmd': 'execute', 'command': value, 'host': self['host'], 'username': self['username'], 'password': self['password'], 'workingdir': self['working_dir']}
    elif cmd == 'cancel':
        m = {'cmd': 'cancel'}
    else:
        raise Exception("Invalid message format")

    try:
        self._checkArgs(m, [('cmd', None)])
        cmd = m['cmd']
        if cmd == 'cancel':
            return self.cancelCommand()
        elif cmd == 'execute':
            self._checkArgs(m, [('command', None), ('host', None), ('username', None), ('password', None), ('timeout', self['timeout']), ('workingdir', None)])
            command = m['command']
            host = m['host']
            username = m['username']
            password = m['password']
            timeout = m['timeout']
            workingdir = m['workingdir']
            try:
                self.executeCommand(command, username, host, password, timeout, workingdir)
            except Exception as e:
                self.triEnqueueMsg(str(e))
    except Exception as e:
        raise ProbeImplementationManager.ProbeException(self._getBacktrace())

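# A minimal usage sketch (port name and command are hypothetical): the test
# component sends an ('execute', <command>) couple; host and credentials come
# from the probe properties, not from the message itself.
# sshProbePort.send(('execute', 'uname -a'))   # runs the command on self['host']
# sshProbePort.send(('cancel', None))          # cancels the running command
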
def _waitForDeleteResult(self, request, timeout):
    """
    Executed in a dedicated thread.
    """
    res = False
    deleteResult = False
    try:
        # Let's poll the result regularly, while we have a pending request.
        resultType, resultData = self._server.result(request, timeout = timeout)
        self.getLogger().debug('Delete result: %s' % resultData)
        res = True
        # resultData is an empty list if OK
        # deleteResult = resultData
        deleteResult = True
    except ldap.USER_CANCELLED:
        pass
    except ldap.NO_SUCH_OBJECT:
        res = True
        deleteResult = False
    except Exception as e:
        self._onError(request, ProbeImplementationManager.getBacktrace())
    if res:
        self._onResult(request, deleteResult, 'delete')

def _waitForSearchResult(self, request, timeout):
    """
    Executed in a dedicated thread.
    """
    res = False
    resultSet = []
    try:
        # Let's poll the result regularly, while we have a pending request.
        resultType, resultData = self._server.result(request, timeout = timeout)
        self.getLogger().debug('Search result: %s' % resultData)
        for (dn, attributes) in resultData:
            resultSet.append({'dn': self._stripBaseDn(dn), 'attributes': attributes})
        res = True
    except ldap.USER_CANCELLED:
        pass
    except ldap.NO_SUCH_OBJECT:
        res = True
        resultSet = []
    except Exception as e:
        self._onError(request, ProbeImplementationManager.getBacktrace())
    if res:
        self._onResult(request, resultSet, 'search')

def onTriSend(self, message, sutAddress):
    try:
        # FIXME:
        # Should go to a configured codec instance instead.
        # (since we modify the message here... should be a copy instead)
        if not message.has_key('version'):
            message['version'] = self['version']
        if not message.has_key('headers'):
            message['headers'] = {}

        cseq = None
        # Non-strict mode: CSeq management: we add one if none is found
        if not self['strict_mode']:
            # Look for a CSeq
            for k, v in message['headers'].items():
                if k.lower() == 'cseq':
                    cseq = str(v)
            if cseq is None:
                # Generate and set a cseq
                message['headers']['CSeq'] = self.generateCSeq()
                cseq = str(message['headers']['CSeq'])

        try:
            encodedMessage, summary = CodecManager.encode('rtsp.request', message)
        except Exception as e:
            raise ProbeImplementationManager.ProbeException('Invalid request message format: cannot encode RTSP request')

        # Connect if needed
        if not self.isConnected():
            self.connect()

        # Send our payload
        self._connection.send(encodedMessage)
        self.logSentPayload(summary, encodedMessage, "%s:%s" % self._connection.getpeername())
        # Now wait for a response asynchronously
        self.waitResponse(cseq = cseq)
    except Exception as e:
        raise ProbeImplementationManager.ProbeException('Unable to send RTSP request: %s' % str(e))

def run(self):
    # Main poll loop
    while not self._stopEvent.isSet():
        try:
            listening = self._probe._getListeningSockets()
            # active is a dict of socket: peerAddress
            active = self._probe._getActiveSockets()
            rset = listening + active.keys()

            r, w, e = select.select(rset, [], [], 0.001)
            for s in r:
                try:
                    if s in listening:
                        self._probe.getLogger().debug("Accepting a new connection")
                        (sock, addr) = s.accept()
                        if self._probe['use_ssl']:
                            sock = self._probe._toSsl(sock, serverSide = True)
                        self._probe._onIncomingConnection(sock, addr)
                        # Raise a new connection notification event - soon
                    else:
                        # Active socket. We get its peername from its registration, not via s.getpeername(),
                        # as the remote endpoint might have sent a RST and disconnected while we still have
                        # some data to read from it. Calling s.getpeername() would then fail, preventing us
                        # from reading that remaining data.
                        addr = active.get(s)
                        self._probe.getLogger().debug("New data to read from %s" % str(addr))
                        data = s.recv(65535)
                        if not data:
                            self._probe.getLogger().debug("%s disconnected by peer" % str(addr))
                            self._probe._feedData(addr, '') # notify the feeder that we won't have more data
                            self._probe._disconnect(addr, reason = "disconnected by peer")
                        else:
                            # New received message.
                            self._probe._feedData(addr, data)
                except Exception as e:
                    self._probe.getLogger().warning("exception while polling active/listening sockets: %s" % str(e) + ProbeImplementationManager.getBacktrace())
        except Exception as e:
            self._probe.getLogger().warning("exception while polling active/listening sockets: %s" % str(e))
            # Avoid 100% CPU usage when select() raises an error
            time.sleep(0.01)

def onTriSend(self, message, sutAddress):
    (cmd, args) = message
    if cmd == 'startWatchingDirs':
        self._checkArgs(args, [('dirs', None), ('interval', 1.0), ('patterns', [r'.*'])])
        compiledPatterns = [re.compile(x) for x in args['patterns']]
        self.startWatching(dirs = args['dirs'], interval = args['interval'], patterns = compiledPatterns)
    elif cmd == 'stopWatchingDirs':
        self.stopWatching()
    else:
        raise ProbeImplementationManager.ProbeException("Invalid message format (%s)" % cmd)

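# A minimal usage sketch (port name and values hypothetical): commands are
# (cmd, args) couples; 'interval' and 'patterns' fall back to the defaults
# declared above when omitted.
# dirWatcherPort.send(('startWatchingDirs', {'dirs': ['/var/log'], 'interval': 0.5, 'patterns': [r'.*\.log']}))
# dirWatcherPort.send(('stopWatchingDirs', {}))
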
def onTriSend(self, message, sutAddress):
    (cmd, args) = message
    if cmd == 'startWatchingFiles':
        self._checkArgs(args, [('files', None), ('interval', 1.0), ('patterns', [r'.*'])])
        compiledPatterns = [re.compile(x) for x in args['patterns']]
        # Glob files here - glob.glob() blocks when called from the watching thread (?! - blocked in fnmatch.filter: import os, posixpath)
        files = []
        for arg in args['files']:
            files += glob.glob(arg)
        self.startWatching(files = files, interval = args['interval'], patterns = compiledPatterns)
    elif cmd == 'stopWatchingFiles':
        self.stopWatching()
    else:
        raise ProbeImplementationManager.ProbeException("Invalid message format (%s)" % cmd)

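# A minimal usage sketch (port name and values hypothetical): 'files' entries
# are glob patterns, expanded here before the watching thread starts.
# fileWatcherPort.send(('startWatchingFiles', {'files': ['/var/log/messages*'], 'patterns': [r'ERROR: (?P<reason>.*)']}))
# fileWatcherPort.send(('stopWatchingFiles', {}))
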
def registerAgent(self):
    self.registered = False
    req = Messages.Request(method = "REGISTER", uri = self.getUri(), protocol = "Xa", version = "1.0")
    # we should add a list of supported probe types, os, etc?
    req.setHeader("Agent-Supported-Probe-Types", ','.join(ProbeImplementationManager.getProbeImplementationClasses().keys()))
    response = self.request(req)
    if not response:
        raise Exception("Timeout")
    if response.getStatusCode() != 200:
        raise Exception("Unable to register: " + response.getReasonPhrase())
    self.registered = True
    self.getLogger().info("Agent %s registered" % self.getUri())

def write(self, dn, attributes):
    """
    Synchronous implementation for now...
    """
    dn = self._addBaseDn(dn)
    if not self._ensureBind():
        return
    completed = False
    try:
        try:
            # a list of tuples (dn, attribute dict)
            searchResult = self._server.search_s(dn, ldap.SCOPE_BASE)
            if not searchResult:
                # Let's add it
                self._server.add_s(dn, ldap.modlist.addModlist(attributes))
                completed = True
            elif len(searchResult) == 1:
                # Let's modify the entry
                # modifyModlist(previous values, new values)
                self._server.modify_s(dn, ldap.modlist.modifyModlist(searchResult[0][1], attributes, ignore_oldexistent = 1))
                completed = True
            else:
                # Cannot write multiple DNs.
                self.triEnqueueMsg(('error', 'Multiple entries correspond to this dn, cannot update'), self['server_url'])
        except ldap.NO_SUCH_OBJECT:
            # Let's add it
            self._server.add_s(dn, ldap.modlist.addModlist(attributes))
            completed = True
    except Exception as e:
        self.triEnqueueMsg(('error', 'Error while updating/adding some data:\n%s' % ProbeImplementationManager.getBacktrace()), self['server_url'])
    if completed:
        self.triEnqueueMsg(('writeResult', True))

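# A minimal usage sketch (dn and attribute values hypothetical): write() adds
# the entry if it does not exist, updates it if exactly one entry matches, and
# reports an error when the dn is ambiguous. The test component then receives
# ('writeResult', True) on success, or an ('error', ...) couple otherwise.
# probe.write('uid=jdoe,ou=people', {'mail': ['jdoe@example.com']})
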
def run(self):
    self._probe.getLogger().debug("Starting command execution thread...")
    try:
        if isinstance(self._command, list):
            self._process = pexpect.spawn(self._command[0], self._command[1:])
        else:
            # Assume this is a string
            self._process = pexpect.spawn(self._command)
        self._process.setwinsize(24, 80)
    except Exception as e:
        self._probe.triEnqueueMsg('Internal execution error: %s' % ProbeImplementationManager.getBacktrace())

    retcode = None
    alive = True
    while alive:
        alive = self._process.isalive()
        try:
            r = self._process.read_nonblocking(1024, self._timeout)
            self.handleOutput(r, 'stdout')
        except:
            time.sleep(0.001)

    # Consume the whole output
    try:
        while 1:
            r = self._process.read_nonblocking(1024, self._timeout)
            self.handleOutput(r, 'stdout')
    except:
        pass # nothing more to read

    self._process.close()
    retcode = self._process.status
    self._process = None
    self._probe._onExecThreadTerminated()
    if self.getReportStatus():
        self._probe.triEnqueueMsg({'status': retcode})
    self._stoppedEvent.set()

        except Exception as e:
            self.getLogger().warning("Unable to remove added file %s: %s" % (path, str(e)))
    for path in self._modifiedFiles:
        try:
            self.getLogger().debug("Restoring modified/deleted file %s..." % path)
            # ...
        except Exception as e:
            self.getLogger().warning("Unable to restore modified/deleted file %s: %s" % (path, str(e)))
    self._addedFiles = []
    self._modifiedFiles = []

def _backupFileToDelete(self, path):
    if not path in self._addedFiles + self._modifiedFiles:
        pass
        # targetDir = "%s/
        # os.makedirs
        # target = "%s/
        # self.getLogger().info("Backup up file to delete %s -> %s" % (path, target)
        # shutil.move(path, target)
        # self._deletedFiles.append(path)

def _backupFile(self, path):
    if not path in self._addedFiles + self._modifiedFiles:
        # if fileExists(path):
        pass


ProbeImplementationManager.registerProbeImplementationClass('file.manager', FileManagerProbe)

ProbeImplementationManager.registerProbeImplementationClass('tcp', TcpProbe)

                        self._probe._onIncomingConnection(sock, addr)
                        # Raise a new connection notification event - soon
                    else:
                        addr = s.getpeername()
                        self._probe.getLogger().debug("New data to read from %s" % str(addr))
                        data = s.recv(65535)
                        if not data:
                            self._probe.getLogger().debug("%s disconnected by peer" % str(addr))
                            self._probe._disconnect(addr, reason = "disconnected by peer")
                        else:
                            # New received message.
                            self._probe._feedData(addr, data)
                except Exception as e:
                    self._probe.getLogger().warning("exception while polling active/listening sockets: %s" % str(e))
        except Exception as e:
            self._probe.getLogger().warning("exception while polling active/listening sockets: %s" % str(e))
            # Avoid 100% CPU usage when select() raised an error
            time.sleep(0.01)


ProbeImplementationManager.registerProbeImplementationClass('sctp', SctpProbe)

    (cmd, args) = message
    # Loose command implementation only for now
    largs = []
    if args.has_key('target'):
        largs = [args['target']]
    if args.has_key('value'):
        largs.append(args['value'])

    if SeleniumCommandPrototypes.has_key(cmd):
        proto = SeleniumCommandPrototypes[cmd]
        method = getattr(self._selenium, proto['method'])
        self.logSentPayload("%s(target = %s, value = %s)" % (cmd, args.get('target', None), args.get('value', None)), payload = '', sutAddress = '%s:%s' % (self['rc_host'], self['rc_port']))
        ret = method(cmd, largs)
        if proto['treturnType']:
            self.logReceivedPayload("%s returned" % cmd, payload = ret, sutAddress = '%s:%s' % (self['rc_host'], self['rc_port']))
            self.triEnqueueMsg(ret)
    else:
        # We assume that no return value is expected
        method = getattr(self._selenium, "do_command")
        self.logSentPayload("%s(target = %s, value = %s)" % (cmd, args.get('target', None), args.get('value', None)), payload = '', sutAddress = '%s:%s' % (self['rc_host'], self['rc_port']))
        ret = method(cmd, largs)


ProbeImplementationManager.registerProbeImplementationClass('selenium', SeleniumProbe)

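# A minimal usage sketch (port name and locator values hypothetical): messages
# are (command, arguments) couples mapped onto Selenium RC calls; commands not
# listed in SeleniumCommandPrototypes fall back to do_command().
# seleniumPort.send(('open', {'target': '/'}))
# seleniumPort.send(('type', {'target': 'id=q', 'value': 'testerman'}))
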
import os
import os.path
import sys  # used for sys.path / sys.modules below

def scanPlugins(paths, label):
    for path in paths:
        if not path in sys.path:
            sys.path.append(path)
        try:
            for m in os.listdir(path):
                if m.startswith('__init__') or not (os.path.isdir(path + '/' + m) or m.endswith('.py')) or m.startswith('.'):
                    continue
                if m.endswith('.py'):
                    m = m[:-3]
                try:
                    plugin = __import__(m)
                    registerPlugin(plugin.SUPPORTED_CONF_FILE_FORMATS, plugin)
                except Exception as e:
                    ProbeImplementationManager.getLogger().warning("Unable to import %s %s: %s" % (m, label, str(e)))
        except Exception as e:
            ProbeImplementationManager.getLogger().warning("Unable to scan %s path for %ss: %s" % (path, label, str(e)))

# On import, scan plugins
currentDir = os.path.normpath(os.path.realpath(os.path.dirname(sys.modules[globals()['__name__']].__file__)))
scanPlugins(["%s/plugins" % currentDir], 'configuration file accessor')

##
# ConfigFile probe class registration
##
ProbeImplementationManager.registerProbeImplementationClass('configurationfile', ConfigFileProbe)

    try:
        conn = dbapi.connect(user = self['user'], passwd = self['password'], db = self['database'], host = self['host']) # '%s:%s' % (self['host'], self['port'])
        cursor = conn.cursor()
        self.logSentPayload(query.split(' ')[0].upper(), query)
        try:
            cursor.execute(query)
            conn.commit()
        except Exception as e:
            conn.rollback()
            cursor.close()
            conn.close()
            raise e

        res = []
        if cursor.description: # equivalent to "the previous execute() provided a result set?"
            columnNames = map(lambda x: x[0], cursor.description)
            for row in cursor.fetchall():
                res.append(dict(zip(columnNames, row)))
        self.triEnqueueMsg(('result', res))

        cursor.close()
        conn.close()
    except Exception as e:
        self.getLogger().warning("Exception while handling a query: %s" % str(e))
        self.triEnqueueMsg(('error', str(e)))


ProbeImplementationManager.registerProbeImplementationClass('sql.mysql', MySqlProbe)

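# A minimal usage sketch, assuming the message sent to this probe is the raw
# SQL string that ends up in 'query' above (port name and query hypothetical).
# Row sets come back as ('result', [dicts keyed by column name]); failures as
# ('error', <message>).
# mysqlPort.send("SELECT id, name FROM users")
# expected reply shape: ('result', [{'id': 1, 'name': 'alice'}, ...])
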
if __name__ == '__main__':
    # Self-test for the list comparison helpers
    assert _compareLists([1, 2, 3, 4, 5, 6], [1, 2, 5, 6]) == ([3, 4], [])
    assert _compareLists([1, 2, 3, 4, 5, 6], [2, 3, 5, 6]) == ([1, 4], [])
    assert _compareLists([4, 5, 6], [1, 2, 3, 4, 5, 6]) == ([], [1, 2, 3])
    assert _compareLists([1, 3, 4, 5, 6], [1, 2, 3, 4, 7]) == ([5, 6], [2, 7])

    assert _compareLists2([1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]) == ([], [])
    assert _compareLists2([1, 2, 5, 6], [1, 2, 3, 4, 5, 6]) == ([], [3, 4])
    assert _compareLists2([1, 2, 3, 4, 5, 6], [1, 2, 5, 6]) == ([3, 4], [])
    assert _compareLists2([1, 2, 3, 4, 5, 6], [2, 3, 5, 6]) == ([1, 4], [])
    assert _compareLists2([4, 5, 6], [1, 2, 3, 4, 5, 6]) == ([], [1, 2, 3])
    assert _compareLists2([1, 3, 4, 5, 6], [1, 2, 3, 4, 7]) == ([5, 6], [2, 7])

    # Rough benchmark of the different implementations
    import random
    import time
    l = 10000
    l1 = [random.randrange(0, l * 10) for x in range(0, l)]
    l2 = [random.randrange(0, l * 10) for x in range(0, l)]
    l1.sort()
    l2.sort()
    for f in (_compareLists, _compareLists2, _compareLists3):
        start = time.time()
        f(l1, l2)
        stop = time.time()
        print "%s: %s" % (f, stop - start)
else:
    ProbeImplementationManager.registerProbeImplementationClass("watcher.dir", DirWatcherProbe)

def onTriSAReset(self):
    pass

def onTriSend(self, message, sutAddress):
    try:
        (operation, args) = message
    except:
        raise Exception("Invalid message format")

    proxy = ServerProxy(self, self['server_url'])
    # The following code should be executed in a dedicated thread
    try:
        f = getattr(proxy, operation)
        if isinstance(args, list):
            ret = f(*args)
        elif isinstance(args, dict):
            ret = f(**args)
        else:
            ret = f(args)
        event = ('response', ret)
    except xmlrpclib.Fault as e:
        event = ('fault', {'code': e.faultCode, 'string': e.faultString})
    # Raise other exceptions if needed
    self.triEnqueueMsg(event)


ProbeImplementationManager.registerProbeImplementationClass('xmlrpc.client', XmlRpcClientProbe)

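# A minimal usage sketch (port name and remote method names hypothetical):
# messages are (operation, args) couples; list args map to positional
# parameters, dict args to keyword parameters. Replies are ('response', value)
# or ('fault', {'code': ..., 'string': ...}).
# xmlrpcPort.send(('listMethods', []))
# xmlrpcPort.send(('getUserById', {'userId': 42}))
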
        conn = dbapi.connect(str(self['user']), str(self['password']), dsn)
        cursor = conn.cursor()
        self.logSentPayload(query.split(' ')[0].upper(), query)
        try:
            cursor.execute(str(query))
            conn.commit()
        except Exception as e:
            self.getLogger().warning("Exception while executing query: %s" % getBacktrace())
            conn.rollback()
            cursor.close()
            conn.close()
            raise e

        res = []
        if cursor.description: # equivalent to "the previous execute() provided a result set?"
            columnNames = map(lambda x: x[0], cursor.description)
            for row in cursor.fetchall():
                res.append(dict(zip(columnNames, row)))
        self.triEnqueueMsg(('result', res))

        cursor.close()
        conn.close()
    except Exception as e:
        self.getLogger().warning("Exception while handling a query: %s" % getBacktrace())
        self.triEnqueueMsg(('error', str(e)))


ProbeImplementationManager.registerProbeImplementationClass('sql.oracle', OracleProbe)

    if not inspect.ismethod(attr):
        return True
    if self.retValuePattern.search(attr.__name__):
        return True
    return False

def _isElementPresent(self, target):
    try:
        self._getWebelement(target)
    except NoSuchElementException as e:
        return False
    return True

def _isAlertPresent(self):
    try:
        self.driver.switch_to_alert()
    except NoAlertPresentException as e:
        return False
    return True

def _getDriverObj(self):
    return self.driver

def _reset(self):
    if self.driver:
        if self['auto_shutdown']:
            self.driver.quit()
        self.driver = None


ProbeImplementationManager.registerProbeImplementationClass('selenium.webdriver', SeleniumWebdriverProbe)

        self._ssh.sendline(actualCommandLine)
        # Wait for a command completion
        while not self._stopEvent.isSet():
            if self._ssh.prompt(0.1):
                # We got a completion - skip the command line (that could be multiline) that has been echoed.
                output = '\n'.join(self._ssh.before.split('\n')[len(splitcmd):])
                self._ssh.sendline('echo $?')
                self._ssh.prompt()
                # 'before' contains: line 0: echo $?, line 1: the echo output
                status = int(self._ssh.before.split('\n')[1].strip())
                self._ssh.logout()
                break
    except Exception as e:
        self._probe.triEnqueueMsg('Internal SSH error: %s' % str(e))

    # Kill the ssh session, if still active for whatever reason
    try:
        self._ssh.terminate()
    except:
        pass

    self._probe.onSshThreadTerminated(status, output)

def stop(self):
    self._stopEvent.set()


ProbeImplementationManager.registerProbeImplementationClass("ssh", SshProbe)

                # TODO: use a bitmap of updated properties
                if (pt, src[0], src[1], ssrc) != (lastPt, lastSourceIp, lastSourcePort, lastSsrc):
                    # PT or emitter updated: raise a stop then a start event.
                    self._probe.triEnqueueMsg(('stoppedReceivingRtp', {'reason': 'updated'}), "%s:%s" % src)
                    self._probe.logReceivedPayload("Receiving RTP...", data, "%s:%s" % src)
                    self._probe.triEnqueueMsg(('startedReceivingRtp', {'payloadType': pt, 'ssrc': ssrc, 'fromIp': lastSourceIp, 'fromPort': lastSourcePort}), "%s:%s" % src)
                # Update stream properties with the current values
                lastPt = pt
                lastSourceIp, lastSourcePort = src
                lastSsrc = ssrc
                lastTime = time.time()
        except Exception as e:
            self._probe.getLogger().error("Exception while listening RTP: %s" % str(e))

    self._probe._conditionallyCloseSocket(fromSocket)


ProbeImplementationManager.registerProbeImplementationClass('rtp', RtpProbe)

        self._probe.getLogger().debug('Got some input on %s: (%s)' % (stream, repr(data)))
        buf = self._buffer[stream]
        buf += data
        if self._separator:
            msgs = buf.split(self._separator)
            for msg in msgs[:-1]:
                for pattern in self._patterns:
                    m = pattern.match(msg)
                    if m:
                        event = {'stream': stream, 'output': msg}
                        for k, v in m.groupdict().items():
                            event['matched_%s' % k] = v
                        self._probe.triEnqueueMsg(event)
            buf = msgs[-1]
        else:
            # No separator management. Notifies things as is.
            for pattern in self._patterns:
                m = pattern.match(buf)
                if m:
                    event = {'stream': stream, 'output': buf}
                    for k, v in m.groupdict().items():
                        event['matched_%s' % k] = v
                    self._probe.triEnqueueMsg(event)
                    buf = ''


ProbeImplementationManager.registerProbeImplementationClass("exec.interactive", InteractiveExecProbe)

            offset = 0
        if offset is None:
            # Nothing to do for the file
            return
        # OK, scan the file to get matching new lines
        self._probe.getLogger().debug("File %s changed since the last tick, starting at %d" % (filename, offset))
        f = open(filename, 'r')
        f.seek(offset)
        newlines = f.readlines()
        f.close()
        # Technically, the file may have grown since we took the ref size.
        # We should lock the file until we complete our analysis and reading,
        # but the current implementation should be enough for typical probe usages.
        for line in newlines:
            for pattern in self._patterns:
                m = pattern.match(line)
                if m:
                    event = {'filename': filename, 'line': line.strip()} # Should we strip the line?
                    for k, v in m.groupdict().items():
                        event['matched_%s' % k] = v
                    self._probe.triEnqueueMsg(event)
                    # A line can be matched only once.
                    break
            # else no match


ProbeImplementationManager.registerProbeImplementationClass("watcher.file", FileWatcherProbe)

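# A minimal illustration (pattern, file name and line are hypothetical): named
# groups in a watched pattern surface as matched_<group> keys in the event.
import re
_m = re.compile(r'ERROR: (?P<reason>.*)').match('ERROR: disk full')
_event = {'filename': '/var/log/app.log', 'line': 'ERROR: disk full'}
for _k, _v in _m.groupdict().items():
    _event['matched_%s' % _k] = _v
# _event == {'filename': '/var/log/app.log', 'line': 'ERROR: disk full', 'matched_reason': 'disk full'}
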
    else:
        ret = True
    return ret

def _addBaseDn(self, dn):
    """
    Appends the base_dn property, if any.
    """
    baseDn = self['base_dn']
    if baseDn:
        return ','.join(filter(lambda x: x.strip(), dn.split(',') + baseDn.split(',')))
    else:
        return dn

def _stripBaseDn(self, dn):
    """
    If the dn ends with the base_dn property, strips it.
    """
    baseDn = self['base_dn']
    if baseDn:
        baseDn = ','.join(filter(lambda x: x.strip(), self['base_dn'].split(',')))
        if dn.lower().endswith(baseDn.lower()):
            dn = dn[:-len(baseDn)]
            if dn.endswith(','):
                dn = dn[:-1]
    return dn


ProbeImplementationManager.registerProbeImplementationClass('ldap.client', LdapClientProbe)

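# A minimal illustration (dn values hypothetical, base_dn assumed to be
# 'dc=example,dc=com'): _addBaseDn() qualifies a relative dn before talking to
# the server, and _stripBaseDn() removes it again from returned entries.
#   probe._addBaseDn('uid=jdoe,ou=people')
#       -> 'uid=jdoe,ou=people,dc=example,dc=com'
#   probe._stripBaseDn('uid=jdoe,ou=people,dc=example,dc=com')
#       -> 'uid=jdoe,ou=people'
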