def __init__(self):
    """Initialize tests"""
    Logger.getLogger("net.spy.memcached").setLevel(Level.DEBUG)
    self.clients = []
    if USE_GLOBAL_CLIENT:
        # use the global client
        self.client = global_client
    else:
        cfb = ConnectionFactoryBuilder()
        self.client = ArcusClient.createArcusClient(arcus_cloud,
                                                    service_code, cfb)
        print 'Wait for per-thread client to be connected to Arcus cloud (%d seconds)' % DEFAULT_CONNECTION_WAIT
        Thread.currentThread().sleep(DEFAULT_CONNECTION_WAIT * 1000)
    self.flush_counter = 0
    self.tests = []
    # insert operations
    self.tests.append(Test(1, "KeyValue").wrap(self.KeyValue))
    self.tests.append(Test(2, "Collection_Btree").wrap(self.Collection_Btree))
    self.tests.append(Test(3, "Collection_Set").wrap(self.Collection_Set))
    self.tests.append(Test(4, "Collection_List").wrap(self.Collection_List))
def run_command_plugins(plugin_point):
    if site_home:
        sys.path.append(site_home + '/custom/plugins')
    else:
        sys.path.append('custom/plugins')
    # run any global plugin for this command
    script_name = command_name + '_' + plugin_point + '.py'
    script_file = os.path.join(site_home, 'custom/plugins', script_name)
    if os.path.isfile(script_file):
        main_logger.info('Running global ' + plugin_point +
                         ' command plugin: ' + script_file)
        call_extension(script_file, Logger.getLogger(plugin_point + '_plugin'))
    # run any configured plugins for this command
    if rb_config is not None:
        plugins_list = rb_config.getProperty('plugins.' + command_name +
                                             '.' + plugin_point)
        if plugins_list is not None:
            for plugin in plugins_list.split(','):
                script_file = os.path.join(site_home, 'custom/plugins', plugin)
                if os.path.isfile(script_file):
                    main_logger.info('Running configured ' + plugin_point +
                                     ' command plugin: ' + script_file)
                    call_extension(script_file, Logger.getLogger('plugin'))
                else:
                    build_file = find_command_build_file(
                        os.path.join(site_home, 'custom/commands/ant'), plugin)
                    if build_file:
                        run_ant_target(build_file, plugin)
                    else:
                        main_logger.warn('Configured ' + plugin_point +
                                         ' command plugin script not found: ' +
                                         script_file)
def deleteCustomer(mc):
    print 'Checking for existing test customer...'
    cm = mc.getCustomerManager()
    cust = cm.findCustomers([SearchConstraint(
        ICustomerManager.PROP_DOMAINS,
        SearchConstraintOperator.CONSTRAINT_LIKE_INSENSITIVE,
        '%enron.com%')])
    if cust is not None and len(cust) == 1:
        testCust = cust[0]
        Logger.getLogger("cloud.py").info(
            "Customer with domain enron.com already existed; "
            "did a test not clean up? Deleting customer...")
        print 'Deleting customer systest_journaling_customer...'
        cm.deleteCustomers([testCust.getCustID()])
    return
def testWorkers_Interrupt(self):
    self.logger = Logger.getLogger("TestWorkerThreadController")
    # self.logger.setLevel(Level.DEBUG)
    wc = MyWorkerController()
    # True: create the conditional used to test interruption.
    wc.createWorkers(MyWorker, 50, True)
    wc.startWorkers()
    # Check the running state of the threads.
    for t in wc.loopers:
        self.assert_(t.isAlive() == True)
    # Check the initial state of the validation variables.
    for t in wc.loopers:
        self.assert_(t.stopProc == False)
        self.assert_(t.wasStopped == False)
        self.assert_(t.wasInterrupted == False)
    self.logger.debug("=====> Sleeping...")
    time.sleep(3)
    wc.stopWorkersViaInterrupt()
    # Give the threads some time to quiesce.
    time.sleep(1)
    # Check that all threads have stopped running.
    for t in wc.loopers:
        self.assert_(t.isAlive() == False)
    # Check that the threads were stopped via interrupt,
    # not via the stopProc flag.
    for t in wc.loopers:
        self.assert_(t.stopProc == False)
        self.assert_(t.wasStopped == False)
        self.assert_(t.wasInterrupted == True)
class TestProcessCommandsND(unittest.TestCase):

    logger = Logger.getLogger("Test")

    def setUp(self):
        self.cg = ProcessCommands()
        self.itemDict = CONFDICT_ND

    def tearDown(self):
        self.cg = None
        self.cmdList = None
        self.itemDict = None

    def testNoCommandsException(self):
        self.logger.info("TestCommandGeneratorND:testNoCommandsException")
        cmdDict = None
        try:
            self.cg.generateCommands(cmdDict)
        except Exception:
            pass
        else:
            self.fail("Expected an Exception")

    def testBogusKeyException(self):
        self.logger.info("TestCommandGeneratorND:testBogusKeyException")
        cmdDict = {'boguskey': 'bogusvalue'}
        try:
            self.cg.generateCommands(cmdDict)
        except Exception:
            pass
        else:
            self.fail("Expected an Exception")
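# A hedged alternative to the try/except/else pattern used above:
# unittest's assertRaises expresses the same check in one call. Sketch of
# an equivalent test method (it would live inside the TestCase above; the
# method name is illustrative):
#
#     def testNoCommandsExceptionAlt(self):
#         self.logger.info("TestCommandGeneratorND:testNoCommandsExceptionAlt")
#         self.assertRaises(Exception, self.cg.generateCommands, None)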
def __init__(self, name, conf, admin):
    self.logger = Logger.getLogger("HBaseTable")
    # self.logger.setLevel(Level.DEBUG)
    self.conf = conf
    self.admin = admin
    self.name = name
    self._table = None
def __init__(self):
    self.logger = Logger.getLogger("wtShootLasers")
    # self.logger.setLevel(Level.DEBUG)
    colors = ["Red", "Green", "Blue", "Pink", "Magenta"]
    self.color = colors[randrange(len(colors))]
    self.repeat = randrange(4) + 1
def __init__(self, queueSize):
    self.logger = Logger.getLogger("wtGundamWarriorController")
    # self.logger.setLevel(Level.DEBUG)
    self.queueSize = queueSize
    self.wq = wtWorkQueue(queueSize)
    self.workItemsCreated = 0
def init(self, config):
    """servlet startup"""
    self.props = self.read_dspace_config(DSPACE_DIR + '/config/dspace.cfg')
    self.conn = zxJDBC.connect(
        self.props.getProperty('db.url'),
        self.props.getProperty('db.username'),
        self.props.getProperty('db.password'),
        self.props.getProperty('db.driver'),
    )
    self.conn.autocommit = True
    self.cursor = self.conn.cursor()
    self.client = self.read_orcid_config_db()
    self.orcid_pattern = re.compile(r"^\d{4}-\d{4}-\d{4}-(\d{3}X|\d{4})$")
    # Hex UUID. Note: a "[0-f]" class would also match the ASCII
    # punctuation and uppercase letters between '9' and 'f', so spell
    # out the hex ranges explicitly.
    self.token_pattern = re.compile(
        r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")
    self.available_scopes = [
        '/authenticate',
        '/activities/update',
        '/person/update',
        '/read-limited',
        '/read-public',
        '/webhook',
    ]
    self.available_envs = [
        'sandbox',
        'production',
    ]
    self.logger = Logger.getLogger("orcid/auth.py")
    self.logger.info("initialized")
def __init__(self, esClient, esLogger, retrySeconds=90, bitBucket=None):
    self.logger = Logger.getLogger("ElasticSearch.EsBulkReq")
    # self.logger.setLevel(Level.DEBUG)
    self.esLogger = esLogger
    self.esClient = esClient
    self.bulkReq = self.esClient.prepareBulk()
    self.reqCounter = 0
    self.currentRequests = ArrayList()
    # bitBucket is an object that implements a bitBucketSubmit() method.
    # Bulk-request actions that fail and are not retryable are submitted
    # to the bitBucket.
    self.bitBucket = bitBucket
    # Create the bulk-request retry queue.
    # Retries cover the case where a new index is not created in time
    # (within 30 seconds). The index is eventually created, but in the
    # meantime we would lose the data from the failed inserts unless we
    # can retry them after N seconds.
    self.bulkReqRetryQueue = Queue.PriorityQueue()
    self.bulkRetryBatchNumber = 0
    # This should come from a config file.
    self.bulkRetryDelaySeconds = retrySeconds
    # Needed for logging.
    self.threadName = 'EsBulkReq'
def __init__(self):
    self.logger = Logger.getLogger("wtThrowRocks")
    # self.logger.setLevel(Level.DEBUG)
    rocks = ["Grain of Sand", "Gravel", "Pebble", "Stone", "Boulder"]
    self.rock = rocks[randrange(len(rocks))]
    self.repeat = randrange(4) + 1
def __init__(self, hdfsUtil, dirPath, stat, pwFiles, buffSize=100,
             recurse=False, rewind=False):
    self.logger = Logger.getLogger("FileStatIterator")
    # self.logger.setLevel(Level.DEBUG)
    self.hu = hdfsUtil
    self.dirPath = dirPath
    self.stat = stat
    self.pwFiles = pwFiles
    self.buffSize = buffSize
    self.recurse = recurse
    self.rewind = rewind
    if pwFiles.size() > 0:
        self.firstVal = pwFiles[0]
        self.lastVal = pwFiles[pwFiles.size() - 1]
    else:
        self.firstVal = None
        self.lastVal = None
def setupCustomer(mc, island, domain, isCloud, isOnPremises, userAccounts,
                  loadCorpus=False):
    deleteCustomer(mc)
    amptool = ActiveMailboxPartitionTool()
    emscmdtool = EmsCommandLineTool()
    print 'creating customer...'
    cm = mc.getCustomerManager()
    print 'executing amp cluster-location'
    amptool.runCommand('cluster-location', array(['-r'], String))
    args = []
    args.append("-f")
    args.append(str(island))
    args.append("-s")
    args.append(str(island))
    args.append("-d")
    args.append(domain)
    args.append("-n")
    args.append("systest_journaling_customer")
    args.append("-e")
    args.append("nobody@" + domain)
    args.append("-q")
    if loadCorpus:
        args.append("-i")
        args.append("/tmp/corpus")
        args.append("-o")
        args.append("/ems/bigdisk/sftp.drop")
    if isCloud:
        args.append("--guid")
    if isOnPremises:
        args.append("--bcc")
    if userAccounts is not None:
        for address in userAccounts:
            args.append("--user-account")
            args.append(address + "@" + domain)
    print "creating journal-enabled customer"
    Logger.getLogger("cloud.py").info("Creating systest_journaling_customer...")
    result = emscmdtool.runCommand('make-test-customer', args)
    if result is False:
        raise Exception('make-test-customer failed, customer already exists?')
    ManagementContainer.getInstance().getMailRoutingConfigGenerator().waitForRegenComplete()
    cust = cm.findCustomers([SearchConstraint(
        ICustomerManager.PROP_NAME,
        SearchConstraintOperator.CONSTRAINT_EQUALS,
        'systest_journaling_customer')])
    return cust[0]
def __init__(self, table, rowid):
    self.logger = Logger.getLogger("HBaseRow")
    # self.logger.setLevel(Level.DEBUG)
    # Must be a table object.
    self.table = table
    # Must be a row id.
    self.rowid = rowid
    self.rowPut = Put(self.rowid)
def _getVersion(self, testScriptFileName):
    from org.apache.log4j import Logger as _Logger, Level as _Level
    # Temporarily raise the root log4j logger level to ERROR while
    # querying the version, then restore the previous level.
    rootLogger = _Logger.getRootLogger()
    rootLevel = rootLogger.getLevel()
    rootLogger.setLevel(_Level.ERROR)
    try:
        from com.qspin.qtaste.util.versioncontrol import VersionControl
        version = VersionControl.getInstance().getTestApiVersion(
            os.path.dirname(testScriptFileName))
    finally:
        # Restore the previous level even if the lookup fails.
        rootLogger.setLevel(rootLevel)
    return version
def setUp(self):
    # Replik8db properties...
    Replik8dbProps = Properties('conf/replik8db.properties')
    # Replik8db HDFS cluster...
    Replik8dbHdfsCluster = Replik8dbProps.getProperty('cluster.hdfs')
    self.logger = Logger.getLogger("TestHdfsUtil")
    # self.logger.setLevel(Level.DEBUG)
    self.hu = HdfsUtil(Replik8dbHdfsCluster)
def __init__(self, tName, workerConfig):
    JThread.__init__(self)
    self.logger = Logger.getLogger("wtGundamWarrior")
    # self.logger.setLevel(Level.DEBUG)
    self.config = workerConfig
    self.wq = self.config.workerQueue
    self.threadName = tName
    self.stopThread = False
    self.wrkItmsProcessed = 0
def __init__(self, threadName, workerConfig):
    WorkerThread.__init__(self, threadName, workerConfig)
    self.logger = Logger.getLogger("ElasticSearch.EsLogger")
    # self.logger.setLevel(Level.DEBUG)
    self.workerConfig = workerConfig
    # Default bulk-request settings for ES logging.
    # - Set useBulkReq to True to use bulk requests for logs.
    self.useBulkReq = False
    self.bulkReqCounter = 0
    self.bulkReqExecCountTrigger = 1000
    self.lastBulkReqFlush = datetime.now()
    if threadName is None:
        # ==== 1st instance (threadName is None) ====
        # Get the EsLogger queue.
        # This object will feed the queue through this reference.
        self.wq = workerConfig.wq
        self.esNode = self.workerConfig.esNode
    else:
        # ==== 2nd instance (threadName is not None) ====
        self.esNode = self.workerConfig.esNode
        self.esClient = self.esNode.getClient()
        self.esBulkReq = EsBulkReq(self.esClient, None)
        self.indexName = workerConfig.indexName
        # If bulk-request settings are present in the workerConfig, use them.
        if workerConfig.useBulkReq is not None:
            self.useBulkReq = workerConfig.useBulkReq
        if workerConfig.bulkReqExecCountTrigger is not None:
            self.bulkReqExecCountTrigger = workerConfig.bulkReqExecCountTrigger
    # JSON SerDe objects
    self.boon = BoonJson()
    self.esLoggerWorker = None
    self.esLoggerThread = None
    self.stopThread = False
    self.threaded = False
    self.dtfmt = SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ")
    if threadName is None:
        self.threadName = "EsLoggerController"
        # Start up the background thread.
        self.startEsLoggerThread()
    else:
        self.threadName = threadName
def __init__(self, hdfsCluster):
    self.logger = Logger.getLogger("Hdfs")
    # self.logger.setLevel(Level.DEBUG)
    coreSite = "/etc/hadoop/conf/core-site.xml"
    hdfsSite = "/etc/hadoop/conf/hdfs-site.xml"
    self.cHdfs = Configuration()
    self.cHdfs.addResource(Path(coreSite))
    self.cHdfs.addResource(Path(hdfsSite))
    self.cHdfs.set("fs.defaultFS", hdfsCluster)
    self.fileSystem = FileSystem.get(self.cHdfs)
    self.fileUtil = FileUtil()
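# Hedged usage sketch of the wrapper above, using only stock Hadoop
# FileSystem calls; the class name 'Hdfs' is taken from the logger name,
# and the cluster URI and paths are illustrative assumptions.
hdfs = Hdfs('hdfs://namenode.example.com:8020')
tmpDir = Path('/tmp/replik8db')
if not hdfs.fileSystem.exists(tmpDir):
    hdfs.fileSystem.mkdirs(tmpDir)
for status in hdfs.fileSystem.listStatus(Path('/tmp')):
    print status.getPath().getName()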
def initialize_log4j():
    try:
        from org.apache.log4j import (ConsoleAppender, Level, Logger,
                                      PatternLayout)
        rootLogger = Logger.getRootLogger()
        rootLogger.setLevel(Level.DEBUG)
        layout = PatternLayout("%d{ISO8601} [%t] %-5p %c %x - %m%n")
        appender = ConsoleAppender(layout)
        rootLogger.addAppender(appender)
    except:
        import traceback
        print(traceback.format_exc())
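# Once initialize_log4j() has attached a ConsoleAppender to the root
# logger, named loggers inherit it through the log4j hierarchy; a minimal
# sketch (the 'myapp.ingest' logger name is illustrative):
from org.apache.log4j import Level, Logger

initialize_log4j()
log = Logger.getLogger('myapp.ingest')
log.setLevel(Level.INFO)  # per-logger threshold still applies
log.info('appears via the inherited root ConsoleAppender')
log.debug('suppressed: below this logger INFO threshold')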
def __init__(self):
    self.name = 'RedisPoolTest'
    self.log = Logger.getLogger(self.name)
    self.pool = ServiceRegistry.getService("testRedisPool")
    if not self.pool:
        self.log.info(self.name + ': creating pool...')
        self.pool = redis.ConnectionPool(host='127.0.0.1', port=6379)
        ServiceRegistry.registerService("testRedisPool", self.pool)
    self.r = redis.Redis(connection_pool=self.pool)
    self.log.info(self.name + ': init...')
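# Hedged usage sketch, assuming redis-py style bindings as used above;
# the owning class name and the key are illustrative assumptions.
t = RedisPoolTest()
t.r.set('rp:test:key', 'value')
print t.r.get('rp:test:key')  # -> 'value', served over the shared pool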
def __init__(self, tName, workerConfig):
    JThread.__init__(self)
    self.logger = Logger.getLogger("MyWorker")
    # self.logger.setLevel(Level.DEBUG)
    self.threadName = tName
    self.thread = None
    self.stopProc = False
    self.wasStopped = False
    self.wasInterrupted = False
    self.config = workerConfig
    self.loops = self.config.loops
    self.cvTest = self.config.cvTest
    self.cv = self.config.cv
    self.cvWait = self.config.cvWait
    self.logger.debug("Looping Worker Created: {}".format(tName))
def __init__(self):
    self.name = 'PoolConTest'
    self.log = Logger.getLogger(self.name)
    ds = DataSourceRegistry.getDataSource('localSource')
    if not ds:
        props = Properties()
        props.put("url", "jdbc:mysql://localhost/dataservice")
        props.put("username", "root")
        props.put("driverClassName", "com.mysql.jdbc.Driver")
        props.put("password", "ben")
        props.put("initialSize", "1")
        props.put("minIdle", "5")
        props.put("maxActive", "10")
        try:
            DataSourceRegistry.registerDataSource('localSource', props)
        finally:
            ds = DataSourceRegistry.getDataSource('localSource')
    self.ds = ds
    self.log.info(self.name + ': init...')
class TestProcessConfigBASE(unittest.TestCase):

    logger = Logger.getLogger("Test")

    def setUp(self):
        os.makedirs(props['tmpPath'])
        self.pc = ProcessConfig()
        self.envname = "MP66"
        self.itemDict = CONFDICT_BASE
        self.xml = CONFXML_BASE

    def tearDown(self):
        self.pc = None
        self.envname = None
        self.itemDict = None
        self.xml = None
        shutil.rmtree(props['tmpPath'])

    # def testXMLReadConfig(self):
    #     self.assertEqual(self.itemDict, self.pc.readConfig(xml=self.xml))

    def testXMLFileReadConfig(self):
        fhStr = props['confPath'] + os.sep + 'sample.xml'
        self.assertEqual(self.itemDict, self.pc.readConfig(fh=fhStr))

    def testXMLReadException(self):
        xml = None
        try:
            self.pc.readConfig(xml=xml)
        except Exception:
            pass
        else:
            self.fail("Expected an Exception")
class TopicService(com.xuechong.bootstraptests.service.TopicService, DaoSupport):

    logger = Logger.getLogger("TopicService py Impl")
    startIndex = lambda x: x > 0 and ((x - 1) * 6) or 0

    def __init__(self):
        """public TopicService()"""
        if self.logger.isInfoEnabled():
            self.logger.info(str(type(self)) + " init")
        print str(type(self)) + " init"

    def add(self, entity):
        """
        If you put this method in the superclass it will not be
        executed... why?
        """
        self.getSession().save(entity)

    def list(self, page):
        hql = "FROM Topic ORDER BY createDate DESC"
        query = self.getSession().createQuery(hql)
        query.setFirstResult(self.getIndex(page))
        query.setMaxResults(6)
        return query.list()

    def totalPage(self):
        rows = self.getSession().createQuery(
            "SELECT COUNT(t.id) FROM Topic t").uniqueResult()
        return rows % 6 != 0 and (rows / 6 + 1) or rows / 6

    def remove(self, id):
        print "py remove " + id

    def findById(self, id):
        print "py find by id"
        return None

    def findTop(self, top):
        print "py find top " + str(top)
        return []
def run_validators():
    config_valid = True
    validation_logger = Logger.getLogger('validation')
    for val_dir in ['core/validators',
                    os.path.join(site_home, 'custom/validators')]:
        sys.path.append(val_dir)
        for val_name in os.listdir(val_dir):
            val_file = os.path.join(val_dir, val_name)
            if os.path.isfile(val_file) and is_jython(val_file):
                main_logger.debug('Running validator: ' + val_file)
                result = call_extension(val_file, validation_logger)
                if result == False:
                    config_valid = False
    if not config_valid:
        main_logger.error('There were validation errors with the configuration '
                          'file\n\nBUILD FAILED - Invalid Configuration\n')
        sys.exit()
def setUp(self):
    self.logger = Logger.getLogger("ElasticSearch.TestEsBulkReq")
    # self.logger.setLevel(Level.DEBUG)
    self.logger.debug("running test: TestEsBulkReq")
    # Pull data from our test properties file...
    esTestProps = Properties('./lib/ElasticSearchTest.properties')
    esTestCluster = esTestProps.getProperty('esTest.cluster')
    esTestHosts = esTestProps.getProperty('esTest.hosts')
    esTestNodeName = esTestProps.getProperty('esTest.node.name')
    esTestIndex = esTestProps.getProperty('esTest.index')
    # Create the EsNode instance.
    self.esNc = EsNode(esTestCluster, esTestHosts, esTestNodeName)
    # First create the EsLoggerConfig object with an indexName and a new
    # EsLoggerQueue.
    esLoggerConfig = EsLoggerConfig(self.esNc, esTestHosts, esTestIndex,
                                    EsLoggerQueue(1000))
    esLoggerConfig.useBulkReq = False
    # Create the EsLogger instance.
    self.esLogger = EsLogger(None, esLoggerConfig)
def __init__(self, clusterName, unicastHosts, nodeName):
    self.logger = Logger.getLogger("EsNode")
    # self.logger.setLevel(Level.DEBUG)
    cluster = clusterName
    # Get this host's IP address.
    ip = " ".join(socket.gethostbyaddr(socket.gethostname())[2])
    self.logger.info("Chicle joining cluster [{}] as node.name [{}] ip [{}]"
                     .format(cluster, nodeName, ip))
    esSets = ImmutableSettings.settingsBuilder()
    esSets = esSets.put('node.name', nodeName)
    esSets = esSets.put('client.transport.sniff', True)
    esSets = esSets.put("http.enabled", "false")
    esSets = esSets.put("transport.tcp.port", "9300-9400")
    esSets = esSets.put("discovery.zen.ping.multicast.enabled", "false")
    esSets = esSets.put("discovery.zen.ping.unicast.hosts", unicastHosts)
    esSets = esSets.put("network.host", ip)
    self.node = (nodeBuilder().clusterName(cluster).client(True)
                 .settings(esSets).node())
    if self.node is None:
        self.logger.error("Failed to create ESNode")
def setup_command_logger():
    return Logger.getLogger('ConfigNOW.' + command_name)
commonModule = '1.0.1'

try:
    scriptConfigProperties
except NameError:
    scriptConfigProperties = None

try:
    replaceFlag
except NameError:
    replaceFlag = None

try:
    log
except NameError:
    log = Logger.getLogger('ConfigNOW')

log.debug('Loading module [common.py] version [' + commonModule + ']')


#=======================================================================================
# Error class for script errors
#=======================================================================================

class ScriptError(Exception):
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)
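# ScriptError carries a plain message, so callers can raise and report
# configuration problems without a Java stack trace. Hedged sketch; the
# helper function and property key are illustrative assumptions.
def requireProperty(props, key):
    # hypothetical helper: fail fast when a required property is missing
    value = props.getProperty(key)
    if value is None:
        raise ScriptError('Missing required property: ' + key)
    return value

try:
    adminUrl = requireProperty(scriptConfigProperties, 'wls.admin.url')
except ScriptError, e:
    log.error(str(e))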
from org.apache.log4j import Logger
from java.io import FileInputStream
from java.io import FileOutputStream
from java.util import Properties
from java.lang import String
import sys
import re
import os
import global_platform

execfile('core/commands/password_encrypter.py')

log = Logger.getLogger('config_loader')
data_linage = Properties()


def addPropertiesFromFile(props, filename, site_home):
    addProps = Properties()
    input = FileInputStream(filename)
    addProps.load(input)
    input.close()
    baseFileList = addProps.getProperty('base')
    if baseFileList is not None:
        baseFiles = baseFileList.split(",")
        for baseFile in baseFiles:
            baseFileResolved = getBaseFile(baseFile, site_home)
            if baseFileResolved == '':
            func(True)   # turn on function when sensor goes high
        else:
            logger.debug("set " + self.fnKey + " OFF for " +
                         str(self.throttle.getLocoAddress()))
            func(False)  # turn off function when sensor goes low
        return


class Automaton(jmri.jmrit.automat.AbstractAutomaton):
    # perform actions that need to be in a thread, such as loco acquisition
    def init(self):
        # logger.debug("Inside Automaton.init(" + self.sensorName + "," +
        #              str(self.throttleAddress) + "," + self.fnKeyName + ")")
        self.throttle = self.getThrottle(self.throttleAddress, True)
        # actually attach the sensor to the loco
        ThrottleFunctionForSensorListener().setup(self.sensorName,
                                                  self.throttle,
                                                  self.fnKeyName)
        return

    # pass and store needed values for this instance, then start the thread
    def setup(self, sensorName, throttleAddress, fnKeyName):
        self.sensorName = sensorName
        self.throttleAddress = throttleAddress
        self.fnKeyName = fnKeyName
        self.start()
        self.waitMsec(500)  # give it a chance to happen

####################################################################

logger = Logger.getLogger("jmri.jmrit.jython.exec.ThrottleFunctionForSensor")

# connect each sensor to its loco and function, repeat as needed
Automaton().setup("LS1003", 909, "F1")  # horn for address 909
Automaton().setup("LS1004", 909, "F0")  # lights for same loco
def _testWarriors(self, queSize, numWarriors, feedSecs):
    self.logger = Logger.getLogger("TestwtGundamWarriorController")
    # self.logger.setLevel(Level.DEBUG)
    # - Queue size.
    gwCtrl = wtGundamWarriorController(queSize)
    # - Number of workers.
    gwCtrl.createWorkers(numWarriors)
    # Workers should not be running yet.
    for t in gwCtrl.warriors:
        self.assert_(t.isAlive() == False)
    # Check the initial state of the validation variables.
    for t in gwCtrl.warriors:
        self.assert_(t.stopThread == False)
    # Verify that the worker queue is empty.
    self.assert_(gwCtrl.wq.empty())
    # Start the worker threads.
    gwCtrl.startWorkers()
    # Wait a bit to ensure that the workers start up.
    time.sleep(0.05)
    # Check the running state of the threads.
    for t in gwCtrl.warriors:
        self.assert_(t.isAlive() == True)
    for t in gwCtrl.warriors:
        self.assert_(t.stopThread == False)
    # Have the worker controller start pushing work into the worker queue.
    # - Run for N seconds...
    gwCtrl.controllTheWorkers(feedSecs)
    # Sleep and let the work queue empty.
    time.sleep(3)
    gwCtrl.stopOrInterruptWarriors()
    # Setting the stopThread flag won't immediately stop threads that are
    # blocking for a work item, but they will eventually stop if the
    # Queue.get is blocking with a timeout.
    gwCtrl.interruptWorkers()
    # Allow the threads to quiesce.
    time.sleep(0.05)
    # All threads should have stopped.
    for t in gwCtrl.warriors:
        self.assert_(t.isAlive() == False)
    for t in gwCtrl.warriors:
        self.assert_(t.stopThread == True)
    # Verify that the worker queue is empty.
    self.assert_(gwCtrl.wq.empty())
    # The number of work items created should equal the number processed
    # across all workers.
    wiCreated = gwCtrl.workItemsCreated
    wiProcessed = 0
    for t in gwCtrl.warriors:
        wiProcessed = wiProcessed + t.wrkItmsProcessed
    self.logger.debug("WorkItems - Created:[{}] Processed:[{}]".format(
        wiCreated, wiProcessed))
    self.assert_(wiCreated == wiProcessed)
BYTES_COLUMN = 7  # if success, how many bytes in body?
RESOLVE_TIME_COLUMN = 9
CONNECT_TIME_COLUMN = 10
FIRST_BYTE_TIME_COLUMN = 11

# other constants
ALL_TRANSACTIONS_KEY = "0"
ALL_TRANSACTIONS_VALUE = "All Transactions"
TX_SEC_KEY = "passed"
THROUGHPUT_KEY = "kbSec"
SIMPLE_RESPONSE_TIME_KEY = "simple response time"
HTTP_RESPONSE_TIME_KEY = "http response time"
RESPONSE_TIME_MEAN_MAX_KEY = "response time mean max"
TABLE_MARKER = "Final statistics for this process"
SUPPORTED_JYTHON_VERSIONS = ["2.2.1", "2.5.0", "2.5.1", "2.5.2"]
TEST_START_TIME = long(0)
MAX_POSSIBLE_TIME = 99999999.9

logger = Logger.getLogger("analyzer")

# velocity merger and config classes are instantiated in the constants
# module to avoid creating duplicate instances
VORPAL = ga.constants.VORPAL
CONFIG = ga.constants.CONFIG

if __name__ == "__main__":
    main()
                                  CollectorsParameters.getDiscoveryResourceFolder(),
                                  FILE_SEPARATOR, ADAPTER_NAME)
adapterResBaseDirFile = File(adapterResBaseDir)

# discoveryResources\CaCmdbPushAdapter\work
adapterResWorkDir = "%s%s%s" % (adapterResBaseDir, FILE_SEPARATOR, WORK_DIR)
adapterResWorkDirFile = File(adapterResWorkDir)

# discoveryConfigFiles\CaCmdbPushAdapter
adapterConfigBaseDir = "%s%s%s%s" % (CollectorsParameters.BASE_PROBE_MGR_DIR,
                                     CollectorsParameters.getDiscoveryConfigFolder(),
                                     FILE_SEPARATOR, ADAPTER_NAME)

# logs
slogger = Logger.getLogger("successLogger")
flogger = Logger.getLogger("failureLogger")

# UCMDB ID to Atrium ID cache
id_cache = {}

'''
Method Definitions
'''

def isNoneOrEmpty(s):
    return s is None or s == ""

'''
Method to create a connection object with host, port, username & password
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS
# END USER LICENSE AGREEMENT (EULA).
#
# ============================================================================

import os
import sys

import validation_helper as helper

from java.io import File
from java.lang import System
from org.apache.log4j import Logger

log = Logger.getLogger('validation')


def sanityCheckDomainConfig(domainProperties):
    error = 0
    username = domainProperties.getProperty('wls.admin.username')
    if username is None or len(username) == 0:
        error = 1
        log.error('Please verify that the wls.admin.username property is set '
                  'in the configuration.')
    else:
        log.debug('Admin username [' + str(username) + '] is valid.')
        network_name = a
    elif o == "-s":
        source_core_id = eval(a)
    elif o == "-d":
        dest_core_id = eval(a)


class NullOutputStream(java.io.OutputStream):
    # swallow everything written to the stream
    def write(self, *args):
        pass

# Silence normal stdout so the spike stream is the only data written to
# the real stdout; keep a reference to the original stream first.
real_out = java.lang.System.out
java.lang.System.setOut(java.io.PrintStream(NullOutputStream()))

from org.apache.log4j import Logger, Level
logger = Logger.getLogger(ca.nengo.util.Memory)
logger.setLevel(Level.FATAL)


class SpikeSender(nef.SimpleNode):
    def __init__(self, name, ensemble):
        nef.SimpleNode.__init__(self, name)
        self.ensemble = ensemble
        self.datastream = java.io.DataOutputStream(real_out)

    def tick(self):
        data = self.ensemble.getOrigin('AXON').getValues().getValues()
        for i, spike in enumerate(data):
            if spike:
                self.datastream.writeBytes(struct.pack('<I', i + 2048))
        self.datastream.writeInt(0xFFFFFFFF)
def __init__(self):
    self.logger = Logger.getLogger("MyWorkerController")
def __init__(self):
    self.logger = Logger.getLogger("uk.ac.rdg.resc.ncwms.CacheWiper")
    # Will be used as UpdateSequence in the capabilities doc.
    self.timeLastRan = time.time()
class TestInstall(unittest.TestCase):

    logger = Logger.getLogger("Test")

    def setUp(self):
        self.ti = Install()
        self.installHome = "C:/ibm/websphere7_64"
        self.noInstallHome = "C:/ibm/websphere7_64_XX"
        self.mediaHome = "O:/Internal IT/Software – Public/IBM/was_nd_7/wintel64/disk1/WAS"

    def tearDown(self):
        if os.path.exists("%s/uninstall/uninstall.exe" % self.noInstallHome):
            self.logger.info("Cleaning up test WebSphere installation at: %s"
                             % self.noInstallHome)
            os.system('%s/uninstall/uninstall -silent '
                      '-OPT removeProfilesOnUninstall="true"'
                      % self.noInstallHome)
            shutil.rmtree(self.noInstallHome)
        self.ti = None
        self.installHome = None
        self.noInstallHome = None
        self.mediaHome = None

    def testCheckExistingInstallDetected(self):
        '''testCheckExistingInstallDetected'''
        self.logger.info("TestInstall:testCheckExistingInstallDetected")
        self.isWebSphereInstalled = True
        self.assertEqual(self.isWebSphereInstalled,
                         self.ti.isWebSphereInstalled(self.installHome))

    def testCheckNonExistingInstallDetected(self):
        '''testCheckNonExistingInstallDetected'''
        self.logger.info("TestInstall:testCheckNonExistingInstallDetected")
        self.isWebSphereInstalled = False
        self.assertEqual(self.isWebSphereInstalled,
                         self.ti.isWebSphereInstalled(self.noInstallHome))

    def testNoInstallHomeException(self):
        '''testNoInstallHomeException'''
        self.logger.info("TestInstall:testNoInstallHomeException")
        try:
            self.ti.isWebSphereInstalled()
        except Exception:
            pass
        else:
            self.fail("Expected an Exception")

    def testNullInstallHomeDirException(self):
        '''testNullInstallHomeDirException'''
        self.logger.info("TestInstall:testNullInstallHomeDirException")
        try:
            self.ti.installWebSphereBase(mediaHome=self.mediaHome)
        except Exception:
            pass
        else:
            self.fail("Expected an Exception")

    def testNullMediaHomeDIR(self):
        '''testNullMediaHomeDIR'''
        self.logger.info("TestInstall:testNullMediaHomeDIR")
        try:
            self.ti.installWebSphereBase(installHome=self.installHome)
        except Exception:
            pass
        else:
            self.fail("Expected an Exception")

    def testInstallBase(self):
        '''testInstallBase'''
        self.logger.info("TestInstall:testInstallBase")
        self.isInstallSuccess = True
        self.assertEqual(self.isInstallSuccess,
                         self.ti.installWebSphereBase(
                             installHome=self.noInstallHome,
                             mediaHome=self.mediaHome))
'''
Created on Feb 19, 2013

@author: undesa
'''
from org.apache.log4j import Logger
import os

from const import (
    __legislature_info__,
    __parl_info__
)

LOG = Logger.getLogger("glue")


class COLOR(object):
    """
    ANSI color definitions used for color-coding significant runtime
    events or raised exceptions, as applied via the Python print()
    function.
    """
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
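# Minimal usage sketch: wrap a message between a color code and ENDC so
# the terminal resets afterwards (the messages are illustrative).
print(COLOR.WARNING + "parliament info missing, using defaults" + COLOR.ENDC)
print(COLOR.FAIL + "could not load legislature info" + COLOR.ENDC)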
# located in the JMRI program directory: 'log4j.category.jmri.jmrit.jython.exec=DEBUG'
#
# Author: Oscar Moutinho ([email protected]), 2016 - for JMRI
##################################################################################

#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# imports, module variables and immediate running code

import java
import socket
import threading
import time

from org.apache.log4j import Logger
import jmri

TcpPeripheral_log = Logger.getLogger("jmri.jmrit.jython.exec.TcpPeripheral")

CONN_TIMEOUT = 3.0  # timeout (seconds)
# Multiply by CONN_TIMEOUT for the maximum time interval
# (send a heartbeat after CONN_TIMEOUT * (MAX_HEARTBEAT_FAIL / 2)).
MAX_HEARTBEAT_FAIL = 5

#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# get gpio and id from a turnout or sensor system name
def TcpPeripheral_getGpioId(sysName):
    gpio = None
    id = None
    _sysName = sysName.split(":")
    if len(_sysName) == 2 or len(_sysName) == 3:
        _gpio = _sysName[0].split("$")
        if len(_gpio) == 2:
            try:
                gpio = int(_gpio[1])
def setup_main_logger():
    PropertyConfigurator.configure('log4j.properties')
    return Logger.getLogger('ConfigNOW')
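# PropertyConfigurator reads a standard log4j 1.x properties file; an
# assumed minimal 'log4j.properties' for this setup might contain:
#
#   log4j.rootLogger=INFO, stdout
#   log4j.appender.stdout=org.apache.log4j.ConsoleAppender
#   log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
#   log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c - %m%n
#   log4j.logger.ConfigNOW=DEBUG
#
# Command loggers named 'ConfigNOW.<command>' (see setup_command_logger
# above) then inherit the root appender and the ConfigNOW level.
main_logger = setup_main_logger()
main_logger.info('logging configured from log4j.properties')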
import os
import sys
import time

from com.m1.ems.mgmt import ManagementContainer
from com.m1.ems.mgmt import ICustomerManager
from com.m1.ems.mgmt import Customer
from com.m1.ems.mgmt import Capabilities
from com.m1.ems.mgmt import LockManager
from org.apache.log4j import Logger
from com.m1.ems.mgmt.mailrouting import ConfigGenerator
from com.m1.ems.mgmt.mailrouting import NextHopsUpdater

OK = 0
ERROR_CUSTOMER_NOT_FOUND = 1
ERROR_INVALID_COMMAND_LINE = 2
ERROR_FAILED_TO_UPDATE_NEXTHOPS = 3

LOGGER = Logger.getLogger("setupNextHops.py")


def getCustomer(customerId):
    customer = ManagementContainer.getInstance().getCustomerManager().getCustomer(customerId)
    if customer is None:
        print 'Customer', customerId, 'was not found.'
        sys.exit(ERROR_CUSTOMER_NOT_FOUND)
    return customer


def configureNextHopRouting(customer, domains):
    print 'Configuring next hop routing with ' + str(len(domains)) + ' hops:'
    for hop in domains:
        print '  ' + str(hop)
    hopsUpdater = NextHopsUpdater()
    LOGGER.info('Start config next hop routing')
test = Test(1, 'Log')

# set up the ScribeAppender
appender = ScribeAppender()
appender.setName('scribe')
appender.setThreshold(Level.INFO)
appender.setLayout(PatternLayout('%p - %m'))
appender.setRemoteHost('scribe.la.devbln.europe.nokia.com')
appender.setLocalHostname('test host')
errorHandler = ScribeStatisticsErrorHandler()
appender.setErrorHandler(errorHandler)

logger = Logger.getLogger('grinder')


class TestRunner:
    def __call__(self):
        wrappedAppender = test.wrap(appender)
        # append a test log message
        message = "Test log message: %s, %s" % (grinder.threadNumber,
                                                grinder.runNumber)
        event = LoggingEvent('grinder', logger, Level.INFO, message, None)
        # delay reporting stats
        grinder.statistics.delayReports = 1
        error = wrappedAppender.appendAndGetError(event)