def get(self, filename):
    util.getLogger(__name__).debug("CSS name: %s" % filename)
    css = db.Query(CSSFile).filter('default =', True).filter(
        'filename =', filename).fetch(1)
    if len(css) > 0:
        util.getLogger(__name__).debug("CSS found in DS")
        self.generate('css.html', {'css': css[0]})
    else:
        self.generate('css.html', {})
def GetMysqlCursor():
    conn = cursor = None
    conn = GetMysqlConn()
    if conn is None:
        return None, None
    try:
        cursor = conn.cursor()
    except MySQLdb.MySQLError as e:
        util.getLogger().critical(
            "error(%d:%s): cursor()" % (e.args[0], e.args[1]))
    return conn, cursor
def main():
    logger = util.getLogger(__name__)
    out_hdlr = logging.StreamHandler(sys.stdout)
    out_hdlr.setFormatter(util.ColorLevelFormatter('%(message)s'))
    out_hdlr.setLevel(logging.INFO)
    logger.addHandler(out_hdlr)
    modulePath = os.path.dirname(os.path.abspath(__file__))
    config = configfile.getClusterSnapshotConfig(
        os.path.join(modulePath, 'snapshot.conf'), sys.argv[1:])
    logger.info("Starting Cluster Snapshot...")
    sourceDir = config.get('SOURCE_DIR', '')
    sourceFile = ''
    if os.path.isfile(os.path.join(sourceDir, "profile.platform")):
        sourceFile = util.sourceEnv(os.path.join(sourceDir, 'profile.platform'))
    elif os.path.isfile(os.path.join(sourceDir, "profile.lsf")):
        sourceFile = util.sourceEnv(os.path.join(sourceDir, 'profile.lsf'))
    snapFile = runClsSnapshot(config)
    if snapFile:
        logger.info('Cluster snapshot %s has been created.', snapFile)
    else:
        logger.info('Failed to generate a cluster snapshot.')
        exit(-1)
def save(config, lsfConfig, path='/tmp'):
    logger = util.getLogger(__name__)
    if 'LSF_LOGDIR' in lsfConfig:
        logDir = lsfConfig.get('LSF_LOGDIR', '')
        if os.path.isdir(logDir):
            lsfDC = Data_Collect(path, __name__)
            tempFolder = tempfile.mkdtemp(prefix='lc_')
            args = [
                os.path.join(sys.path[0], 'log_collector.py'), 'deploy',
                tempFolder, socket.gethostname(), 'lsf'
            ]
            log_collector = imp.load_source(
                __name__ + '.log_collector',
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'log_collector.py'))
            log_collector.main(args)
            for fname in iglob(os.path.join(tempFolder, '*.lsf.logs.*.tar.gz')):
                lsfDC.moveit(fname)
            util.rmtree(tempFolder)
        else:
            logger.error("LSF_LOGDIR directory '%s' does not exist.", logDir)
    else:
        logger.warn(
            'LSF_LOGDIR is not set in lsf.conf. No logs will be collected.')
def __init__(self, server_address, RequestHandlerClass, servernumber,
             bind_and_activate=True):
    HTTPServer.__init__(self, server_address, RequestHandlerClass,
                        bind_and_activate=bind_and_activate)
    self.log = util.getLogger()
    self.servernumber = servernumber
    self.cloudiness_cache = {}
    self.cloudiness_cache_misses = 0
    self.cloudiness_cache_hits = 0
    self.image_cache = {}
    self.image_cache_size = 0
    self._cpu_usage = 0.0
    self._cpu_prev_idle = 0.0
    self._cpu_prev_total = 0.0
    self._cpu_usage_last_100_samples = []
    # Shared object is expected to reside in lib/libcloudiness.so
    LIB_FILENAME = "libcloudiness.so"
    LIBPATH = os.path.join(constants.ROOT_DIR, "lib", LIB_FILENAME)
    # Load the shared object. This object has been compiled with some
    # particular flags. Look at the Makefile accompanying it.
    self.libcloudiness = ctypes.cdll.LoadLibrary(LIBPATH)
    # This is one of the functions declared in the library
    self.calc_cloudiness = self.libcloudiness.calc_cloudiness
    self.calc_cloudiness.restype = ctypes.c_double
def save(config, lsfConfig, path='/tmp'):
    logger = util.getLogger(__name__)
    if 'LSB_SHAREDIR' not in lsfConfig:
        logger.error("LSB_SHAREDIR is not defined in lsf.conf!")
        return False
    lsfShareDir = lsfConfig['LSB_SHAREDIR']
    lsfData = Data_Collect(path, __name__)
    lsid_out = lsfData.runit('lsid')
    ClName = ''
    for line in lsid_out.splitlines():
        if "My cluster name is" in line:
            ClName = line.split(' ')[4]
            break
    if not ClName:
        logger.error("No cluster can be found!")
        return False
    lsfWorkDir = os.path.join(lsfShareDir, ClName, 'logdir')
    eventNum = config.get('EVENTS_NUM', 0)
    if eventNum:
        eventNum = int(eventNum)
    else:
        eventNum = 0
    lsfData.copyit(os.path.join(lsfWorkDir, 'lsb.events'), 'work/')
    i = 0
    while i < eventNum:
        i += 1
        lsfData.copyit(os.path.join(lsfWorkDir, 'lsb.events' + '.' + str(i)),
                       'work/')
    lsfData.copyit(os.path.join(lsfWorkDir, 'lsb.acct'), 'work/')
class Ra3GsLoginServerFactory(ServerFactory):
    protocol = Ra3GsLoginServer
    log = util.getLogger('gamespy.ra3Serv', self)

    def buildProtocol(self, addr):
        p = ServerFactory.buildProtocol(self, addr)
        p.theater = Theater.getTheater(gameId)
        return p
def save(config, path='/tmp/S_SHUT'):
    modName = os.path.splitext(os.path.basename(__file__))[0]
    savePath = os.path.join(path, modName)
    logFileName = modName + '.log'
    logger = util.getLogger(__name__)
    if os.path.exists(savePath):
        dt = strftime("%Y-%m-%d:%H:%M:%S", gmtime())
        os.rename(savePath, savePath + '.' + dt)
    try:
        os.makedirs(savePath)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            logger.exception("Could not create directory path '%s'.", savePath)
            return
    thisModPath = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(thisModPath)
    debugLevel = config.get('SYM_LOG_LEVEL')
    LOGFILE = os.path.join(savePath, logFileName)
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(name)s.%(funcName)s() - %(message)s')
    fHandle = logging.FileHandler(LOGFILE)
    fHandle.setFormatter(formatter)
    if debugLevel:
        try:
            fHandle.setLevel(getattr(logging, debugLevel.strip().upper()))
        except TypeError:
            pass
    logger.addHandler(fHandle)
    symModules = configfile.getModuleList(config.get('SYM_MDLS', allMods),
                                          allMods)
    sym_mods = []
    myScriptPath = os.path.dirname(os.path.realpath(__file__))
    for mod in symModules:
        try:
            sym_mods.append(
                imp.load_source(__name__ + '.' + mod,
                                os.path.join(myScriptPath, mod + '.py')))
        except ImportError:
            logger.exception(
                "Error loading module as defined in CLI parameter '--sym_mdls' or snapshot.conf SYM_MDLS "
                "parameter. Check that the module %s is valid.", mod)
    if not os.getenv('SOAM_HOME', ''):
        logger.warn(
            'Cannot find $SOAM_HOME in the environment. Data collection will be incomplete.'
        )
    for mod in sym_mods:
        mod.save(savePath)
    fHandle.close()
    logger.removeHandler(fHandle)
def __init__(self, tweets):
    self.twitterHandles = []
    self.trends = []
    self.categories = []
    self.indexLists = []
    self.logger = util.getLogger("populate_feed.InvertedIndex")
    self.totalTweets = 0
    self._populate(tweets)
def __init__(self, peer_port=PeerServer.DEFAULT_PORT,
             data_port=DataServer.DEFAULT_PORT,
             data_transfer_protocal='tcp', p2p_prefix=None, callbacks={}):
    '''
    Constructor

    *peer_port*: int. Peer server service port.
    *data_port*: int. Data server service port.
    *data_transfer_protocal*: string. Data transfer protocol; supports 'tcp', 'http', 'udp'.
    *p2p_prefix*: str. The protocol prefix used to identify the peer data package.
    *callbacks*: the external callback functions that handle events. You need to
    parse the data according to your business logic.
        "register" - register callback. Invoked after a peer registers to me.
        "query"    - query callback. Parses the query string and performs the query.
        "message"  - message callback. Populates the message when it is received.
        "action"   - action callback. Parses and performs the action.

    *callback returns*:
        The *"query"* callback should return (resource, service_protocal, service_port).
        "resource" identifies how to get the resource.
        "service_protocal" is the transfer protocol (http, tcp, udp) serving the resource.
        "service_port" is the port serving the resource.
    '''
    self.log = util.getLogger('ConnManager(%d)' % peer_port)
    if p2p_prefix:
        P.setPrefix(p2p_prefix)
    self.peer_port = peer_port
    self.data_port = data_port
    self.peerServer = PeerServer(('0.0.0.0', peer_port), PeerServerHandler)
    self.ip = self.peerServer.server_address[0]
    # initialize internal and external callbacks
    self.callbacks = callbacks  # external callbacks
    # init peer callbacks
    peerCallbacks = {
        'register': self._on_register,
        'query': self._on_query,
        'message': self._on_message,
        'action': self._on_action,
    }
    self.peerServer.init(peerCallbacks)  # internal callbacks
    self.log.info("P2P Server initialized on %s:%d:%d" %
                  (self.ip, self.peer_port, self.data_port))
    dataCallbacks = {
        'connect': self._on_connect,
        'transfer': self._on_transfer,
        'disconnect': self._on_disconnect,
        'resource': self._on_resource,
        'signature': self._on_signature,
    }
    self.dataServer = DataServer(port=data_port,
                                 protocal=data_transfer_protocal,
                                 callbacks=dataCallbacks)
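# A minimal usage sketch for the ConnManager constructor documented above. The
# callback keys mirror the 'register'/'query'/'message'/'action' names from the
# docstring; on_register, on_query, the ports, and the returned resource path are
# hypothetical stand-ins, not part of the original module.
def on_register(peer):
    print('peer registered: %s' % (peer,))

def on_query(query):
    # Per the docstring, a query callback returns (resource, service_protocal, service_port).
    return ('/tmp/shared/%s' % query, 'http', 8080)

manager = ConnManager(peer_port=9100, data_port=9101,
                      data_transfer_protocal='tcp',
                      callbacks={'register': on_register, 'query': on_query})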
def GetMysqlConn():
    try:
        cfg = ConfigParser.ConfigParser()
        cfg.read("/var/www/webot/blog/config.conf")
        db_host = cfg.get("db", "host")
        db_user = cfg.get("db", "user")
        db_passwd = cfg.get("db", "passwd")
        db_default = cfg.get("db", "default_db")
        conn = MySQLdb.connect(host=db_host, user=db_user, passwd=db_passwd,
                               db=db_default)
        conn.autocommit(True)
        conn.query("set names 'utf8'")
    except MySQLdb.MySQLError as e:
        util.getLogger().critical(
            "error(%d:%s): connect" % (e.args[0], e.args[1]))
        return None
    return conn
def save(config, lsfConfig, path='/tmp'):
    logger = util.getLogger(__name__)
    lsfConfCp = Data_Collect(path, __name__)
    lsfConfDir = lsfConfig.get('LSF_CONFDIR', '')
    if lsfConfDir:
        lsfConfCp.copyit(lsfConfig['LSF_CONFDIR'])
    else:
        logger.error("Could not determine the LSF configuration dir. "
                     "Failed to collect LSF configuration files.")
    lsfConfCp.copyit('/etc/lsf.sudoers')
    for fname in iglob('/etc/init.d/*lsf*'):
        lsfConfCp.copyit(fname, 'initd/')
def save(path='/tmp'):
    logger = util.getLogger(__name__)
    egoConfDir = os.getenv("EGO_CONFDIR")
    if not egoConfDir:
        logger.error('Could not determine the EGO conf directory. '
                     'Failed to collect EGO work directory files.')
        return
    egoWorkDir = os.path.join(egoConfDir, '..', 'work')
    if not os.path.exists(egoWorkDir):
        logger.error('The EGO work directory %s does not exist. '
                     'Failed to collect EGO work directory files.', egoWorkDir)
        return
    egoConfCp = Data_Collect(path, __name__)
    egoConfCp.copyit(egoWorkDir)
def save(path='/tmp'):
    logger = util.getLogger(__name__)
    symDC = Data_Collect(path, __name__)
    output = symDC.runit('soamview app -s enabled')
    if output:
        lines = output.splitlines()
        if lines[0].startswith('APPLICATION'):
            lines.pop(0)
        for line in lines:
            app = line.split()[0]
            for task in appTasks:
                symTasks.append(task % app)
    for cmd in symTasks:
        logger.debug("Calling %s ..." % cmd)
        symDC.saveit(cmd)
def __init__(self, port=DEFAULT_PORT, protocal='tcp', callbacks={}):
    '''
    Constructor.

    *port*: int. Service port for DataPeer.
    *protocal*: string. Data transfer protocol; supports 'tcp', 'http', 'udp'.
    *callbacks*: map. Callback functions to process the data yourself.
    '''
    self.log = util.getLogger('DataServer(%d)' % port)
    if protocal not in DataServer.SUPPORT_PROTOCALS:
        self.log.error('%s is not a supported data server protocol' % protocal)
        raise ValueError('Not a supported data transfer protocol: %s' % protocal)
    self.protocal = protocal
    self.port = port
    self.callbacks = callbacks
    self.log.info('Data server (%s) created.' % protocal.upper())
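# A minimal sketch of constructing the DataServer described above, assuming
# DEFAULT_PORT and SUPPORT_PROTOCALS are defined on the class as the code implies.
# The 'transfer' callback key follows the dataCallbacks dict used by ConnManager
# earlier in this collection; on_transfer and the port are hypothetical.
def on_transfer(data):
    print('received %d bytes' % len(data))

server = DataServer(port=9101, protocal='tcp',
                    callbacks={'transfer': on_transfer})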
def save(path='/tmp'):
    logger = util.getLogger(__name__)
    hosts = set()
    egoConfDir = os.getenv('EGO_CONFDIR', '')
    if not egoConfDir:
        logger.error(
            'Could not determine the EGO conf directory. Failed to collect Symphony log files.'
        )
        return
    egoConfFile = os.path.join(egoConfDir, 'ego.conf')
    if not os.path.isfile(egoConfFile):
        logger.error(
            "Could not find ego.conf. Failed to collect Symphony log files.")
        return
    config = configfile.getConfigFromFile(egoConfFile)
    try:
        for candidate in config['EGO_MASTER_LIST'].strip('"').split():
            hosts.add(socket.getfqdn(candidate))
    except:
        pass
    egoDC = Data_Collect(path, __name__)
    out = egoDC.runit('egosh rg ManagementHosts')
    if out:
        for line in out.splitlines():
            if line.startswith('Resource List:'):
                for host in line.split(':', 1)[-1].split():
                    hosts.add(socket.getfqdn(host))
                break
    tempFolder = tempfile.mkdtemp(prefix='lc_')
    hostList = ' '.join(hosts)
    args = [
        os.path.join(sys.path[0], 'log_collector.py'), 'deploy', tempFolder,
        hostList, 'ego'
    ]
    log_collector = imp.load_source(
        __name__ + '.log_collector',
        os.path.join(os.path.dirname(os.path.realpath(__file__)),
                     'log_collector.py'))
    log_collector.main(args)
    for filePattern in iglob(os.path.join(tempFolder, '*.ego.logs.*.tar.gz')):
        egoDC.moveit(filePattern)
    util.rmtree(tempFolder)
def save(path='/tmp'):
    logger = util.getLogger(__name__)
    egoConfDir = os.getenv('EGO_CONFDIR')
    if not egoConfDir:
        logger.error(
            'Could not determine the EGO conf directory. Failed to collect EGO log files.'
        )
        return
    elif not os.path.exists(egoConfDir):
        logger.error('The EGO conf directory %s does not exist.', egoConfDir)
    egoConfCp = Data_Collect(path, __name__)
    # for fname in iglob(os.path.join(egoConfDir, '*.conf')):
    #     egoConfCp.copyit(fname)
    # for fname in iglob(os.path.join(egoConfDir, '*.xml')):
    #     egoConfCp.copyit(fname)
    # for fname in iglob(os.path.join(egoConfDir, '*.shared')):
    #     egoConfCp.copyit(fname)
    egoConfCp.copyit(egoConfDir)
    egoServiceConfDir = os.path.join(egoConfDir, '..', '..', 'eservice', 'esc',
                                     'conf', 'services')
    # for fname in iglob(os.path.join(egoServiceConfDir, '*.xml')):
    #     egoConfCp.copyit(fname)
    egoConfCp.copyit(egoServiceConfDir)
def save(path='/tmp'):
    logger = util.getLogger(__name__)
    symConfCp = Data_Collect(path, __name__)
    egoConfDir = os.getenv('EGO_CONFDIR')
    if not egoConfDir:
        logger.error(
            "Could not determine the EGO conf directory. Failed to collect Symphony configuration files."
        )
        return
    profileDir = os.path.join(egoConfDir, '..', '..', 'soam', 'profiles')
    egoServiceConfDir = os.path.join(egoConfDir, '..', '..', 'eservice', 'esc',
                                     'conf', 'services')
    if os.path.exists(profileDir):
        symConfCp.copyit(profileDir)
    else:
        logger.error(
            "The Symphony application profile directory %s does not exist. "
            "Failed to copy application profiles.", profileDir)
    if os.path.exists(egoServiceConfDir):
        symConfCp.copyit(egoServiceConfDir)
    else:
        logger.error(
            "The EGO service profile directory %s does not exist. Failed to copy EGO service profiles.",
            egoServiceConfDir)
import yum
from yum.misc import to_xml
from yum.update_md import UpdateMetadata, UpdateNotice

import util

log = util.getLogger(__name__)

#
# yum 3.2.22 compat: UpdateMetadata.add_notice() not
# supported in 3.2.22.
#
if yum.__version__ < (3, 2, 28):
    def add_notice(self, un):
        if not un or not un["update_id"] or un['update_id'] in self._notices:
            return
        self._notices[un['update_id']] = un
        pkglist = un['pkglist'] or []
        for pkg in pkglist:
            for filedata in pkg['packages']:
                self._cache['%s-%s-%s' % (filedata['name'],
                                          filedata['version'],
                                          filedata['release'])] = un
                no = self._no_cache.setdefault(filedata['name'], set())
                no.add(un)
        return True

    UpdateMetadata.add_notice = add_notice

# Work around for: https://bugzilla.redhat.com/show_bug.cgi?id=886240#c13
def runClsSnapshot(config):
    logger = util.getLogger(__name__)
    modulePath = os.path.dirname(os.path.abspath(__file__))
    modname = os.path.splitext(os.path.basename(__file__))[0]
    savePath = config.get('TOP', '')
    if not savePath:
        savePath = os.path.join(modulePath, 'snapshots')
        if not os.path.exists(savePath):
            try:
                os.makedirs(savePath)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    pass
                else:
                    logger.error("Could not create save path %s.", savePath)
                    return None
    elif not os.path.exists(savePath):
        logger.error("The save path %s does not exist.", savePath)
        return None
    logFileName = modname + '.log'
    logFile = os.path.join(savePath, logFileName)
    stdErr = config.get('LOG_STDERR', '')
    dbglevel = getattr(logging, config.get('LOG_LEVEL', 'WARN').upper())
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(name)s.%(funcName)s() - %(message)s')
    logfh = logging.FileHandler(logFile, mode='a')
    logfh.setFormatter(formatter)
    logger.addHandler(logfh)
    logger.setLevel(dbglevel)
    if stdErr:
        logstderr = logging.StreamHandler(sys.stderr)
        logstderr.setLevel(dbglevel)
        logstderr.setFormatter(
            util.LogFileFormatter('%(name)s: %(levelname)s- %(message)s'))
        logger.addHandler(logstderr)
    pKey = config.get('pKey', '')
    if pKey and os.path.isfile(pKey):
        os.putenv('SSH_PRIVATE_KEY', pKey)
    modules = configfile.getModuleList(config.get('MODULES', ['all']), allMods)
    mList = list()
    myScriptPath = os.path.dirname(os.path.realpath(__file__))
    for m in modules:
        try:
            mList.append(
                imp.load_source(__name__ + '.' + m,
                                os.path.join(myScriptPath, m + '.py')))
        except ImportError as e:
            logger.exception(
                "Error loading module as defined in CLI parameter '--modules' or snapshot.conf MODULES"
                " parameter. Check that the module %s is valid.", m)
    savePath = os.path.abspath(savePath)
    t_list = []
    for mod in mList:
        t_list.append(Thread(target=mod.save, args=[config, savePath]))
        t_list[-1].start()
    for thread in t_list:
        thread.join()
        logger.debug('%s is complete.', thread)
    logger.info('Compressing cluster snapshot...')
    timestr = datetime.now().strftime("%Y-%m-%d-%H%M%S")
    cwd = os.getcwd()
    os.chdir(savePath)
    sstarfile = 'CL_Snapshot.%s.tar.gz' % timestr
    tar = tarfile.open(sstarfile, 'w:gz')
    os.chdir(cwd)
    removeDir = []
    for name in os.listdir(savePath):
        if name.endswith('.tar.gz') or name.endswith('.log'):
            continue
        fullname = os.path.join(savePath, name)
        tar.add(fullname, arcname=name)
        removeDir.append(fullname)
    tar.close()
    for f in removeDir:
        if not util.rmtree(f, 5):
            logger.error("Unable to remove '%s'", f)
    logger.info("Compressed to '%s'", sstarfile)
    return os.path.join(savePath, sstarfile)
#!/usr/bin/env python3.9
from uuid import uuid4

from flask import Flask, request
from flask_socketio import SocketIO, emit, send
import flask_socketio
import yaml

from util import getLogger, manager

async_mode = "gevent"

app = Flask(__name__)
app.config['SECRET_KEY'] = str(uuid4())

logger = getLogger('server', saveName='server.log')
slogger = logger.getChild('sIO')
elogger = logger.getChild('eIO')

socketio = SocketIO(app, logger=slogger, engineio_logger=elogger,
                    async_mode=async_mode, ping_interval=60)

with open('event_list.yaml') as f:
    event_dict = yaml.safe_load(f)
with open('user.yaml') as f:
    users = manager(yaml.safe_load(f))
#-*- coding: UTF-8 -*-
import logging
import logging.config

import settings
import util
import engine

logging.config.dictConfig(settings.logging)
logger = util.getLogger()


class Application:
    """docstring for Application"""

    def __init__(self):
        logger.info('initializing application')

    def buildEnv(self):
        logger.info('building environment')

    def run(self):
        searcher = engine.Searcher()
        searcher.query('list append')


def main():
    app = Application()
    logger.info('application is running')
    app.run()
def save(config, lsfConfig, path='/tmp/'):
    logger = getLogger(__name__)
    sysDataCollector = Data_Collect(path, __name__)
    for syscmd in Sys_Tasks:
        logger.debug("Calling %s ..." % syscmd)
        sysDataCollector.saveit(syscmd)
#!/usr/bin/env python
import unittest
import util
import subprocess

from certConfig import Resource
from certConfig import CertTestConfig

logger = util.getLogger('certTest')


class CallbackScaleTest(unittest.TestCase):
    res = Resource("CallbackScaleTest")

    @classmethod
    def setUpClass(cls):
        try:
            logger.info("------------------------------------------")
            logger.info("set up the fixture for Callback Scale Test")
            cls.res.preTestValidation()
            cls.res.configure()
            cls.res.deployInfrastucture()
            cls.res.deployVM()
            cls.res.powerVM()
            logger.info("Press ENTER to continue...")
            input()
        except:
            cls.res.cleaner.pop_all().close()
            cls.res.saveLog()
            raise

    @classmethod
__author__ = 'strong'

import splunk.entity as entity
import splunk.admin as admin
import util, time
import splunklib.client as client

logger = util.getLogger()

CONF_TARGET = 'addon_target'
CONF_WEB = 'configs/conf-web'
TARGET_PROPERTIES = ['port', 'scheme', 'username', 'password']
TARGET_APP = 'Splunk_TA_snow'
TARGET_OWNER = 'nobody'
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"


class TargetManager(object):
    def __init__(self, app=None, owner=None, session_key=None):
        self._app = app
        self._owner = 'nobody'  # so that conf file will be saved in app
        self._sessionKey = session_key
        splunkd_host_port = self._get_entity(CONF_WEB, 'settings').get(
            'mgmtHostPort', '127.0.0.1:8089')
        host_and_port = splunkd_host_port.split(':')
        self.local_splunk_host = host_and_port[0]
        self.local_splunk_port = host_and_port[1]
        logger.info('app %s, owner %s, host %s, port %s' %
                    (self._app, self._owner, self.local_splunk_host,
def save(config, path='/tmp/S_SHUT'):
    modName = os.path.splitext(os.path.basename(__file__))[0]
    savePath = os.path.join(path, modName)
    logger = util.getLogger(__name__)
    if os.path.exists(savePath):
        dt = strftime("%Y-%m-%d:%H:%M:%S", gmtime())
        os.rename(savePath, savePath + '.' + dt)
    try:
        os.makedirs(savePath)
    except Exception as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            logger.exception("Could not create directory path '%s'.", savePath)
            return
    logFileName = modName + '.log'
    debugLevel = config.get('LSF_LOG_LEVEL')
    LOGFILE = os.path.join(savePath, logFileName)
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(name)s.%(funcName)s() - %(message)s')
    fHandle = logging.FileHandler(LOGFILE)
    fHandle.setFormatter(formatter)
    # fHandle.addFilter(util.ReplaceMainFilter())
    if debugLevel:
        try:
            fHandle.setLevel(getattr(logging, debugLevel.strip().upper()))
        except:
            pass
    logger.addHandler(fHandle)
    lsfModules = configfile.getModuleList(config.get('LSF_MDLS', allMods),
                                          allMods)
    lsf_mods = []
    myScriptPath = os.path.dirname(os.path.realpath(__file__))
    for mod in lsfModules:
        try:
            lsf_mods.append(
                imp.load_source(__name__ + '.' + mod,
                                os.path.join(myScriptPath, mod + '.py')))
        except ImportError:
            logger.error(
                "Error loading module as defined in CLI parameter '--lsf_mdls' or snapshot.conf LSF_MDLS "
                "parameter. Check that the module %s is valid.", mod)
    if not os.getenv('LSF_ENVDIR'):
        logger.warn(
            'Cannot find $LSF_ENVDIR in the environment. Data collection will be incomplete.'
        )
    configFilePath = os.path.join(os.getenv('LSF_ENVDIR', '/etc'), 'lsf.conf')
    lsfConfig = {}
    try:
        lsfConfig = configfile.getConfigFromFile(configFilePath)
    except IOError:
        logger.error(
            "Could not load %s. LSF data collection will be incomplete.",
            configFilePath)
    for mod in lsf_mods:
        mod.save(config, lsfConfig, savePath)
    fHandle.close()
    logger.removeHandler(fHandle)
def save(path='/tmp'):
    logger = util.getLogger(__name__)
    egoDC = Data_Collect(path, __name__)
    for cmd in egoTasks:
        logger.debug("Calling %s ..." % cmd)
        egoDC.saveit(cmd)
def getClusterSnapshotConfig(configFile=None, arguments=None,
                             preferConfigFile=False):
    logger = getLogger(__name__)
    logLevels = ['critical', 'error', 'warning', 'info', 'debug']
    configFromFile = {}
    if configFile:
        if os.path.isfile(configFile):
            configFromFile = getConfigFromFile(configFile)
        else:
            logger.warn(
                "The snapshot configuration file %s could not be found.",
                configFile)
    configFromArg = {}
    if arguments:
        parser = argparse.ArgumentParser(
            description='Spectrum Cluster Snapshot Tool')
        # General arguments.
        parser.add_argument("--source_dir", dest="SOURCE_DIR",
                            metavar="<path>",
                            help="Cluster source file location.")
        parser.add_argument("--save_path", dest="TOP", metavar="<path>",
                            help="Top dir to save snapshot data in.")
        parser.add_argument("--log_level", dest="LOG_LEVEL",
                            default='warning', choices=logLevels,
                            help="debug level")
        parser.add_argument(
            "--log_stderr", dest="verbose", default='n', choices=['y', 'n'],
            help="Run in verbose mode by printing logging messages to stderr.")
        choiceList = getChoiceList(clsnapshot.allMods)
        parser.add_argument(
            "--modules", dest="MODULES", default=['all'], nargs="+",
            choices=choiceList,
            help="List of cluster data to collect. Items marked with ~ are "
            "explicitly not collected.")
        # LSF arguments.
        choiceList = getChoiceList(lsf.allMods)
        parser.add_argument(
            "--lsf_mdls", dest="LSF_MDLS", default=['all'], nargs="+",
            choices=choiceList,
            help="List of LSF data to collect. Items marked with ~ are "
            "explicitly not collected.")
        parser.add_argument("--lsf_log_level", default='warning',
                            dest="LSF_LOG_LEVEL", choices=logLevels,
                            help="Debug level for LSF data collection.")
        parser.add_argument("--lsf_events_num", type=int, dest="EVENTS_NUM",
                            metavar="<number of lsb.events files>",
                            help="The number of lsb.events* files to save.")
        # EGO arguments.
        choiceList = getChoiceList(ego.allMods)
        parser.add_argument(
            "--ego_mdls", dest="EGO_MDLS", nargs="+", choices=choiceList,
            default=["all"],
            help="The comma delimited list of EGO data to collect. Items "
            "marked with ~ are never collected.")
        parser.add_argument("--ego_log_level", dest="EGO_LOG_LEVEL",
                            default='warning', choices=logLevels,
                            help="Debug level for EGO data collection.")
        # SYM arguments.
        choiceList = getChoiceList(sym.allMods)
        parser.add_argument(
            "--sym_mdls", dest="SYM_MDLS", default=['all'],
            choices=choiceList, nargs="+",
            help="The comma delimited list of SYM data to collect. Items "
            "marked with ~ are never collected.")
        parser.add_argument("--sym_log_level", default='warning',
                            dest="SYM_LOG_LEVEL", choices=logLevels,
                            help="Debug level for SYM data collection.")
        parser.add_argument(
            "--pkey", action="store", dest="pKey", metavar="<path>",
            help="Private key file path for logging into remote hosts.")
        configFromArg = vars(parser.parse_args(arguments))
    if preferConfigFile:
        totalConfig = configFromArg
        totalConfig.update(configFromFile)
    else:
        totalConfig = configFromFile
        totalConfig.update(configFromArg)
    return totalConfig
import shutil
import cProfile
import pstats

from mtoken import Character, Morph, LToken, NPC
from macro import CssMacro, TMacro, LibMacro, SMacro
from mtable import Table, Entry
from cmpgn import Campaign, CProp, PSet
from zone import Zone
from util import lName, getLogger, configureLogger, parse_args
from data import content

#host = "192.168.200.7:5123"
host = "localhost:5123"

log = getLogger(lName)

morph_props = '''[
{"name": "pools", "showOnSheet": true, "value": "Ins {insight} | Mox {moxie} |Vig {vigor} | Flex {flex}"}
]
'''

pc_props = '''[
{"name": "aptitudes", "showOnSheet": true, "value": "COG {cognition} | INT {intuition} | REF {reflexes} | SAV {savvy} | SOM {somatics} | WIL {willpower}"},
{"name": "pools", "showOnSheet": true, "value": "Ins {insight} | Mox {moxie} |Vig {vigor} | Flex {flex}"},
{"name": "initiative", "showOnSheet": true, "value": "{(reflexes + intuition)/5}"},
{"name": "lucidity", "showOnSheet": true, "value": "{willpower*2}"},
{"name": "insanity", "showOnSheet": true, "value": "{lucidity*2}"},
{"name": "trauma", "showOnSheet": true, "value": "{lucidity/5}"},
{"name": "infection", "showOnSheet": true, "value": "{psi*10}"},
{"name": "morph", "showOnSheet": true, "value": "{morph}"}
]
import sendgrid
import util, configparser, getopt, subprocess, sys, time, psutil
#from models import ProcessTimeSeries
from models import Database

start_time = time.time()

# initial setup
config = util.readConfig()
logger = util.getLogger(config)
sg = util.getSendGrid(config)
db = Database(config)

# parse arguments
try:
    opts, args = getopt.getopt(sys.argv[1:], "hvrt", ["--restarts"])
except getopt.GetoptError:
    print("Not enough options")
    sys.exit(2)

for opt, arg in opts:
    if opt == '-h':
        util.printhelp()
        sys.exit(0)
    elif opt in ("-r", "--restarts"):
        util.printrestarts(config)
        sys.exit(0)
    elif opt in ("-t", "--time"):
        util.printtimeseries(config)
        sys.exit(0)
    elif opt == "-v":
class QueryMasterFactory(ServerFactory):
    protocol = gamespy.master.QueryMaster
    log = util.getLogger('gamespy.ra3master', self)
    gameName = gameId
def post(self):
    action = self.request.get('action')
    request_ = self.request
    result = getattr(self, action)(request_)
    util.getLogger(__name__).debug('ajax action "%s" return value is %s',
                                   action, simplejson.dumps(result))
    self.response.out.write(simplejson.dumps(result))
__author__ = 'strong'

import httplib
import urllib
import base64

import util as u
import splunklib.client as client
import splunklib.binding as binding
import splunk.admin as admin
from urlparse import urlparse

logger = u.getLogger()

SETUP_ACCOUNT_URL = "service_now_setup/snow_account"
TABLE_NAME = "cmn_department"


class SnowAccount(object):
    def __init__(self, snow_url, release, username, password,
                 name='snow_account'):
        self.name = name
        self.snow_url = snow_url
        self.release = release and release.strip() or ''
        self.username = username and username.strip() or ''
        self.password = password and password.strip() or ''
import gzip
import os
import shutil
import xml.dom.minidom

import yum.comps

import util

log = util.getLogger(__name__)


def yum_group_to_model_group(obj):
    """
    Translate a yum.comps.Group to a dict
    @param obj: yum.comps.Group object
    @return: dict
    """
    grp = {}
    grp['id'] = obj.groupid
    grp['name'] = obj.name
    grp['description'] = obj.description
    grp['user_visible'] = obj.user_visible
    grp['display_order'] = obj.display_order
    grp['default'] = obj.default
    grp['langonly'] = obj.langonly
    grp['mandatory_package_names'] = []
    grp['mandatory_package_names'].extend(obj.mandatory_packages.keys())
    grp['optional_package_names'] = []
    grp['optional_package_names'].extend(obj.optional_packages.keys())
    grp['default_package_names'] = []
    grp['default_package_names'].extend(obj.default_packages.keys())
""" python version:3.5 """ __author__ = "Kantha Girish", "Pankaj Uchil Vasant", "Samana Katti" import json import twitter from inverted_index import InvertedIndex from database import getDBInstance from util import getLogger, config logger = getLogger("populate_feed") def getTweets(woeid): """ :param woeid : Where On Earth ID of the place of interest for which trends and related tweets are to be extracted. :return: A python dictionary containing keys as trends and the values as the list of tweets fetched from the twitter API call. tweets = { "trend1": [tweet1, tweet2, ...], "trend2": [tweet3, tweet4, ...] } This function reads config file `config.ini` containing the access keys for twitter API calls and creates a connection to the twitter API. The trends are fetched for the specified `woeid` and the tweets are fetched for each trend. The result is returned as a
""" python version:3.5 """ __author__ = "Kantha Girish", "Pankaj Uchil Vasant", "Samana Katti" from os import listdir from os.path import isdir, isfile, join from naivebayes import NaiveBayes from util import config, getLogger logger = getLogger("train_classifiers") def getData(folder): """ :param folder: full path of the folder read files from for training the model :return: documents(list of documents), labels(list of labels) This function will put all the contents of the documents in each category into a list and creates a list of labels which are the names of the category folders """ data = {} for f in listdir(folder): if isdir(join(folder, f)): data[f] = [] for file in listdir(join(folder, f)): if isfile(join(folder, f, file)) and ".txt" in file: with open(join(folder, f, file)) as fobj: data[f].append(fobj.read().replace('\n', ''))
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with django-facebookconnect. If not, see <http://www.gnu.org/licenses/>.

from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login, logout, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from django.conf import settings

import logging, util
log = util.getLogger('facebookconnect.views')

from facebook.djangofb import require_login as require_fb_login

from facebookconnect.models import FacebookProfile
from facebookconnect.forms import FacebookUserCreationForm


def facebook_login(request, redirect_url=None,
                   template_name='facebook/login.html', extra_context=None):
    """
    facebook_login
    ===============================

    Handles logging in a facebook user. Usually handles the django side of
from util import getLogger
from _default import cache_store as _default_store

log = getLogger('ss.cache')

TIME_MINUTE = 60
TIME_HOUR = 60 * TIME_MINUTE
TIME_DAY = 24 * TIME_HOUR
TIME_WEEK = 7 * TIME_DAY
TIME_MONTH = 30 * TIME_DAY
TIME_YEAR = 365 * TIME_DAY


def reset():
    store.reset()


def set(key, value, **kwargs):
    if 'expires' not in kwargs:
        kwargs['expires'] = 10
    store.set(key, value, **kwargs)


def get(key):
    try:
        return store.get(key)
    except:
        pass


def fetch(key, cb, **kwargs):
    if expired(key):
        log.debug('%s is stale' % key)
        set(key, cb(), **kwargs)
    return get(key)
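# Hedged usage sketch for the cache helpers above: fetch() recomputes a value via the
# callback only when the cached copy has expired. load_listing and the cache key are
# hypothetical stand-ins for a real data source in this project.
def load_listing():
    return {'items': []}

listing = fetch('wizard.listing', load_listing, expires=TIME_HOUR)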
import util
from consumer import Consumer
import environment
import cache

log = util.getLogger('ss.wizard')


def get_payload(endpoint):
    ttl = cache.TIME_HOUR / 2
    endpoint = util.sources_endpoint(endpoint)
    return environment.json_from_url(endpoint, expires=ttl)


def translate(foreign):
    final_url = foreign.get('final_url')
    if final_url:
        return final_url
    else:
        endpoint = util.translate_endpoint(foreign['original_url'],
                                           foreign['foreign_url'])
        response = environment.json_from_url(endpoint)
        return util.translated_from(response)


class Wizard(object):
    def __init__(self, endpoint, avoid_flv=False, start_at=0):
        self.endpoint = endpoint
        self.file_hint = None
        self.avoid_flv = avoid_flv
        self.consumer = None
        self.start_at = start_at
import util
from wizard import Wizard
from downloadstatus import DownloadStatus

log = util.getLogger('ss.downloader')


def curl_strategy_command(dl):
    command = [
        'curl', '--location',
        '--referer', dl.consumer.url,
        '--cookie', dl.consumer.cookie_string,
        '--user-agent', dl.consumer.ua,
        '--stderr', dl.status_file,
        '--output', dl.local_partfile
    ]
    if int(dl.limit) > 0:
        command.append('--limit-rate')
        command.append('%sK' % dl.limit)
    command.append(dl.consumer.asset_url)
    return command


def wget_strategy_command(dl):
    command = [
        'wget', '--no-cookies', '--continue',
        '--referer', dl.consumer.url,
from lcs import *
from lcdb import Nation, Code, Text, store
from util import Cache, getLogger, utf8, convertRelativeLink
from bs4 import BeautifulSoup
from bs4.element import Tag
from zipfile import ZipFile
from datetime import datetime
import requests, os, logging, shutil
from clint.textui import progress
from multiprocessing import Pool

log = getLogger(__name__)
now = datetime.now
BS = lambda xml: BeautifulSoup(xml, 'lxml')
dedup = lambda l: list(set(l))

# The UK presents a unique challenge in that, while it has great data
# accessibility for changes, its legal publication is ongoing and does not
# have a general revision identifier for the entirety; this essentially means
# that each scrape will have its own revision, and that revision will only be
# related to when it was scraped. Essentially, this program was architected to
# report on publications of entire legal codes, and not real-time changes.
# Modifications could be made to specify point-in-time reporting, which would
# introduce significant complexity but easily handle this instance and allow
# the consumption of the UK's RSS feeds for changes.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import jinja2
import re

from util import jenv, getLogger

log = getLogger(__name__)

cachedMacro = {}


class Macro(object):
    def __init__(self, label, tmpl, group, colors, others=None):
        self.tmpl = tmpl
        self._template = None
        self._label = label
        self.colors = colors
        self.cattr = ['cognition', 'intuition', 'reflex', 'savvy', 'somatics',
                      'willpower']  # XXX eclipse specific in lib
        self.group = group
        self.others = others if (others is not None) else {}
        self.allowPlayerEdits = 'false'
        self.autoExecute = 'true'
        self._command = None

    def __str__(self):
        return '%s<%s,grp=%s>' % (self.__class__.__name__, self.label,
                                  self.group)

    def __repr__(self):
        return str(self)

    def to_dict(self):
        return dict(self.__dict__)

    @property
    def label(self):
        return self._label
def connectionMade(self):
    EaServer.connectionMade(self)
    self.log = util.getLogger('login.cnc4', self)
    self.msgFactory = MessageFactory(self.transport, EaLoginMessage)
    self.hlrFactory = MessageHandlerFactory(
        self, 'ea.games.redalert3.Ra3MsgHlr')
#!/usr/bin/env python
from bottle import *
from redis import Redis
import util, json
from time import strftime, gmtime, time, sleep

KEY_MAPPING = "arduino:keymapping"
KEY_POSITIONS = "arduino:keypositions"
KEY_POSITION_FMT = "arduino:keyposition:%s"
ARDUINO_COMMAND = "arduino:remote-command"

LOG = util.getLogger('arduino_remote_server')

AUTH_COOKIE = 'remote-auth'
COOKIE_SECRET = util.CONFIG['web']['cookie']
PASSWORD = util.CONFIG['web']['password']


def auth_required():
    def decorator(func):
        def wrapper(*args, **kwargs):
            authd = request.get_cookie(AUTH_COOKIE, secret=COOKIE_SECRET)
            if authd != PASSWORD:
                redirect('/login')
            return func(*args, **kwargs)
        return wrapper
    return decorator


def getRelativeCookieTime(days):
    return strftime("%a, %d-%b-%Y %H:%M:%S GMT",
                    gmtime(days * 24 * 60 * 60 + time()))


from bottle import get, post, request
def work(self):
    while True:
        item = self.redis.blpop("arduino:remote-command", timeout=5)
        if item:
            self.misses = 0
            self.handleItem(item[1])  # 0 => key
        else:
            self.misses += 1
            if self.misses > self.misslimit and self.serial:
                self.serial.close()
                self.serial = None
                log.debug("closed serial port")


if __name__ == "__main__":
    log = util.getLogger('arduino_remote_worker')
    # set up redis
    rdb = Redis()
    # set up serial
    PORT = util.CONFIG['hardware']['port']  # kevin is on COM4
    MISSLIMIT = 10000000000
    getserial = lambda: serial.Serial(port=PORT, baudrate=115200, timeout=2)
    while True:
        try:
            worker(rdb, log, MISSLIMIT, getserial).work()
        except KeyboardInterrupt:
            log.info("KeyboardInterrupt>>shutdown")
            break
        except:
import urllib, zlib, re, gzip, urlparse
import Cookie
import mechanize
import util
import environment
import cache
import datetime
import time

log = util.getLogger('ss.consumer')


def browser_agent(user_agent):
    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.addheaders = [
        ('User-agent', user_agent),
        ('Accept', '*/*'),
        ('Accept-Encoding', 'gzip,deflate,identity'),
        ('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'),
        ('Accept-Language', 'en-us,en;q=0.5'),
    ]
    return br


def all_procedures():
    return environment.json_from_url(util.procedures_endpoint(),
                                     expires=cache.TIME_DAY)


def procedure_for_url(url):
def connectionMade(self):
    EaServer.connectionMade(self)
    self.log = util.getLogger("login.nfsps2", self)
    self.msgFactory = MessageFactory(self.transport, EaLoginMessage)
    self.hlrFactory = MessageHandlerFactory(self,
                                            "ea.games.redalert3.Ra3MsgHlr")