def on_config_change(self, config, section, key, value):
    """Handle a Kivy settings-panel change.

    Validates the (section, key, value) triple, updates the global duty
    cycle for 'general/speed', toggles log verbosity for 'general/debug',
    and dispatches per-pump settings.
    """
    if (section not in pumppins.keys()) and (section != 'general'):
        logging.info('Something went wrong.')
        logging.debug('Config: section not valid: Section: %s, Key: %s, Value: %s', section, key, value)
    else:
        logging.debug('Config: Section; %s, Key: %s, Value: %s', section, key, value)
        # BUG FIX: all string comparisons below used `is`, which tests
        # identity, not equality — unreliable for strings; use `==`.
        if section == 'general':
            if key == 'speed':
                # BUG FIX: original condition was `> 5 and < 0`, which can
                # never be true; out-of-range means `> 5 OR < 0`.
                if int(value) > 5 or int(value) < 0:
                    self.popup = Popup(title="Error", content=Label(text='Please keep the speed between 0 and 5.'))
                    # BUG FIX: was bare `popup.open()` (NameError) — the
                    # popup was stored on self.
                    self.popup.open()
                    logging.info('Config: Speed invalid')
                    logging.debug('Config: Invalid Speed: %s', value)
                else:
                    global DUTY
                    DUTY = int(value) / 5
                    logging.info('Config: Duty cycle is now %i', int(value) / 5)
            elif key == 'debug':
                # BUG FIX: the logging *module* has no setLevel(); set the
                # level on the root logger instead.
                if value == '0':
                    logging.getLogger().setLevel('INFO')
                else:
                    logging.getLogger().setLevel('DEBUG')
            else:
                logging.info('Config: Key not found: %s', key)
        else:
            if section == '1':
                if key == 'enable':
                    if value == '0':
                        # disable pump
                        print(self)
def create_logger(name, silent=False, to_disk=False, log_file=None):
    """Build and return a named logger.

    A stdout handler is attached unless `silent`; when `to_disk` is set,
    one INFO-level file handler is attached per target file (`log_file`
    may be a single path, a list of paths, or None for a timestamped
    default under log/).
    """
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)
    log.propagate = False
    formatter = logging.Formatter(fmt='%(message)s', datefmt='%Y/%m/%d %I:%M:%S')

    if not silent:
        console = logging.StreamHandler(sys.stdout)
        console.setLevel(logging.DEBUG)
        console.setFormatter(formatter)
        log.addHandler(console)

    if to_disk:
        if log_file is None:
            log_file = strftime("log/log_%m%d_%H%M.txt", gmtime())
        # Normalize to a list of file names, then attach one handler each.
        if type(log_file) == list:
            filenames = log_file
        elif type(log_file) == str:
            filenames = [log_file]
        else:
            filenames = []
        for filename in filenames:
            file_handler = logging.FileHandler(filename, mode='w')
            file_handler.setLevel(logging.INFO)
            file_handler.setFormatter(formatter)
            log.addHandler(file_handler)

    return log
def __setup_logger():
    """Logger is set up before anything else is done.

    Reads the configured level from logging.py config files, allowing an
    override via the LADA_LOGGING_LEVEL environment variable, and returns
    the configured root logger for the application.
    """
    from os import environ
    import logging
    import sys

    local_dict = __exec_config_files("logging.py")
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p', stream=sys.stdout)
    logging_level = environ.get('LADA_LOGGING_LEVEL',
                                local_dict['logging_level'])
    # The level may be numeric ("10") or symbolic ("debug").
    try:
        level = int(logging_level)
    except (TypeError, ValueError):
        pass
    else:
        logging_level = level
    root_logger = local_dict['root_logger']
    logger = logging.getLogger(root_logger)
    # BUG FIX: original checked hasattr(logging, 'upper') — the module —
    # and called logging.setLevel(), which does not exist. The intent is
    # to upper-case *string* levels before applying them to the logger.
    if hasattr(logging_level, 'upper'):
        logger.setLevel(logging_level.upper())
    else:
        logger.setLevel(logging_level)
    for filename in __find_config_files("logging.py"):
        logger.debug("Read configuration file %s" % filename)
    return logger
def get_logger(log_file_name):
    """Return a DEBUG-level logger writing to a size-rotated file.

    The logger is keyed by `log_file_name`, rotates at ~1 MB, and keeps
    three backups.
    """
    # BUG FIX: original called logging.setLevel() (the module has no such
    # attribute) and referenced the undefined names `log_file_path` and
    # `logger`; derive the logger from the given file name instead.
    logger = logging.getLogger(log_file_name)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        fmt='%(levelname)-6s - %(message)s - %(filename)s:%(lineno)d - %(msecs)d ms[%(asctime)s]',
        datefmt='%x %X %Z')
    handler = logging.handlers.RotatingFileHandler(filename=log_file_name,
                                                   mode='a',
                                                   maxBytes=1000000,
                                                   backupCount=3)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger
def debug_logging(level):
    """Temporarily set the root logger to `level` (generator context).

    Yields once with the new level active and restores the previous
    effective level afterwards, even if the body raises.
    """
    logger = logging.getLogger()
    old_level = logger.getEffectiveLevel()
    logger.setLevel(level)
    try:
        yield
    finally:
        # BUG FIX: was logging.setLevel(old_level) — the logging module has
        # no setLevel(), so leaving the context raised AttributeError.
        logger.setLevel(old_level)
def main():
    """Parse command-line arguments and download the requested GOG games."""
    parser = argparse.ArgumentParser(description="Process parameters")
    parser.add_argument("-d", "--directory", default="gogdl",
                        help="Set Directory")
    parser.add_argument("-p", "--platform", default="4",
                        help="Set platforms to download",
                        choices=["1", "2", "3", "4", "5", "6", "7", "w", "m", "l"])
    parser.add_argument("games", nargs="*", default="downloadallgames",
                        help="Games to Download, separated by spaces")
    parser.add_argument("--log", help="Set minimum logging level",
                        choices=["debug", "info", "warning", "error", "critical",
                                 "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"])
    parser.add_argument("-V", "--version",
                        help="Print version information and exit",
                        action="version", version="%(prog)s " + __version__)
    parser.add_argument("--debug", help="Enable Debugging",
                        action="store_true")
    args = parser.parse_args()

    # Configure application logging based on the arguments received.
    userHome = os.path.expanduser("~")
    if args.debug:
        # BUG FIX: the logging *module* has no setLevel(); configure the
        # module-level `logger` used below instead.
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)
    elif args.log:
        # Upper-case so the user may pass --log=DEBUG or --log=debug.
        loglevel = args.log.upper()
        logger.setLevel(loglevel)
        ch.setLevel(loglevel)

    # If debugging enabled, log the arguments passed to the program.
    logger.debug("Arguments Processed")
    dlDir = userHome + "/" + args.directory
    os.makedirs(dlDir, exist_ok=True)
    download(args.platform, args.games, dlDir)
    return 0
def main():
    """Combine figleaf coverage files and generate a report."""
    import sys
    import logging
    from optparse import OptionParser

    usage = "usage: %prog [options] [coverage files ... ]"
    option_parser = OptionParser(usage=usage)
    option_parser.add_option('-x', '--exclude-patterns', action="store",
                             dest="exclude_patterns_file",
                             help="file containing regexp patterns of files to exclude from report")
    option_parser.add_option('-f', '--files-list', action="store",
                             dest="files_list",
                             help="file containing filenames to report on")
    option_parser.add_option('-q', '--quiet', action='store_true',
                             dest='quiet',
                             help='Suppress all but error messages')
    option_parser.add_option('-D', '--debug', action='store_true',
                             dest='debug',
                             help='Show all debugging messages')
    (options, args) = option_parser.parse_args()

    # BUG FIX: the quiet branch called logging.setLevel() on the module
    # (AttributeError); configure the module-level `logger`, matching the
    # debug branch.
    if options.quiet:
        logger.setLevel(logging.WARNING)
    if options.debug:
        logger.setLevel(logging.DEBUG)

    exclude_patterns = []
    if options.exclude_patterns_file:
        # BUG FIX: was read_exclude_patterns(exclude_patterns_file) — the
        # bare name is undefined; the value lives on the options namespace.
        exclude_patterns = read_exclude_patterns(options.exclude_patterns_file)

    files_list = {}
    if options.files_list:
        files_list = annotate.read_files_list(options.files_list)

    if not args:
        args = ['.figleaf']

    coverage = {}
    for filename in args:
        logger.debug("loading coverage info from '%s'\n" % (filename,))
        d = figleaf.read_coverage(filename)
        coverage = figleaf.combine_coverage(coverage, d)

    if not coverage:
        logger.warning('EXITING -- no coverage info!\n')
        sys.exit(-1)

    create_report(coverage, exclude_patterns, files_list)
def log_init():
    """Initialize logging from the module-level `args` namespace.

    Sets the root logger to DEBUG and, unless args.log_file is False,
    attaches a file handler at the level named by args.log_level.
    """
    # BUG FIX: the logging *module* has neither setLevel() nor
    # addHandler(); both must be called on a Logger — use the root logger.
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    if args.log_file is not False:
        #fh = logging.handlers.RotatingFileHandler(filename = args.log_file, mode='w+', maxBytes = 5000000)
        fh = logging.FileHandler(filename=args.log_file, mode='w+')
        fh.setLevel(getattr(logging, args.log_level))
        ff = logging.Formatter('%(asctime)s - %(levelname)8s - %(message)s')
        #ff = logging.Formatter('%(message)s')
        fh.setFormatter(ff)
        root.addHandler(fh)
def main():
    """Parse command-line arguments and download the requested GOG games."""
    parser = argparse.ArgumentParser(description="Process parameters")
    parser.add_argument("-d", "--directory", default="gogdl",
                        help="Set Directory")
    parser.add_argument("-p", "--platform", default="4",
                        help="Set platforms to download",
                        choices=["1", "2", "3", "4", "5", "6", "7", "w", "m", "l"])
    parser.add_argument("games", nargs="*", default="downloadallgames",
                        help="Games to Download, separated by spaces")
    parser.add_argument("--log", help="Set minimum logging level",
                        choices=["debug", "info", "warning", "error", "critical",
                                 "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"])
    parser.add_argument("-V", "--version",
                        help="Print version information and exit",
                        action="version", version="%(prog)s " + __version__)
    parser.add_argument("--debug", help="Enable Debugging",
                        action="store_true")
    args = parser.parse_args()

    # Configure application logging based on the arguments received.
    userHome = os.path.expanduser("~")
    if args.debug:
        # BUG FIX: the logging *module* has no setLevel(); configure the
        # module-level `logger` used below instead.
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)
    elif args.log:
        # Upper-case so the user may pass --log=DEBUG or --log=debug.
        loglevel = args.log.upper()
        logger.setLevel(loglevel)
        ch.setLevel(loglevel)

    # If debugging enabled, log the arguments passed to the program.
    logger.debug("Arguments Processed")
    dlDir = userHome + "/" + args.directory
    os.makedirs(dlDir, exist_ok=True)
    download(args.platform, args.games, dlDir)
    return 0
def get_logger(log_file_name):
    """Return a DEBUG-level logger writing to a size-rotated file.

    The logger is keyed by `log_file_name`, rotates at ~1 MB, and keeps
    three backups.
    """
    # BUG FIX: original called logging.setLevel() (the module has no such
    # attribute) and referenced the undefined names `log_file_path` and
    # `logger`; derive the logger from the given file name instead.
    logger = logging.getLogger(log_file_name)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        fmt='%(levelname)-6s - %(message)s - %(filename)s:%(lineno)d - %(msecs)d ms[%(asctime)s]',
        datefmt='%x %X %Z')
    handler = logging.handlers.RotatingFileHandler(filename=log_file_name,
                                                   mode='a',
                                                   maxBytes=1000000,
                                                   backupCount=3)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger
def setUpClass(cls):
    """Route WARNING-and-above to the console and full DEBUG to a file.

    The log file is named after this test module. (`root_logger` is the
    logging module imported under that alias elsewhere in the file.)
    """
    # Quiet the root logger so only warnings and worse escape globally.
    root_logger.getLogger('').setLevel(root_logger.WARNING)
    log_path = "log.{}".format(splitext(split(__file__)[1])[0])
    file_handler = root_logger.FileHandler(log_path, mode='w')
    file_handler.setLevel(root_logger.DEBUG)
    console_handler = root_logger.StreamHandler()
    console_handler.setLevel(root_logger.WARNING)
    suite_logger = root_logger.getLogger(__name__)
    suite_logger.setLevel(root_logger.DEBUG)
    suite_logger.addHandler(console_handler)
    suite_logger.addHandler(file_handler)
def init_logging():
    """Create and return the colourised 'backup' logger at DEBUG level."""
    from colorlog import ColoredFormatter
    level = logging.DEBUG
    fmt = " %(log_color)s%(levelname)-8s%(reset)s | %(log_color)s%(message)s%(reset)s"
    logging.root.setLevel(level)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level)
    stream_handler.setFormatter(ColoredFormatter(fmt))
    backup_log = logging.getLogger('backup')
    backup_log.setLevel(level)
    backup_log.addHandler(stream_handler)
    return backup_log
def set_log(level):
    """ change log level for application: [debug|info|warn|error] """
    level = level.lower()
    # Map the accepted names onto logging constants; anything else is an
    # invalid request.
    level_map = {'debug': logging.DEBUG,
                 'info': logging.INFO,
                 'warn': logging.WARNING,
                 'error': logging.ERROR}
    if level not in level_map:
        raise InvalidUsage('Unknown Log level {}'.format(level),
                           status_code=410)
    lvl = level_map[level]
    wideq.set_log_level(lvl)
    # BUG FIX: the logging *module* has no setLevel(); apply the level to
    # the root logger instead.
    logging.getLogger().setLevel(lvl)
    create_logger(api).setLevel(lvl)
    return Response({'log': level})
def configure_logging():
    """Attach a plain file handler and an ANSI-coloured console handler
    to the root logger (`loggingg` is the logging module alias used in
    this file)."""
    logpath = config['smtpd']['log_file']
    # Console format with colour escapes interpolated around the fields.
    format = ('{blue1}%(asctime)s '
              '{red1}%(filename)s:%(lineno)d '
              '{yel1}%(levelname)s '
              '{gre1}%(funcName)s() '
              '{res}%(message)s').format(blue1=AnsiColor.blue,
                                         red1=AnsiColor.red,
                                         yel1=AnsiColor.yellow,
                                         res=AnsiColor.end,
                                         gre1=AnsiColor.magenta)
    # Colour-free variant for the log file.
    format1 = ('%(asctime)s '
               '%(filename)s:%(lineno)d '
               '%(levelname)s '
               '%(funcName)s() '
               '%(message)s')
    console_formatter = loggingg.Formatter(format)
    file_formatter = loggingg.Formatter(format1)
    root_log = loggingg.getLogger()
    root_log.setLevel(loggingg.INFO)
    file_handler = loggingg.FileHandler(logpath)
    file_handler.setFormatter(file_formatter)
    root_log.addHandler(file_handler)
    console_handler = loggingg.StreamHandler()
    console_handler.setFormatter(console_formatter)
    root_log.addHandler(console_handler)
reset=True, log_colors={ 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'white,bg_red', }, secondary_log_colors={}, style='%' ) stream = logging.StreamHandler() stream.setLevel(LOG_LEVEL) stream.setFormatter(formatter) logging = logging.getLogger('pythonConfig') logging.setLevel(LOG_LEVEL) logging.addHandler(stream) ''' ______________________________________________________________________________ constants ______________________________________________________________________________ ''' TIME_SLOT_DURATION = global_setting.constants['time_slot_duration'] # duration of a time step in seconds NUM_MOBILE_DEVICE = global_setting.constants['num_mobile_device'] # NETWORK_BANDWIDTH = global_setting.constants['network_bandwidth'] # in Mbps NUM_TIME_SLOT = global_setting.constants['num_time_slot'] RUN_NUM = global_setting.constants['run_num'] ALGORITHM = global_setting.constants['algorithm_name'] ORIGINAL_OUTPUT_DIR = OUTPUT_DIR = global_setting.constants['output_dir'] networkList = global_setting.constants['network_list'] SAVE_TO_FILE_FREQUENCY = global_setting.constants['save_to_file_frequency'] # 1 means every time slot, 10 means every 10 time slots... PROBLEM_INSTANCE = global_setting.constants['problem_instance'] NUM_REPEAT = global_setting.constants['num_repeat']
def main(argv):
    # Entry point for full Atmosphere instance initialisation: parses CLI
    # options into `instance_data`, then provisions the node (ssh keys,
    # sudoers, storage, LDAP, VNC, nagios, ...) and notifies the server.
    init_logs('/var/log/atmo/atmo_init_full.log')
    instance_data = {"atmosphere" : {}}
    service_type = None
    instance_service_url = None
    instance_service_url = None  # NOTE(review): duplicated assignment — presumably a paste error
    server = None
    root_password = None
    user_id = None
    vnclicense = None
    try:
        opts, args = getopt.getopt(
            argv,
            "t:u:s:i:T:N:v:",
            ["service_type=", "service_url=", "server=", "user_id=",
             "token=", "name=", "vnc_license=",
             # NOTE(review): the following span is corrupted (a secret
             # redaction ate the option-list terminator and the `except`
             # clause); kept verbatim — it does not parse as-is.
             "root_password="******"Invalid arguments provided.")
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-t", "--service_type"):
            instance_data["atmosphere"]["service_type"] = arg
            service_type = arg
        elif opt in ("-T", "--token"):
            instance_data["atmosphere"]["instance_token"] = arg
            instance_token = arg
        elif opt in ("-N", "--name"):
            instance_data["atmosphere"]["name"] = arg
            instance_token = arg  # NOTE(review): overwrites the token with the name — looks like a copy/paste slip
        elif opt in ("-u", "--service_url"):
            instance_data["atmosphere"]["instance_service_url"] = arg
            instance_service_url = arg
        elif opt in ("-s", "--server"):
            instance_data["atmosphere"]["server"] = arg
            global ATMOSERVER
            ATMOSERVER = arg
            server = arg
        elif opt in ("-i", "--user_id"):
            instance_data["atmosphere"]["userid"] = arg
            user_id = arg
        elif opt in ("-v", "--vnc_license"):
            #instance_data["atmosphere"]["vnc_license"] = arg
            vnclicense = arg
        elif opt in ("--root_password"):
            root_password = arg
        elif opt == '-d':
            global _debug
            _debug = 1
            # NOTE(review): logging.setLevel does not exist on the module —
            # passing '-d' would raise AttributeError here.
            logging.setLevel(logging.DEBUG)
    #TODO: What is this line for?
    source = "".join(args)

    logging.debug("Atmosphere request object - %s" % instance_data)
    instance_metadata = get_metadata()
    logging.debug("Instance metadata - %s" % instance_metadata)

    linuxuser = instance_data['atmosphere']['userid']
    linuxpass = ""
    public_ip = get_public_ip(instance_metadata)
    hostname = get_hostname(instance_metadata)
    set_hostname(hostname)
    instance_metadata['linuxusername'] = linuxuser
    instance_metadata["linuxuserpassword"] = linuxpass
    instance_metadata["linuxuservncpassword"] = linuxpass

    logging.debug("Atmoserver - %s" % ATMOSERVER)
    distro = get_distro()
    logging.debug("Distro - %s" % distro)
    #TODO: Test this is multi-call safe
    update_sshkeys(instance_metadata)
    update_sudoers()

    if not in_sudoers(linuxuser):
        add_sudoers(linuxuser)
    if not in_etc_group('/etc/group', linuxuser):
        add_etc_group(linuxuser)

    #is_updated_test determines if this sensitive file needs
    if not is_updated_test("/etc/ssh/sshd_config"):
        ssh_config(distro)

    if root_password:
        set_root_password(root_password, distro)

    # Package refresh — apt on Debian-family, yum path is disabled.
    if not is_rhel(distro):
        run_command(['/usr/bin/apt-get', 'update'])
    #else:
    #    run_command(['/usr/bin/yum', 'check-update'])

    mount_storage()
    ldap_install()
    etc_skel_bashrc(linuxuser)
    run_command(['/bin/cp', '-rp', '/etc/skel/.', '/home/%s' % linuxuser])
    run_command(['/bin/chown', '-R',
                 '%s:iplant-everyone' % (linuxuser,), '/home/%s' % linuxuser])
    run_command(['/bin/chmod', 'a+rwxt', '/tmp'])
    run_command(['/bin/chmod', 'a+rx', '/bin/fusermount'])
    run_command(['/bin/chmod', 'u+s', '/bin/fusermount'])
    vnc(linuxuser, distro, vnclicense)
    iplant_files(distro)
    #atmo_cl()
    nagios()
    distro_files(distro)
    update_timezone()
    shellinaboxd(distro)
    insert_modprobe()
    denyhost_whitelist()
    modify_rclocal(linuxuser, distro, hostname)
    notify_launched_instance(instance_data, instance_metadata)
    logging.info("Complete.")
def main():
    """ Register node to engine

    Parses command-line options, optionally validates the engine TLS
    fingerprint and authorizes the engine's ssh key, then registers this
    node's management address/port/UUID with the oVirt Engine.
    """
    _DEFAULT_NODE_MANAGEMENT_PORT = 54321
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description="Tool to register node to oVirt Engine",
        epilog="Example of use:\n%(prog)s "
               "--authorize-engine-ssh-key "
               "--url https://ovirtengine.localdomain:443 "
               "--fingerprint DC:B9:67:35:60:FC:29:E4:C8:03:4E:5:7A:0D "
               "--node-management-address 10.10.1.1 "
               "--node-management-port 54321",
    )
    parser.add_argument(
        "--authorize-engine-ssh-key",
        action="store_true",
        help="Add ovirt engine public ssh key to node authorized keys",
    )
    parser.add_argument("--fingerprint",
                        help="Fingerprint to be validate against engine web CA")
    parser.add_argument("--hostname",
                        help="Speficy the human-readable name of the node being registered")
    parser.add_argument("--skip-fingerprint", action="store_true",
                        help="Skip fingerprint check")
    parser.add_argument("--node-management-address",
                        help="Node IP address to be registered")
    parser.add_argument("--node-management-port",
                        help="Node management port")
    parser.add_argument("--node-uuid",
                        help="Provide an explicit host uuid")
    parser.add_argument("--url", help="Engine URL", required=True)
    parser.add_argument("--verbose", action="store_true",
                        help="verbose mode, set logging to DEBUG instead of ERROR")
    args = parser.parse_args()

    url = urlparse.urlparse(args.url)
    url_port = url.port
    if url.scheme == "https" and url_port is None:
        url_port = 443
    elif url.scheme == "http" and url_port is None:
        url_port = 80

    # Fetch the engine certificate only when it can be checked (https +
    # a fingerprint to compare against).
    engine_cert = None
    if url.scheme == "https" and args.fingerprint:
        engine_cert = ssl.get_server_certificate((url.hostname, int(url_port)))

    if not args.skip_fingerprint and args.fingerprint is None:
        raise RuntimeError("You must use --fingerprint or --skip-fingerprint")

    if args.verbose:
        # BUG FIX: the logging *module* has no setLevel(); raise the root
        # logger's level instead.
        logging.getLogger().setLevel(logging.DEBUG)

    if not args.skip_fingerprint and url.scheme == "https":
        cert_fingerprint = _calculate_fingerprint(engine_cert)
        if not args.fingerprint.lower() == cert_fingerprint.lower():
            str_error = ("Fingerprint {0} doesn't match "
                         "server's fingerprint!".format(
                             args.fingerprint.lower()))
            _LOG.debug(str_error)
            raise RuntimeError(str_error)

    if args.fingerprint and url.scheme == "http":
        _LOG.debug("Skipping fingerprint check, user provided http url")

    if args.authorize_engine_ssh_key:
        key = _get_remote_public_ssh_key(args.url)
        if key is not None:
            _add_authorized_ssh_key(key)
        else:
            _LOG.error("Cannot download public ssh key from %s" % args.url)

    node_uuid = args.node_uuid
    if node_uuid is None:
        node_uuid = getHostUUID(False)
    if node_uuid is None:
        raise RuntimeError("Cannot retrieve host UUID")

    node_hostname = args.hostname
    if node_hostname is None:
        node_hostname = socket.gethostname()

    management_port = args.node_management_port
    if management_port is None:
        management_port = _DEFAULT_NODE_MANAGEMENT_PORT

    node_management_address = args.node_management_address
    if node_management_address is None:
        # Derive the source address of the route towards the engine host.
        route = routeGet([urlparse.urlparse(args.url).hostname])[0]
        node_management_address = Route.fromText(route).src

    if register_node(args.url, node_hostname, node_management_address,
                     int(management_port), node_uuid):
        _LOG.info(
            "Registration is completed: url: %s, hostname: %s "
            "management address: %s management port: %s" % (
                args.url, node_hostname, node_management_address,
                management_port)
        )
    else:
        raise RuntimeError("Cannot complete the registration")
def setlevel(self, lvl):
    """Apply log level `lvl` to this manager and every logger it owns.

    Also applies it to the root logger.
    """
    self.loglevel = lvl
    # BUG FIX: the logging *module* has no setLevel(); the intent is to
    # set the root logger's level alongside the managed loggers.
    logging.getLogger().setLevel(lvl)
    # .values() iterates identically to the old .itervalues() and is
    # Python 2/3 compatible.
    for logger in self.loggers.values():
        logger.setLevel(lvl)
from num_gen import *
from ophyd import EpicsSignal
from pydm import Display
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from qtpy import QtCore
from qtpy.QtCore import *
from qtpy.QtWidgets import (QApplication, QFrame, QGraphicsScene,
                            QGraphicsView, QHBoxLayout, QLabel, QPushButton,
                            QVBoxLayout)

from signals import Signals

# Silence ophyd's logger.
# NOTE(review): this rebinds the name `logging` to a Logger object,
# shadowing the logging module for the rest of this file — any later
# `logging.getLogger(...)` would fail. Consider a differently named variable.
logging = logging.getLogger('ophyd')
logging.setLevel('CRITICAL')

# Module-level lock; consumers are elsewhere in this file.
lock = threading.Lock()


class GraphicsView(QGraphicsView):
    """QGraphicsView that reports mouse moves even with no button pressed."""

    def __init__(self, parent=None):
        super(GraphicsView, self).__init__(parent)
        # Deliver mouseMoveEvent without requiring a pressed button.
        self.setMouseTracking(True)


class GraphicsScene(QGraphicsScene):
    """Thin QGraphicsScene subclass; no behavior added in this view."""

    def __init__(self, parent=None):
        super(GraphicsScene, self).__init__(parent)
import sys
import logging

from flask.ext.script import Command
from botocore.exceptions import ClientError

from confidant import app
from confidant import iam
from confidant import kms
from confidant import keymanager
from confidant.models.service import Service

# BUG FIX: the logging *module* has neither addHandler() nor setLevel() —
# the original raised AttributeError at import time. Configure the root
# logger, which the module-level logging.info/error calls below feed into.
_root_logger = logging.getLogger()
_root_logger.addHandler(logging.StreamHandler(sys.stdout))
_root_logger.setLevel(logging.INFO)


class ManageGrants(Command):
    """Flask-Script command: sync KMS grants for IAM roles backing services."""

    def run(self):
        """Ensure each IAM role matching a known service has its grants."""
        grants = keymanager.get_grants()
        try:
            roles = [x for x in iam.roles.all()]
        except ClientError:
            logging.error('Failed to fetch IAM roles.')
            return
        services = []
        for service in Service.data_type_date_index.query('service'):
            services.append(service.id)
        for role in roles:
            if role.name in services:
                logging.info('Managing grants for {0}.'.format(role.name))
                keymanager._ensure_grants(role, grants)
        return consume_info  # NOTE(review): tail of a method whose definition starts outside this view

    def come_from_wxpay(self, body, key):
        """
        Verify that the notification really came from the WeChat-pay server
        (signature check against `key`).
        Args:
            body:
            key:
        Returns:
        """

if __name__ == '__main__':
    # Manual smoke test: configure a DEBUG logger and exercise refund().
    # NOTE(review): this rebinds `logging` to a Logger, shadowing the module.
    logging = logging.getLogger('root')
    logging.setLevel('DEBUG')
    wxpay = WxPay(appid='wxd678efh567hg6787',
                  mch_id='1230000109',
                  secret_key='192006250b4c09247ec02edce69f6a2d')
    # wxpay.new_unified_order(out_trade_no='20150806125346', body='商品描述',
    #                         total_fee='100',
    #                         spbill_create_ip='123.12.12.123',
    #                         notify_url='http://example.com/wxpay')
    rst = wxpay.refund(out_trade_no="12333333", out_refund_no="1233333",
                       total_fee=100, refund_fee=100)
    print(rst)
class BenchmarkDoctor(object):
    """Checks that the benchmark conforms to the standard set of requirements.

    Benchmarks that are part of Swift Benchmark Suite are required to follow
    a set of rules that ensure quality measurements. These include naming
    convention, robustness when varying execution parameters like
    `num-iters` and `num-samples` (no setup overhead, constant memory
    consumption).
    """

    # Class-level loggers: one root plus a child per rule category.
    log = logging.getLogger('BenchmarkDoctor')
    log_naming = log.getChild('naming')
    log_runtime = log.getChild('runtime')
    log_memory = log.getChild('memory')
    log.setLevel(logging.DEBUG)

    def __init__(self, args, driver=None):
        """Initialize with command line parameters.

        Optional `driver` parameter for injecting dependency; used for
        testing.
        """
        super(BenchmarkDoctor, self).__init__()
        self.driver = driver or BenchmarkDriver(args)
        self.results = {}

        if hasattr(args, 'markdown') and args.markdown:
            self.console_handler = MarkdownReportHandler(sys.stdout)
        else:
            self.console_handler = logging.StreamHandler(sys.stdout)
            self.console_handler.setFormatter(
                LoggingReportFormatter(use_color=sys.stdout.isatty()))
            self.console_handler.setLevel(
                logging.DEBUG if args.verbose else logging.INFO)
        self.log.addHandler(self.console_handler)
        self.log.debug('Checking tests: %s', ', '.join(self.driver.tests))
        # Health rules applied, in order, by analyze().
        self.requirements = [
            self._name_matches_benchmark_naming_convention,
            self._name_is_at_most_40_chars_long,
            self._no_setup_overhead,
            self._reasonable_setup_time,
            self._optimized_runtime_in_range,
            self._constant_memory_use
        ]

    def __del__(self):
        """Close log handlers on exit."""
        for handler in list(self.log.handlers):
            handler.close()
        self.log.removeHandler(self.console_handler)

    benchmark_naming_convention_re = re.compile(r'[A-Z][a-zA-Z0-9\-.!?]+')
    camel_humps_re = re.compile(r'[a-z][A-Z]')

    @staticmethod
    def _name_matches_benchmark_naming_convention(measurements):
        # Rule: names are UpperCamelCase with at most 4 "words".
        name = measurements['name']
        match = BenchmarkDoctor.benchmark_naming_convention_re.match(name)
        matched = match.group(0) if match else ''
        composite_words = len(BenchmarkDoctor.camel_humps_re.findall(name)) + 1

        if name != matched:
            BenchmarkDoctor.log_naming.error(
                "'%s' name doesn't conform to benchmark naming convention.",
                name)
            BenchmarkDoctor.log_naming.info(
                'See http://bit.ly/BenchmarkNaming')

        if composite_words > 4:
            BenchmarkDoctor.log_naming.warning(
                "'%s' name is composed of %d words.", name, composite_words)
            BenchmarkDoctor.log_naming.info(
                "Split '%s' name into dot-separated groups and variants. "
                "See http://bit.ly/BenchmarkNaming", name)

    @staticmethod
    def _name_is_at_most_40_chars_long(measurements):
        name = measurements['name']

        if len(name) > 40:
            BenchmarkDoctor.log_naming.error(
                "'%s' name is %d characters long.", name, len(name))
            BenchmarkDoctor.log_naming.info(
                'Benchmark name should not be longer than 40 characters.')

    @staticmethod
    def _select(measurements, num_iters=None, opt_level='O'):
        # Pick the measurement series matching the given iteration count
        # and optimization level by key prefix.
        prefix = measurements['name'] + ' ' + opt_level
        prefix += '' if num_iters is None else (' i' + str(num_iters))
        return [
            series for name, series in measurements.items()
            if name.startswith(prefix)
        ]

    @staticmethod
    def _optimized_runtime_in_range(measurements):
        # Rule: corrected runtime should fall in [20 μs, 1000 μs].
        name = measurements['name']
        setup, ratio = BenchmarkDoctor._setup_overhead(measurements)
        setup = 0 if ratio < 0.05 else setup
        # NOTE(review): `num_iters=i` here relies on `i` leaking out of the
        # inner list comprehension — that only works on Python 2; on
        # Python 3 this would raise NameError. Verify intended target.
        runtime = min(
            [(result.samples.min - correction) for i_series in [
                BenchmarkDoctor._select(measurements, num_iters=i)
                for correction in [(setup / i) for i in [1, 2]]
            ] for result in i_series])

        threshold = 1000
        if threshold < runtime:
            log = (BenchmarkDoctor.log_runtime.warning
                   if runtime < 10000 else BenchmarkDoctor.log_runtime.error)
            caveat = '' if setup == 0 else ' (excluding the setup overhead)'
            log("'%s' execution took at least %d μs%s.", name, runtime, caveat)

            def factor(base):  # suitable divisior that's integer power of base
                return int(
                    pow(base,
                        math.ceil(math.log(runtime / float(threshold), base))))

            BenchmarkDoctor.log_runtime.info(
                "Decrease the workload of '%s' by a factor of %d (%d), to be "
                "less than %d μs.",
                name, factor(2), factor(10), threshold)

        threshold = 20
        if runtime < threshold:
            log = (BenchmarkDoctor.log_runtime.error if runtime == 0 else
                   BenchmarkDoctor.log_runtime.warning)
            log("'%s' execution took %d μs.", name, runtime)

            BenchmarkDoctor.log_runtime.info(
                "Ensure the workload of '%s' has a properly measurable size"
                " (runtime > %d μs) and is not eliminated by the compiler (use"
                " `blackHole` function if necessary)."
                if runtime == 0 else
                "Increase the workload of '%s' to be more than %d μs.",
                name, threshold)

    @staticmethod
    def _setup_overhead(measurements):
        # Estimate one-time setup cost by comparing i1 vs i2 minima:
        # setup ≈ 2 * (t(i1) - t(i2)).
        select = BenchmarkDoctor._select
        ti1, ti2 = [
            float(min(mins)) for mins in
            [[result.samples.min for result in i_series]
             for i_series in
             [select(measurements, num_iters=i) for i in [1, 2]]]
        ]
        setup = (
            int(round(2.0 * (ti1 - ti2))) if ti2 > 20  # limit of accuracy
            else 0)
        ratio = (setup / ti1) if ti1 > 0 else 0
        return (setup, ratio)

    @staticmethod
    def _no_setup_overhead(measurements):
        # Rule: setup should be under 5% of the i1 runtime.
        setup, ratio = BenchmarkDoctor._setup_overhead(measurements)
        if ratio > 0.05:
            BenchmarkDoctor.log_runtime.error(
                "'%s' has setup overhead of %d μs (%.1f%%).",
                measurements['name'], setup, round((100 * ratio), 1))
            BenchmarkDoctor.log_runtime.info(
                'Move initialization of benchmark data to the `setUpFunction` '
                'registered in `BenchmarkInfo`.')

    @staticmethod
    def _reasonable_setup_time(measurements):
        # Rule: setUpFunction must finish within 200 ms.
        setup = min(
            [result.setup
             for result in BenchmarkDoctor._select(measurements)])
        if 200000 < setup:  # 200 ms
            BenchmarkDoctor.log_runtime.error(
                "'%s' setup took at least %d μs.",
                measurements['name'], setup)
            BenchmarkDoctor.log_runtime.info(
                'The `setUpFunction` should take no more than 200 ms.')

    @staticmethod
    def _constant_memory_use(measurements):
        # Rule: memory footprint must not depend on num-iters and must be
        # stable across repeated measurements.
        select = BenchmarkDoctor._select
        (min_i1, max_i1), (min_i2, max_i2) = [
            (min(memory_use), max(memory_use))
            for memory_use in
            [[r.mem_pages for r in i_series]
             for i_series in
             [select(measurements, num_iters=i) for i in [1, 2]]]
        ]
        range_i1, range_i2 = max_i1 - min_i1, max_i2 - min_i2
        normal_range = 15  # pages
        name = measurements['name']
        more_info = False

        if abs(min_i1 - min_i2) > max(range_i1, range_i2, normal_range):
            more_info = True
            BenchmarkDoctor.log_memory.error(
                "'%s' varies the memory footprint of the base "
                "workload depending on the `num-iters`.", name)

        if max(range_i1, range_i2) > normal_range:
            more_info = True
            BenchmarkDoctor.log_memory.warning(
                "'%s' has very wide range of memory used between "
                "independent, repeated measurements.", name)

        if more_info:
            BenchmarkDoctor.log_memory.info(
                "'%s' mem_pages [i1, i2]: min=[%d, %d] 𝚫=%d R=[%d, %d]",
                name,
                *[min_i1, min_i2, abs(min_i1 - min_i2), range_i1, range_i2])

    @staticmethod
    def _adjusted_1s_samples(runtime):
        u"""Return sample count that can be taken in approximately 1 second.

        Based on the runtime (μs) of one sample taken with num-iters=1.
        """
        if runtime == 0:
            return 2
        s = 1000000 / float(runtime)  # samples for 1s run
        s = int(pow(2, round(math.log(s, 2))))  # rounding to power of 2
        return s if s > 2 else 2  # always take at least 2 samples

    def measure(self, benchmark):
        """Measure benchmark with varying iterations and optimization levels.

        Returns a dictionary with benchmark name and `PerformanceTestResult`s.
        """
        self.log.debug('Calibrating num-samples for {0}:'.format(benchmark))
        r = self.driver.run(benchmark, num_samples=3, num_iters=1,
                            verbose=True)  # calibrate
        num_samples = self._adjusted_1s_samples(r.samples.min)

        def capped(s):
            # Cap the per-run sample count to keep total time bounded.
            return min(s, 200)
        run_args = [(capped(num_samples), 1), (capped(num_samples / 2), 2)]
        opts = self.driver.args.optimization
        opts = opts if isinstance(opts, list) else [opts]
        self.log.debug(
            'Runtime {0} μs yields {1} adjusted samples per second.'.format(
                r.samples.min, num_samples))
        self.log.debug(
            'Measuring {0}, 5 x i1 ({1} samples), 5 x i2 ({2} samples)'.format(
                benchmark, run_args[0][0], run_args[1][0]))
        # 5 repeats (suffix a..e) x 2 iteration counts x optimization levels.
        measurements = dict(
            [('{0} {1} i{2}{3}'.format(benchmark, o, i, suffix),
              self.driver.run(benchmark, num_samples=s, num_iters=i,
                              verbose=True, measure_memory=True))
             for o in opts
             for s, i in run_args
             for suffix in list('abcde')]
        )
        measurements['name'] = benchmark
        return measurements

    def analyze(self, benchmark_measurements):
        """Analyze whether benchmark fullfills all requirtements."""
        self.log.debug('Analyzing %s', benchmark_measurements['name'])
        for rule in self.requirements:
            rule(benchmark_measurements)

    def check(self):
        """Measure and analyse all enabled tests."""
        for test in self.driver.tests:
            self.analyze(self.measure(test))

    @staticmethod
    def run_check(args):
        """Validate benchmarks conform to health rules, report violations."""
        doctor = BenchmarkDoctor(args)
        doctor.check()
        # TODO non-zero error code when errors are logged
        # See https://stackoverflow.com/a/31142078/41307
        return 0
# Module-level state for the telnet chat bot, plus logger wiring.
contact = '*****@*****.**'
tn = telnetlib.Telnet(HOST)
endTime = 15
FingerData = ""
saveArea = "save.txt"
Ignore = ['RoboAdmin', 'Mamer', 'Relay']  # Welcoming bots that are annoying. This may change later
Connected = False

logger = logging.getLogger("Mr.Logger 1")
logger.setLevel(logging.DEBUG)  # set the level to debug status
LeLogger = logging.StreamHandler()  # get ready to output
# BUG FIX: the original did `LeLogger = logging.setLevel(logging.DEBUG)`,
# which (a) would raise AttributeError — the module has no setLevel — and
# (b) clobbered the handler just created. Set the level on the handler.
LeLogger.setLevel(logging.DEBUG)  # set handler to debug status
# BUG FIX: the asctime field was written `$(asctime)s`, which %-style
# logging formatting never substitutes; it must be `%(asctime)s`.
format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")  # format the message
LeLogger.setFormatter(format)
logger.addHandler(LeLogger)

##Testing
#
#logger.debug("debug message")
#
#
##
class RandomAgent(object):
    """The world's simplest agent: ignores its inputs and samples a random
    action from the environment's action space."""

    def __init__(self, action_state_space):
        self.action_space = action_state_space

    def act(self, observations, rewards, complete):
        """Return a random action; all arguments are ignored."""
        return self.action_space.sample()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('env_id', nargs='?', default='CartPole-v0',
                        help='Select which environment you want to run')
    argument = parser.parse_args()

    # Undo Gym's logger setup so it can be configured manually; the
    # defaults are usually fine if this is skipped.
    gym.undo_logger_setup()
    logger_Details = logging.getLogger()
    formating = logging.Formatter('[%(asctime)s] %(message)s')
    handling = logging.StreamHandler(sys.stderr)
    handling.setFormatter(formating)
    logger_Details.addHandler(handling)
    # BUG FIX: the logging *module* has no setLevel(); set the level on the
    # root logger configured above. Use logging.WARN or logging.DEBUG to
    # change the amount of output.
    logger_Details.setLevel(logging.INFO)

    environment = gym.make(argument.env_id)

    # Output directory for the monitor. It can be an existing directory
    # (including one with existing data); all files must be namespaced.
    outdir = '/tmp/results_agent'
    # BUG FIX: was wrappers.Monitor(env, ...) — `env` is undefined; the
    # environment created above is named `environment`.
    environment = wrappers.Monitor(environment, directory=outdir, force=True)
    environment.seed(0)
    agent = RandomAgent(environment.action_space)

    episode_count = 100
    rewards = 0
    complete = False

    for i in range(episode_count):
        ob = environment.reset()
        while True:
            action = agent.act(ob, rewards, complete)
            ob, rewards, complete, _ = environment.step(action)
            # BUG FIX: the original loop had no exit condition — it spun
            # forever inside the first episode; leave when the episode ends.
            if complete:
                break
            # Note there's no environment.render() here. But the environment
            # still can open a window and render if asked by
            # environment.monitor: it calls environment.render('rgb_array')
            # to record video. Video is not recorded every episode; see
            # capped_cubic_video_schedule for details.

    # Close the environment and write monitor result info to disk
    environment.close()

    # Upload to the scoreboard. We could also do this from another
    # process if we wanted.
    # BUG FIX: `logger` was undefined; use the logger configured above.
    logger_Details.info(
        "Successfully ran RandomAgent. Now trying to upload results to the scoreboard. If it breaks, you can always just try re-uploading the same results."
    )
    gym.upload(outdir)
def main(argv):
    """Entry point for Atmosphere instance initialization (redeploy-capable
    variant): parse CLI options, then deploy or redeploy atmo init.

    NOTE(review): this chunk was redacted — the "******" below replaced the
    end of the getopt option list and the `except getopt.GetoptError` clause,
    so the span between the option list and sys.exit(2) is not valid Python
    as-is and must be restored from the original source.
    """
    init_logs('/var/log/atmo/atmo_init_full.log')
    instance_data = {"atmosphere": {}}
    service_type = None
    instance_service_url = None
    instance_service_url = None  # NOTE(review): duplicate of the previous line
    server = None
    root_password = None
    user_id = None
    redeploy = False
    vnclicense = None
    try:
        opts, args = getopt.getopt(argv, "rt:u:s:i:T:N:v:", [
            "redeploy", "service_type=", "service_url=", "server=",
            "user_id=", "token=", "name=", "vnc_license=",
            "root_password="******"Invalid arguments provided.")
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-t", "--service_type"):
            instance_data["atmosphere"]["service_type"] = arg
            service_type = arg
        elif opt in ("-T", "--token"):
            instance_data["atmosphere"]["instance_token"] = arg
            instance_token = arg
        elif opt in ("-N", "--name"):
            instance_data["atmosphere"]["name"] = arg
            instance_token = arg  # NOTE(review): likely meant a name variable, not instance_token
        elif opt in ("-u", "--service_url"):
            instance_data["atmosphere"]["instance_service_url"] = arg
            instance_service_url = arg
        elif opt in ("-s", "--server"):
            instance_data["atmosphere"]["server"] = arg
            global ATMOSERVER
            ATMOSERVER = arg
            server = arg
        elif opt in ("-i", "--user_id"):
            instance_data["atmosphere"]["userid"] = arg
            user_id = arg
        elif opt in ("-v", "--vnc_license"):
            vnclicense = arg
        elif opt in ("-r", "--redeploy"):
            redeploy = True
        elif opt in ("--root_password"):  # NOTE(review): plain string, not a tuple — this is a substring test
            root_password = arg
        elif opt == '-d':
            global _debug
            _debug = 1
            # NOTE(review): the logging *module* has no setLevel(); this line
            # raises AttributeError — should be logging.getLogger().setLevel(...).
            logging.setLevel(logging.DEBUG)
    #TODO: What is this line for?
    source = "".join(args)
    logging.debug("Atmoserver - %s" % ATMOSERVER)
    logging.debug("Atmosphere init parameters- %s" % instance_data)
    global ATMO_INIT_FILES
    ATMO_INIT_FILES = "%s/api/v1/init_files" % ATMOSERVER
    logging.debug("Atmosphere init files location- %s" % ATMO_INIT_FILES)
    set_user_home_dir()
    add_zsh()
    if redeploy:
        redeploy_atmo_init(user_id)
    else:
        instance_metadata = get_metadata()
        logging.debug("Instance metadata - %s" % instance_metadata)
        deploy_atmo_init(user_id, instance_data, instance_metadata,
                         root_password, vnclicense)
    # NOTE(review): this log string is split across the chunk boundary in the
    # source; it presumably contained a newline or was a single line originally.
    logging.info("Atmo Init Completed.. 
 Checking for boot scripts.")
    run_boot_scripts()
import imp import logging import threading import time import functools import collections logging = logging.getLogger('libPython') logging.setLevel(0) def does_module_exist(module_name): try: imp.find_module(module_name) return True except ImportError: return False # src: http://code.activestate.com/recipes/66472/ def frange(start, end=None, inc=None): "A range function, that does accept float increments..." if end is None: end = start + 0.0 start = 0.0 if inc is None: inc = 1.0 L = [] while 1:
log.debug("will not refresh id %s" % k) save_db(data, db_file) if __name__ == "__main__": args = docopt(__doc__, version="0.krebs") # configure log level lol = args["--log"] numeric_level = getattr(logging, lol.upper(), None) if not isinstance(numeric_level, int): log.error("No such log level %s" % lol) print(__doc__) sys.exit(1) logging.basicConfig(level=numeric_level) log.setLevel(numeric_level) core_log.setLevel(numeric_level) log.debug("Log Level configured to debug") #log.debug(json.dumps(args,indent=4)) ali_home = prepare_home() if args["ali"]: if args["login"]: ali.login() log.info("Successfully logged in") elif args["refresh-open-orders"]: db = os.path.expanduser(args['--db']) confirm_db = os.path.expanduser(args['--confirm-db']) confirm = not args['--noconfirm'] if not confirm: log.info("Will not set confirm date for new orders") try:
# Compatible: # phuslu's GoProxy GAE Server (removed) # https://github.com/phuslu/goproxy/tree/server.gae from . import __version__ import sys sys.dont_write_bytecode = True #这条代码负责添加依赖库路径,不要改变位置 from . import compat import logging from .GlobalConfig import GC logging.setLevel(GC.LISTEN_DEBUGINFO) import os import queue import struct from threading import _start_new_thread as start_new_thread import socket import ssl import re from gevent import __version__ as geventver from OpenSSL import __version__ as opensslver from .common.path import icon_gotox from .ProxyServer import network_test, start_proxyserver from .ProxyHandler import AutoProxyHandler
from django.shortcuts import redirect, render_to_response from django import http from django.views.decorators.cache import cache_control from django.views.decorators.http import etag from django.views.decorators.vary import vary_on_headers from django.views.decorators.csrf import csrf_protect from django.db.models import F # used in Download view from django.template import RequestContext, loader from models import MediaFile # Database table for files from exception import MediasnakError import s3util, upload, listfiles, user, hashlib, delete from os import environ import logging if hasattr(logging, 'setLevel'): logging.setLevel(logging.DEBUG) else: logging.disable(logging.DEBUG) # A note on returning errors and infos: # req.META['HTTP_REFERER'] (sic) gets you the last page the user was on # but this is open to be modified by the user, could be blank etc. so should check it first # then the error/info would need to be set as a global variable, and the user redirected # alternatively, just let the user hit the back button # Utility function for etag decorator def login_template_etag(request, *args, **kwargs): return hashlib.sha1(str(user.get_user_id()) + environ['CURRENT_VERSION_ID']).hexdigest() @cache_control(no_cache=True, max_age=0)
def main(argv):
    """Entry point for Atmosphere instance initialization (full-deploy
    variant): parse CLI options, then configure the instance end-to-end.

    NOTE(review): this chunk was redacted — the "******" below replaced the
    end of the getopt option list and the `except getopt.GetoptError` clause,
    so the span between the option list and sys.exit(2) is not valid Python
    as-is and must be restored from the original source.
    """
    init_logs('/var/log/atmo/atmo_init_full.log')
    instance_data = {"atmosphere": {}}
    service_type = None
    instance_service_url = None
    instance_service_url = None  # NOTE(review): duplicate of the previous line
    server = None
    root_password = None
    user_id = None
    vnclicense = None
    try:
        opts, args = getopt.getopt(argv, "t:u:s:i:T:N:v:", [
            "service_type=", "service_url=", "server=", "user_id=",
            "token=", "name=", "vnc_license=",
            "root_password="******"Invalid arguments provided.")
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-t", "--service_type"):
            instance_data["atmosphere"]["service_type"] = arg
            service_type = arg
        elif opt in ("-T", "--token"):
            instance_data["atmosphere"]["instance_token"] = arg
            instance_token = arg
        elif opt in ("-N", "--name"):
            instance_data["atmosphere"]["name"] = arg
            instance_token = arg  # NOTE(review): likely meant a name variable, not instance_token
        elif opt in ("-u", "--service_url"):
            instance_data["atmosphere"]["instance_service_url"] = arg
            instance_service_url = arg
        elif opt in ("-s", "--server"):
            instance_data["atmosphere"]["server"] = arg
            global ATMOSERVER
            ATMOSERVER = arg
            server = arg
        elif opt in ("-i", "--user_id"):
            instance_data["atmosphere"]["userid"] = arg
            user_id = arg
        elif opt in ("-v", "--vnc_license"):
            #instance_data["atmosphere"]["vnc_license"] = arg
            vnclicense = arg
        elif opt in ("--root_password"):  # NOTE(review): plain string, not a tuple — this is a substring test
            root_password = arg
        elif opt == '-d':
            global _debug
            _debug = 1
            # NOTE(review): the logging *module* has no setLevel(); this line
            # raises AttributeError — should be logging.getLogger().setLevel(...).
            logging.setLevel(logging.DEBUG)
    #TODO: What is this line for?
    source = "".join(args)
    logging.debug("Atmoserver - %s" % ATMOSERVER)
    logging.debug("Atmosphere init parameters- %s" % instance_data)
    set_user_home_dir()
    instance_metadata = get_metadata()
    logging.debug("Instance metadata - %s" % instance_metadata)
    distro = get_distro()
    logging.debug("Distro - %s" % distro)
    linuxuser = instance_data['atmosphere']['userid']
    linuxpass = ""
    public_ip = get_public_ip(instance_metadata)
    hostname = get_hostname(instance_metadata)
    set_hostname(hostname, distro)
    instance_metadata['linuxusername'] = linuxuser
    instance_metadata["linuxuserpassword"] = linuxpass
    instance_metadata["linuxuservncpassword"] = linuxpass
    #TODO: Test this is multi-call safe
    update_sshkeys(instance_metadata)
    update_sudoers()
    if not in_sudoers(linuxuser):
        add_sudoers(linuxuser)
    if not in_etc_group('/etc/group', linuxuser):
        add_etc_group(linuxuser)
    #is_updated_test determines if this sensitive file needs updating
    if not is_updated_test("/etc/ssh/sshd_config"):
        ssh_config(distro)
    if root_password:
        set_root_password(root_password, distro)
    if not is_rhel(distro):
        run_command(['/usr/bin/apt-get', 'update'])
    #else:
    #    run_command(['/usr/bin/yum', 'check-update'])
    mount_storage()
    ldap_install()
    etc_skel_bashrc(linuxuser)
    run_command(['/bin/cp', '-rp', '/etc/skel/.', '/home/%s' % linuxuser])
    run_command([
        '/bin/chown', '-R',
        '%s:iplant-everyone' % (linuxuser, ),
        '/home/%s' % linuxuser
    ])
    run_command(['/bin/chmod', 'a+rwxt', '/tmp'])
    run_command(['/bin/chmod', 'a+rx', '/bin/fusermount'])
    run_command(['/bin/chmod', 'u+s', '/bin/fusermount'])
    vnc(linuxuser, distro, vnclicense)
    iplant_files(distro)
    #atmo_cl()
    nagios()
    distro_files(distro)
    update_timezone()
    shellinaboxd(distro)
    insert_modprobe()
    denyhost_whitelist()
    modify_rclocal(linuxuser, distro, hostname)
    notify_launched_instance(instance_data, instance_metadata)
    logging.info("Complete.")
import string import sys,os import time basedir = os.path.dirname(__file__) name = "upornia" #### LOG SETTINGS import logging info_log = name + '.log' info_log = os.path.join(basedir, info_log) logging.basicConfig() formatter = logging.Formatter("[%(asctime)s] %(levelname)s ==> %(message)s", "%d-%m-%Y %H:%M:%S") log = logging.getLogger() log.setLevel(logging.DEBUG) req_log = logging.getLogger('requests.packages.urllib3') req_log.setLevel(logging.DEBUG) req_log.propagate = True console = logging.StreamHandler() console.setLevel(logging.DEBUG) console.setFormatter(formatter) log.addHandler(console) i_handler = logging.FileHandler(info_log) i_handler.setLevel(logging.INFO) i_handler.setFormatter(formatter) log.addHandler(i_handler) def handle_exception(exc_type, exc_value, exc_traceback): if issubclass(exc_type, KeyboardInterrupt): sys.__excepthook__(exc_type, exc_value, exc_traceback)
( options, content_list ) = option_parser.parse_args( values = Structure() ) for key, value in vars( options ).iteritems(): configuration.set( PROGRAM, key, value ) for key in ( 'local_servers', 'remote_servers', 'content' ): if configuration.has_option( PROGRAM, key ): configuration.set( PROGRAM, key, configuration.get( PROGRAM, key ).strip().replace( ',', '\n' ).split() ) if not content_list and configuration.has_option( PROGRAM, 'content' ): content_list = configuration.get( PROGRAM, 'content' ) log_level = 10*( 6 - configuration.getint( PROGRAM, 'verbosity' ) ) if log_level < logging.DEBUG: log_level = logging.DEBUG + log_level//10 - 1 assert logging.DEBUG - 2 <= log_level <= logging.CRITICAL, 'Invalid log level' logging = logging.getLogger( 'LOCKSS' ) logging.setLevel( log_level ) urlparse.uses_netloc += Content.Action.values sleeper = Sleeper() snapshot = configuration.get( PROGRAM, 'snapshot' ) delete_snapshot = False try: if configuration.getboolean( PROGRAM, 'test' ): content_list, local_clients, remote_clients = self_test_startup() else: local_clients = dict( zip( Content.Action.values, ( [] for value in Content.Action.values ) ) ) for server in configuration.get( PROGRAM, 'local_servers' ): url_components = urlparse.urlparse( server )
print check_data if check_data[0] == 0: print "i dont have value" img = gen_qr_code(url) img.save(img_buf) im_data = img_buf.getvalue() data_url = 'data:image/svg+xml;base64,' + base64.encodestring(im_data) content = json.dumps(data_url.strip()) c.execute( "INSERT OR IGNORE INTO qrcode(shortUrl, qrcode_base) VALUES(?, ?)", (url, content)) conn.commit() else: content = check_data[0] resp = Response(content, mimetype='application/json') resp.headers['Access-Control-Allow-Origin'] = '*' resp.headers['Access-Control-Allow-Method'] = 'GET, OPTIONS' resp.headers['Access-Control-Allow-Headers'] = 'Content-Type' return resp if __name__ == '__main__': description = """QR_iosities API""" level = get_level(os.environ.get('LOG_LEVEL', 'INFO')) logging.setLevel(level) logger = logging.getLogger("QR_iosities") app.debug = True app.run(host='0.0.0.0')
from django.core.handlers import wsgi import pusher import constraint import logging as _ SETTINGS_PATH="django_tornado.settings" _H = _.StreamHandler() _F = _.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') logging = _.getLogger('') logging.setLevel(_.DEBUG) logging.addHandler(_H) _H.setFormatter(_F) def main(): wsgi_app = tornado.wsgi.WSGIContainer(wsgi.WSGIHandler()) Router = sockjs.tornado.SockJSRouter(pusher.PushClient, '/stream') Router.urls.append((r'/static/(.*)$', tornado.web.StaticFileHandler, {'path': './static'})) Router.urls.append(('.*', tornado.web.FallbackHandler, dict(fallback=wsgi_app))) logging.debug("start") ping = pusher.Pinger() ping.start() tornado_app = tornado.web.Application(Router.urls)
def set_level(level): logging.setLevel(level)
def main(argv):
    """Entry point for Atmosphere instance initialization (redeploy-capable
    variant): parse CLI options, then deploy or redeploy atmo init.

    NOTE(review): this chunk was redacted — the "******" below replaced the
    end of the getopt option list and the `except getopt.GetoptError` clause,
    so the span between the option list and sys.exit(2) is not valid Python
    as-is and must be restored from the original source.
    """
    init_logs('/var/log/atmo/atmo_init_full.log')
    instance_data = {"atmosphere": {}}
    service_type = None
    instance_service_url = None
    instance_service_url = None  # NOTE(review): duplicate of the previous line
    server = None
    root_password = None
    user_id = None
    redeploy = False
    vnclicense = None
    try:
        opts, args = getopt.getopt(
            argv,
            "rt:u:s:i:T:N:v:",
            ["redeploy", "service_type=", "service_url=", "server=",
             "user_id=", "token=", "name=", "vnc_license=",
             "root_password="******"Invalid arguments provided.")
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-t", "--service_type"):
            instance_data["atmosphere"]["service_type"] = arg
            service_type = arg
        elif opt in ("-T", "--token"):
            instance_data["atmosphere"]["instance_token"] = arg
            instance_token = arg
        elif opt in ("-N", "--name"):
            instance_data["atmosphere"]["name"] = arg
            instance_token = arg  # NOTE(review): likely meant a name variable, not instance_token
        elif opt in ("-u", "--service_url"):
            instance_data["atmosphere"]["instance_service_url"] = arg
            instance_service_url = arg
        elif opt in ("-s", "--server"):
            instance_data["atmosphere"]["server"] = arg
            global ATMOSERVER
            ATMOSERVER = arg
            server = arg
        elif opt in ("-i", "--user_id"):
            instance_data["atmosphere"]["userid"] = arg
            user_id = arg
        elif opt in ("-v", "--vnc_license"):
            vnclicense = arg
        elif opt in ("-r", "--redeploy"):
            redeploy = True
        elif opt in ("--root_password"):  # NOTE(review): plain string, not a tuple — this is a substring test
            root_password = arg
        elif opt == '-d':
            global _debug
            _debug = 1
            # NOTE(review): the logging *module* has no setLevel(); this line
            # raises AttributeError — should be logging.getLogger().setLevel(...).
            logging.setLevel(logging.DEBUG)
    #TODO: What is this line for?
    source = "".join(args)
    logging.debug("Atmoserver - %s" % ATMOSERVER)
    logging.debug("Atmosphere init parameters- %s" % instance_data)
    global ATMO_INIT_FILES
    ATMO_INIT_FILES = "%s/api/v1/init_files" % ATMOSERVER
    logging.debug("Atmosphere init files location- %s" % ATMO_INIT_FILES)
    set_user_home_dir()
    add_zsh()
    if redeploy:
        redeploy_atmo_init(user_id)
    else:
        instance_metadata = get_metadata()
        logging.debug("Instance metadata - %s" % instance_metadata)
        deploy_atmo_init(user_id, instance_data, instance_metadata,
                         root_password, vnclicense)
    # NOTE(review): this log string is split across the chunk boundary in the
    # source; it presumably contained a newline or was a single line originally.
    logging.info("Atmo Init Completed.. 
 Checking for boot scripts.")
    run_boot_scripts()
def set_log(func_name, level): logger = logging.getLogger(func_name) logger = logging.setLevel(level)
def __init__(self): # Initialize external modules loggertools.setLevel(loggertools.Level.WARNING) pluginpath = os.path.dirname(os.path.realpath(__file__)) + "/plugins" super().__init__('housekeeper', pluginpath=pluginpath) self.commands = kit.CommandManager(self) self.cron = kit.CronManager(self, state_file=user_path( utils.UserPathType.DATA, 'state.json')) self.register_extension_point(kit.AppBridge) self.register_extension_point(kit.APIEndpoint) self.register_extension_class(kit.CronCommand) # Read command line app_parser = self.commands.build_base_argument_parser() app_args, dummy = app_parser.parse_known_args(sys.argv[1:]) # Read config files self.settings = kit.YAMLStore() configfiles = [user_path(utils.UserPathType.CONFIG, 'housekeeper.yml')] configfiles.extend(getattr(app_args, 'config-files', [])) for cf in configfiles: try: with open(cf) as fh: self.settings.load(fh) except FileNotFoundError: msg = "Config file «{path}» not found" msg = msg.format(path=cf) self.logger.warning(msg) # Apply command line arguments: debug level cf_log_level = self.settings.get('log-level', None) if cf_log_level: try: level = getattr(logging.Level, cf_log_level, None) logging.setLevel(level) except AttributeError: msg = "Invalid «log-level={level}» key in settings" msg = msg.format(level=cf_log_level) self.logger.error(msg) level = loggertools.getLevel() diff = app_args.verbose - app_args.quiet loggertools.setLevel(loggertools.Level.incr(level, n=diff)) # Initialize cache self.cache = cache.DiskCache( basedir=user_path(utils.UserPathType.CACHE)) # Load plugins for plugin in app_args.plugins: self.load_plugin(plugin) for plugin in self.settings.get('plugin', {}): key = 'plugin.{}.enabled'.format(plugin) if self.settings.get(key, False): self.load_plugin(plugin)
sys.exit(1) if recursion > 3: msg(__name__, 'init:import:{}'.format(name), 'Failed to install module.', logging.error, time_start=time_start, traceback=format_exc()) sys.exit(1) recursion += 1 lib = install_lib(name,recursion=recursion) msg(__name__, 'init:import:{}'.format(name), 'Done.', logging.info, time_start=time_start) return lib log = logging logging = logging.getLogger(__name__) logging.setLevel(log.INFO) msg(__name__,'init','Start.', logging.info, time_start=time_start) msg(__name__,'init:import','Start.', logging.info, time_start=time_start) try: pickle = install_lib('pickle') requests = install_lib('requests') elastics = install_lib('elasticsearch5') except: msg(__name__, 'init:import', 'Fail.', logging.error, time_start=time_start, traceback=format_exc()) sys.exit(1) log.getLogger("requests").setLevel(log.ERROR) log.getLogger("urllib3").setLevel(log.ERROR)
HARDWARE_FLAG = True DEBUG_FLAG = False LIGHT_DEBUG = False import logging logger_config = { #'filename': 'conference_timer.log', 'format': '%(asctime)s - %(message)s', 'datefmt': '%Y-%m-%d %H:%M:%S' } logging.basicConfig(**logger_config) if DEBUG_FLAG: #logger_config['level'] = logging.INFO logger_config['level'] = logging.DEBUG else: logger_config['level'] = logging.WARNING logging = logging.getLogger('conference_timer_logger') logging.setLevel(logger_config['level']) ### Schedule management import path, pathlib with open( path.Path.joinpath(str(pathlib.Path.home()), 'conferenceTimer/scripts/computername.txt'), 'r') as fin: ROOM_CODE = fin.read().splitlines()[0] SCHEDULE_FILE = path.Path.joinpath(str(pathlib.Path.home()), 'conferenceTimer/schedule.csv') del path, pathlib ### Light timing from datetime import timedelta STARTING_WARNING = timedelta(minutes=1)
def writeLog(message): # logger=logging.getLogger() # filename = time.strftime('%Y-%m-%d',time.localtime(time.time())) # handler=logging.FileHandler("../test-log/"+filename+"error") logging.setLevel(logging.NOTSET) logging.info(message)
def setOutput(level): verbose = level logging.setLevel(level)
def main():
    """AutoNetkit CLI entry point: parse options, then compile and optionally
    plot/deploy/verify the described network."""
    version="git-current"
    try:
        version=pkg_resources.get_distribution("AutoNetkit").version
    except:
        pass
    # make it easy to turn on and off plotting and deploying from command line
    usage = ("\nNetkit: %prog -f filename.graphml --netkit\n"
             "Junosphere: %prog -f filename.graphml --junos\n"
             "Additional documentation at http://packages.python.org/AutoNetkit/")
    opt = optparse.OptionParser(usage, version="%prog " + str(version))
    opt.add_option('--plot', '-p', action="store_true", dest="plot",
                   default=False, help="Plot lab")
    opt.add_option('--deploy', action="store_true", default=False,
                   help="Deploy lab to hosts")
    opt.add_option('--verify', action="store_true", default=False,
                   help="Verify lab on hosts")
    opt.add_option('--save', action="store_true", default=False,
                   help="Save the network for future use (eg verification")
    opt.add_option('--file', '-f', default= None,
                   help="Load configuration from FILE")
    opt.add_option('--bgp_policy', '-b', default= None,
                   help="Load BGP policy statements from FILE")
    opt.add_option('--debug', action="store_true", default=False,
                   help="Debugging output")
    # Deployment environments
    opt.add_option('--netkit', action="store_true", default=False, help="Compile Netkit")
    opt.add_option('--libvirt', action="store_true", default=False, help="Compile Libvirt")
    opt.add_option('--cbgp', action="store_true", default=False, help="Compile cBGP")
    opt.add_option('--dynagen', action="store_true", default=False, help="Compile dynagen")
    opt.add_option('--junos', action="store_true", default=False,
                   help="Compile Junosphere (legacy command)")
    # Juniper options
    opt.add_option('--junosphere', action="store_true", default=False,
                   help="Compile to Junosphere")
    opt.add_option('--junosphere_olive', action="store_true", default=False,
                   help="Compile to Olive-based Junosphere")
    opt.add_option('--olive', action="store_true", default=False,
                   help="Compile to Qemu-based Olive")
    # NOTE(review): the help string below is split/garbled across the chunk
    # boundary in the source ("Custom Qemu … install (6 interface count");
    # it must be restored from the original file.
    opt.add_option('--olive_qemu_patched', action="store_true", default=False,
                   help="Custom Qemu 
 install (6 interface count")
    opt.add_option('--isis', action="store_true", default=False, help="Use IS-IS as IGP")
    opt.add_option('--ospf', action="store_true", default=False, help="Use OSPF as IGP")
    options, arguments = opt.parse_args()
    config.add_logging(console_debug = options.debug)

    #### Main code
    if not options.file:
        LOG.warn("Please specify topology file")
        sys.exit(0)

    # NOTE(review): the logging *module* has no setLevel() — this raises
    # AttributeError; should be logging.getLogger().setLevel(logging.DEBUG).
    logging.setLevel(logging.DEBUG)

    #TODO: if topology file doesn't exist, then try inside lib/examples/topologies/
    f_name = options.file
    igp = None
    if options.ospf:
        igp = "ospf"
    if options.isis:
        igp = "isis"
    use_junosphere = (options.junos or options.junosphere)
    inet = Internet(netkit=options.netkit, cbgp=options.cbgp,
                    dynagen=options.dynagen,
                    junosphere=use_junosphere,
                    junosphere_olive=options.junosphere_olive,
                    olive=options.olive,
                    policy_file = options.bgp_policy,
                    deploy = options.deploy,
                    libvirt = options.libvirt,
                    olive_qemu_patched=options.olive_qemu_patched,
                    igp=igp)
    inet.load(f_name)
    inet.add_dns()
    inet.compile()
    #inet.save()
    if(options.plot):
        inet.plot()
    if(options.deploy):
        inet.deploy()
    if options.verify:
        inet.verify()
    # finally, save the network
    inet.save()
    inet.dump()
def SetLevel(l): logging.setLevel(l)
if action == 0: logging.critical('Error: check options required.') help() else: LOG_FILE=os.path.join(BCK_PATH,'backup-status.log') if not os.path.isdir(BCK_PATH): logging.error("Unable write log file %s. Aborted.", LOG_FILE) sys.exit(1) handler=logging.FileHandler(LOG_FILE, mode='w') handler.setFormatter(logging.Formatter('%(asctime)s: %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')) oLogging.addHandler(handler) if BCK_STATUS_FILE == '-': # Permit to show info/Warning/errors/critical on err while status is printed out to standard output. logging.setLevel(20) if ACTION == 'run': build_backup_status(BCK_PATH, BCK_STATUS_FILE) sys.exit() ######################### # Define global oLogging object. #logging.basicConfig(format='%(asctime)s: %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') if __name__ == "__main__": main(sys.argv[1:])
essLocalFileHandler = logging.handlers.TimedRotatingFileHandler(LogFile, when='W6', backupCount=1040) essLocalFileHandler.setLevel(LogLevel) essLocalFileHandler.setFormatter(essFormatter1) #essLocalFileHandler.doRollover() ########################### # LocalConsoleHandler essConsoleHandler = logging.StreamHandler(sys.stdout) essConsoleHandler.setLevel(LogLevel) essConsoleHandler.setFormatter(essFormatter2) ########################## # Add handlers to default logger if MultiProc: logger = multiprocessing.get_logger() logger.setLevel(LogLevel) logging = logging.getLogger('') logging.setLevel(0) logging.addHandler(essLocalFileHandler) if MultiProc: logger.addHandler(essLocalFileHandler) if Console: logging.addHandler(essConsoleHandler) if MultiProc: logger.addHandler(essConsoleHandler) logging.debug('LogFile: ' + str(LogFile)) logging.debug('Time: ' + str(Time)) logging.debug('Status: ' + str(Status)) logging.debug('Run: ' + str(Run)) AgentIdentifierValue = ESSDB.DB().action('ESSConfig','GET',('Value',),('Name','AgentIdentifierValue'))[0][0] ExtDBupdate = int(ESSDB.DB().action('ESSConfig','GET',('Value',),('Name','ExtDBupdate'))[0][0]) x=WorkingThread(ProcName)
### Debug and Logging controls HARDWARE_FLAG = True DEBUG_FLAG = False LIGHT_DEBUG = False import logging logger_config = {#'filename': 'conference_timer.log', 'format': '%(asctime)s - %(message)s', 'datefmt': '%Y-%m-%d %H:%M:%S'} logging.basicConfig(**logger_config) if DEBUG_FLAG: #logger_config['level'] = logging.INFO logger_config['level'] = logging.DEBUG else: logger_config['level'] = logging.WARNING logging = logging.getLogger('conference_timer_logger') logging.setLevel(logger_config['level']) ### Schedule management import path, pathlib with open(path.Path.joinpath(str(pathlib.Path.home()), 'conferenceTimer/scripts/computername.txt'), 'r') as fin: ROOM_CODE = fin.read().splitlines()[0] SCHEDULE_FILE = path.Path.joinpath(str(pathlib.Path.home()), 'conferenceTimer/schedule.csv') del path, pathlib ### Light timing from datetime import timedelta STARTING_WARNING = timedelta(minutes=1) TALK_WARNING = timedelta(minutes=3) QUESTION_WARNING = timedelta(minutes=1)