import os import time import urllib import argparse from core.config import Config from core.logger import Logger from selenium import webdriver from selenium.webdriver.chrome.options import Options logger = Logger(logger="Auto-Submit").getlog() config = Config() def open_browser(args): ''' Windows User need to set the binary and chromedrive path ''' options = Options() #options.binary_location = config.binary_prefix options.add_argument("--headless") options.add_argument("--disable-gpu") options.add_argument("--disable-dev-shm-usage") browser = webdriver.Chrome(options=options) #browser = webdriver.Chrome(config.cd_prefix, options=options) return browser def colse_browser(args, browser): try: browser.close()
def __init__(self):
    """Create the component and attach a logger named after this module."""
    self.logger = Logger(__name__)
def __init__(self):
    """Ensure the cache directory exists, then attach a module logger."""
    # String concatenation is kept deliberately: assumes CACHE_DIR begins
    # with a path separator, so Path joining would misbehave — TODO confirm.
    cache_dir = os.getcwd() + self.CACHE_DIR
    Path(cache_dir).mkdir(parents=True, exist_ok=True)
    self.logger = Logger(__name__)
def __init__(self):
    """Initialize empty settings, DB-cache, and change-listener registries."""
    self.settings = {}
    self.db_cache = {}
    self.change_listeners = {}
    self.logger = Logger(__name__)
from plugin.managers.m_trakt.credential import TraktOAuthCredentialManager from plugin.models import TraktAccount from plugin.modules.core.manager import ModuleManager from plugin.preferences import Preferences from plugin.scrobbler.core.session_prefix import SessionPrefix from plex import Plex from plex_activity import Activity from plex_metadata import Metadata from six.moves.urllib.parse import quote_plus, urlsplit, urlunsplit from requests.packages.urllib3.util import Retry from trakt import Trakt import os import uuid log = Logger() class Main(object): modules = [ # core UpdateChecker() ] def __init__(self): Header.show(self) # Initial configuration update self.on_configuration_changed() # Initialize clients
from core.helpers import all from core.logger import Logger from core.plugin import PLUGIN_VERSION_BASE from lxml import etree import shutil import os log = Logger('core.migrator') class Migrator(object): migrations = [] @classmethod def register(cls, migration): cls.migrations.append(migration()) @classmethod def run(cls): for migration in cls.migrations: log.debug('Running migration %s', migration) migration.run() class Migration(object): @property def code_path(self): return Core.code_path @property def plex_path(self):
def __init__(self):
    """Start with an empty buddy list; capacity defaults to 1000 slots."""
    self.logger = Logger("buddy_manager")
    self.buddy_list = {}
    self.buddy_list_size = 1000
def __init__(self):
    """Initialize an empty hub mapping and an empty list of sources."""
    self.hub = {}
    self.sources = []
    self.logger = Logger(__name__)
def __init__(self, connection_pool):
    """Store the shared connection pool and build a class-named logger.

    :param connection_pool: pool object used to obtain connections
    """
    self.connection_pool = connection_pool
    self.logger = Logger(self.__class__.__name__).get()
def __init__(self, filename):
    """Remember the target file path and attach a module logger.

    :param filename: path of the file this object will operate on
    """
    self.logger = Logger(__name__)
    self.filename = filename
def __init__(self, filename):
    """Bind the parser to one database file and create its logger.

    :param filename: path to the database file (presumably an .mmdb file
        given the logger name — TODO confirm against callers)
    """
    self.logger = Logger("mmdb_parser")
    self.filename = filename
def __init__(self):
    """Create the manager with a dedicated "command_alias_manager" logger."""
    self.logger = Logger("command_alias_manager")
def __init__(self):
    """Start disconnected: no socket and no character identity yet."""
    self.logger = Logger("Budabot")
    # Connection and identity state are filled in later, after login.
    self.socket = None
    self.char_id = None
    self.char_name = None
def __init__(self):
    """Initialize an empty buddy list with zero known capacity."""
    self.logger = Logger(__name__)
    self.buddy_list = {}
    self.buddy_list_size = 0
def main(argv=None):
    """Entry point: parse CLI options and run badfish actions per host.

    :param argv: optional argument list (argparse falls back to sys.argv[1:])
    :return: 0 on completion; per-host failures are logged, not raised
    """
    parser = argparse.ArgumentParser(
        description="Client tool for changing boot order via Redfish API.")
    parser.add_argument("-H", help="iDRAC host address")
    parser.add_argument("-u", help="iDRAC username", required=True)
    parser.add_argument("-p", help="iDRAC password", required=True)
    parser.add_argument("-i", help="Path to iDRAC interfaces yaml", default=None)
    parser.add_argument("-t", help="Type of host. Accepts: foreman, director")
    parser.add_argument("-l", "--log",
                        help="Optional argument for logging results to a file")
    parser.add_argument("-f", "--force", dest='force', action='store_true',
                        help="Optional argument for forced clear-jobs")
    parser.add_argument("--host-list",
                        help="Path to a plain text file with a list of hosts.",
                        default=None)
    parser.add_argument("--pxe", help="Set next boot to one-shot boot PXE",
                        action="store_true")
    parser.add_argument("--boot-to",
                        help="Set next boot to one-shot boot to a specific device")
    parser.add_argument("--boot-to-type",
                        help="Set next boot to one-shot boot to either director or foreman")
    parser.add_argument("--boot-to-mac",
                        help="Set next boot to one-shot boot to a specific MAC address on the target")
    parser.add_argument("--reboot-only", help="Flag for only rebooting the host",
                        action="store_true")
    parser.add_argument("--power-cycle",
                        help="Flag for sending ForceOff instruction to the host",
                        action="store_true")
    parser.add_argument("--racreset", help="Flag for iDRAC reset",
                        action="store_true")
    parser.add_argument("--check-boot",
                        help="Flag for checking the host boot order",
                        action="store_true")
    parser.add_argument("--firmware-inventory", help="Get firmware inventory",
                        action="store_true")
    parser.add_argument("--export-configuration",
                        help="Export system configuration to XML",
                        action="store_true")
    parser.add_argument("--clear-jobs",
                        help="Clear any schedule jobs from the queue",
                        action="store_true")
    parser.add_argument("-v", "--verbose", help="Verbose output",
                        action="store_true")
    parser.add_argument("-r", "--retries",
                        help="Number of retries for executing actions.",
                        default=RETRIES)
    args = vars(parser.parse_args(argv))

    # Verbosity only affects the console level; the file handler is always DEBUG.
    log_level = DEBUG if args["verbose"] else INFO
    logger = Logger()
    logger.start(level=log_level)
    if args["log"]:
        file_handler = FileHandler(args["log"])
        file_handler.setFormatter(Formatter(logger.LOGFMT))
        file_handler.setLevel(DEBUG)
        logger.addHandler(file_handler)

    host_list = args["host_list"]
    host = args["H"]
    if host_list:
        try:
            with open(host_list, "r") as _file:
                # Iterate the file lazily (one host per line) instead of
                # materializing the whole list with readlines().
                for _host in _file:
                    try:
                        execute_badfish(_host.strip(), args, logger)
                    except SystemExit:
                        # One bad host must not abort the rest of the batch.
                        continue
        except IOError as ex:
            logger.debug(ex)
            logger.error("There was something wrong reading from %s" % host_list)
    elif not host:
        logger.error(
            "You must specify at least either a host (-H) or a host list (--host-list)."
        )
    else:
        execute_badfish(host, args, logger)
    return 0
def __init__(self):
    """Initialize empty handler, event-type, and DB-cache registries."""
    self.logger = Logger(__name__)
    self.handlers = {}
    self.event_types = []
    self.db_cache = {}
def __init__(self):
    """Start with no private-channel connection established."""
    self.private_channel_conn = None
    self.logger = Logger(__name__)
def __init__(self):
    """Create the manager with a dedicated "access_manager" logger."""
    self.logger = Logger("access_manager")
self.c2server.close() self.c2server.wait() if self.gui_server: self.gui_server.close() self.gui_server.wait() if self.gui: self.gui.close() @Slot() def exit(self): self.close() QApplication.instance().exit() if __name__ == '__main__': logger = Logger() logger.enable() qInstallMessageHandler(qt_message_handler) def exception_hook(exctype, value, tb): logging.critical(''.join(traceback.format_exception(exctype, value, tb))) sys.exit(1) sys.excepthook = exception_hook parser = argparse.ArgumentParser(description=__app_name__) parser.add_argument('--nogui', action='store_true', help='run in headless mode') parser.add_argument('--reset', action='store_true', help='remove application settings') parser.add_argument('-r', '--remote', action='store_true', help='run in remote control mode') parser.add_argument('-V', '--version', action='version', version='v{}'.format(__version__), help='print version and exit') parser.add_argument("--log-level", default=logging.NOTSET,
def __init__(self):
    """Initialize an empty job queue; ids start counting from zero."""
    self.jobs = []
    self.job_id_index = 0
    self.logger = Logger(__name__)
args.device = torch.device( 'cuda:{}'.format(args.device) if torch.cuda.is_available() else 'cpu') model = LeNetVDO(args).to(args.device) args.batch_size, args.test_batch_size = 32, 32 train_loader, test_loader = load_mnist(args) args.data_size = len(train_loader.dataset) for layer in model.children(): i = 0 if hasattr(layer, 'log_alpha'): fmt.update({'{}log_alpha'.format(i + 1): '3.3e'}) i += 1 logger = Logger('lenet-vdo', fmt=fmt) logger.print(args) logger.print(model) criterion = ClassificationLoss(model, args) optimizer = torch.optim.Adam( [p for p in model.parameters() if p.requires_grad], lr=args.lr) for epoch in range(args.epochs): t0 = time() model.train() model.set_flag('zero_mean', False) criterion.step() elbo, cat_mean, kls, accuracy = [], [], [], []
from core.eventing import EventManager from core.helpers import get_pref from core.logger import Logger from data.watch_session import WatchSession from plex.plex_media_server import PlexMediaServer from plex.plex_metadata import PlexMetadata from pts.scrobbler import Scrobbler, ScrobblerMethod log = Logger('pts.scrobbler_websocket') class WebSocketScrobbler(ScrobblerMethod): name = 'WebSocketScrobbler' def __init__(self): super(WebSocketScrobbler, self).__init__() EventManager.subscribe('notifications.playing', self.update) @classmethod def test(cls): if PlexMediaServer.get_sessions() is None: log.info( "Error while retrieving sessions, assuming WebSocket method isn't available" ) return False server_info = PlexMediaServer.get_info() if server_info is None: log.info('Error while retrieving server info for testing') return False
#!/user/bin/env python #coding=utf-8 ''' @project : bitest @author : djcps #@file : testBase.py #@ide : PyCharm #@time : 2019-05-05 10:23:02 ''' import requests from json import dumps from core.logger import Logger from requests.packages.urllib3.exceptions import InsecureRequestWarning logger = Logger().logger # 禁用安全请求警告 requests.packages.urllib3.disable_warnings(InsecureRequestWarning) class BaseTest(requests.Session): ''' 接口基类,供后续脚本使用 '''
from core.eventing import EventManager from core.helpers import get_pref from core.logger import Logger from data.watch_session import WatchSession from plex.plex_media_server import PlexMediaServer from plex.plex_metadata import PlexMetadata from plex.plex_preferences import PlexPreferences from pts.scrobbler import Scrobbler, ScrobblerMethod log = Logger('pts.scrobbler_logging') class LoggingScrobbler(ScrobblerMethod): name = 'LoggingScrobbler' def __init__(self): super(LoggingScrobbler, self).__init__() EventManager.subscribe('scrobbler.logging.update', self.update) @classmethod def test(cls): # Try enable logging if not PlexPreferences.log_debug(True): log.warn('Unable to enable logging') # Test if logging is enabled if not PlexPreferences.log_debug(): log.warn( 'Debug logging not enabled, unable to use logging activity method.' )
from endpoint.index.handler import generateDocument, generateRepresentation from endpoint.index.model import Post from core.feluda import ComponentType, Feluda from core.logger import Logger import json import requests from os import environ log = Logger(__name__) secret = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjMwN2IzMTYwLTE3MjktNDI2MS04MjExLTU1YzFlOTc1ZWQ2NCIsInVzZXJuYW1lIjoiYWRtaW4iLCJyb2xlIjoiYWRtaW4iLCJpYXQiOjE2NDI2Nzc4MTh9.p9UZ1xt1kOSyBTBMr3IoeONroZZVJYfUHcM7d9CHdR0" headersAuth = { "Authorization": "Basic " + str(secret), } def reporter(ch, method, properties, body): print("MESSAGE RECEIVED") # print(type(body)) # print(type(json.loads(body))) report = json.loads(json.loads(body)) # print(type(report)) log.prettyprint(report) try: requests.post( environ.get("KOSH_API_URL") + "/index/report", headers=headersAuth, json=report, ) ch.basic_ack(delivery_tag=method.delivery_tag)
def __init__(self):
    """Register the built-in access levels ("none" and "all") and a logger."""
    self.logger = Logger(__name__)
    # Lowest (0) and highest (100) built-in levels with their handlers.
    no_access_level = {"label": "none", "level": 0, "handler": self.no_access}
    all_access_level = {"label": "all", "level": 100, "handler": self.all_access}
    self.access_levels = [no_access_level, all_access_level]
'te_acc_ens100': '.4f', 'te_acc_stoch': '.4f', 'te_acc_ens10': '.4f', 'te_acc_perm_sigma': '.4f', 'te_acc_zero_mean': '.4f', 'te_acc_perm_sigma_ens': '.4f', 'te_acc_zero_mean_ens': '.4f', 'te_nll_ens100': '.4f', 'te_nll_stoch': '.4f', 'te_nll_ens10': '.4f', 'te_nll_perm_sigma': '.4f', 'te_nll_zero_mean': '.4f', 'te_nll_perm_sigma_ens': '.4f', 'te_nll_zero_mean_ens': '.4f', 'time': '.3f'} logger = Logger("lenet5-VDO", fmt=fmt) net = LeNet5() net.cuda() logger.print(net) trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor()) trainloader = torch.utils.data.DataLoader(trainset, batch_size=200, shuffle=True, num_workers=4, pin_memory=True) testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transforms.ToTensor()) testloader = torch.utils.data.DataLoader(testset, batch_size=200, shuffle=False, num_workers=4, pin_memory=True)
from core.db import DB
from core.logger import Logger
from core.registry import Registry

db = Registry.get_instance("db")
logger = Logger("core.upgrade")


def table_info(table_name):
    """Return column metadata for *table_name*, normalized across backends.

    MySQL rows are reshaped so that `name`/`type` match SQLite's
    PRAGMA table_info output.
    """
    if db.type == DB.SQLITE:
        return db.query("PRAGMA table_info(%s)" % table_name)
    if db.type == DB.MYSQL:
        rows = db.query("DESCRIBE %s" % table_name)

        def normalize_table_info(row):
            # Map MySQL's Field/Type columns onto the SQLite-style names.
            row.name = row.Field
            row.type = row.Type.upper()
            return row

        return [normalize_table_info(row) for row in rows]
    raise Exception("Unknown database type '%s'" % db.type)


def table_exists(table_name):
    """Best-effort existence check: probe the table with a one-row SELECT."""
    try:
        db.query("SELECT * FROM %s LIMIT 1" % table_name)
    except Exception:
        return False
    return True
from core.dict_object import DictObject from core.logger import Logger from core.registry import Registry logger = Logger("core.decorators") # taken from: https://stackoverflow.com/a/26151604/280574 def parameterized(dec): def layer(*args, **kwargs): def repl(f): return dec(f, *args, **kwargs) return repl return layer @parameterized def instance(cls, name=None, override=False): instance_name = name if name else cls.__name__ Registry.add_instance(instance_name, cls(), override) return cls @parameterized def command(handler, command, params, access_level, description, sub_command=None, help_file=None, extended_description=None): handler.command = [command, params, access_level, description, help_file, sub_command, extended_description] return handler @parameterized
def __init__(self, inbound_queue, url):
    """Hold the queue and endpoint URL; the websocket is opened later.

    :param inbound_queue: queue that received messages are pushed onto
    :param url: websocket endpoint to connect to
    """
    self.inbound_queue = inbound_queue
    self.url = url
    self.ws = None  # created on connect, not here
    self.logger = Logger(__name__)