def __init__(self, cachedir, mmap_mode=None, compress=False, verbose=1): """ Parameters ---------- cachedir: string or None The path of the base directory to use as a data store or None. If None is given, no caching is done and the Memory object is completely transparent. mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional The memmapping mode used when loading from cache numpy arrays. See numpy.load for the meaning of the arguments. compress: boolean Whether to zip the stored data on disk. Note that compressed arrays cannot be read by memmapping. verbose: int, optional Verbosity flag, controls the debug messages that are issued as functions are revaluated. """ # XXX: Bad explaination of the None value of cachedir Logger.__init__(self) self._verbose = verbose self.mmap_mode = mmap_mode self.timestamp = time.time() self.compress = compress if compress and mmap_mode is not None: warnings.warn('Compressed results cannot be memmapped', stacklevel=2) if cachedir is None: self.cachedir = None else: self.cachedir = os.path.join(cachedir, 'joblib') mkdirp(self.cachedir)
def __init__(self, subject, *args):
    """Set up a TASK child class environment.

    Initialise the global configuration, the Logger and the system
    load routines, record the prerequisite dependencies for this task,
    and define the task working directory inside the subject directory.

    Any extra positional arguments are registered as dependencies;
    GenericTask aliases each additional arg (suffixed with ``Dir``)
    after the first optional arg provided to ``__init__``.
    """
    self.__order = None
    self.__name = self.__class__.__name__.lower()
    self.__moduleName = self.__class__.__module__.split(".")[-1]
    self.__cleanupBeforeImplement = True
    self.config = subject.getConfig()
    self.subject = subject
    self.subjectDir = self.subject.getDir()
    self.toadDir = self.config.get('arguments', 'toad_dir')
    self.workingDir = os.path.join(self.subjectDir, self.__moduleName)
    self.tasksAsReferences = None
    Logger.__init__(self, subject.getLogDir())
    Load.__init__(self, self.config)
    self.__dependenciesDirNames = {}
    # Every extra positional argument is a prerequisite of this task.
    self.dependencies = list(args)
def __init__(self, host = 'localhost', port = 10000, name = "TCP client", debug = False): Logger.__init__(self, debug) self.name = name self.address = (host, port) self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) print "%s created, dont forget to connect!" % (self.name)
def __init__(self, caLoc=os.path.expanduser('~'), csca=None, cscaKey=None, opensslLocation=""):
    """
    Initiate the CA infrastructure.

    @param caLoc: The location where the openssl config files will be stored
    @param csca: An existing CSCA Certificate in PEM
    @param cscaKey: The private key of the CSCA in PEM
    @param opensslLocation: The openssl executable location
    """
    Logger.__init__(self, "CA")
    self._csca = csca
    self._cscaKey = cscaKey
    self._loc = os.path.join(os.path.normpath(caLoc), 'ca')
    self._configFile = os.path.join(self._loc, 'openssl.cfg')
    try:
        os.mkdir(self._loc)
    except OSError:
        # The directory usually exists already; the previous bare
        # except also swallowed KeyboardInterrupt/SystemExit, so only
        # OS-level mkdir failures are tolerated now.
        pass
    self._openssl = OpenSSL('"' + self._configFile + '"')
    self._openssl.register(self._traceOpenSSl)
def __init__(self, debug=False, log=None, data_dir="data"):
    """Initialise the analyser: tokenizer, feature getter and counters."""
    Logger.__init__(self, log, debug)
    # TODO: move these resource file names into the configuration
    stop_words = "stop_words.txt"
    punct = "punct_symb.txt"
    sent_end = "sentence_end.txt"
    abbr = "abbr.txt"
    senti_words = "product_senti_rus.txt"
    # Features accumulated over all processed texts.
    self.stat = {
        'text_cnt': 0,
        'avg_sentence_per_text': 0,
        'avg_bigram_per_sentence': 0,
    }
    self.tokenizer = Tokenizer(debug, log, data_dir, stop_words, punct,
                               sent_end, abbr, senti_words)
    self.stat['token_stat'] = self.tokenizer.get_token_stat_schema()
    self.feature_creator = FeatureGetter(debug=self.debug,
                                         weight_func_type="senti_trigram",
                                         range_val=2)
    self.csv_writer = None
    self.csv_writer_f = None
def __init__(self, namespace, client, on_message=None):
    """Wire up the client's callbacks; log level defaults to DEBUG."""
    # change default logging from INFO to DEBUG
    Logger.__init__(self, level=logging.DEBUG)
    self._namespace = namespace
    client.on_connect = self._on_connect
    # Fall back to the built-in handler when no callback is supplied.
    client.on_message = on_message or self._on_message
def __init__(self, state):
    """Set up the crosswalk pipeline from a prebuilt *state* dict."""
    Logger.__init__(self, sys.stdout, state['verbose'], 'Pipeline')
    self._log = sys.stdout
    self._s = state  # internal state of the crosswalk
    self._s['tmpfiles'] = []
    self._subjLangClassifier = None
    self._typeClassifier = None
    self._files = []  # list of input files
def __init__(self, config_options={}): Logger.__init__(self, config_options) if "filename" in config_options: try: self.filename = config_options["filename"] self.file_handle = open(self.filename, "w+") except Exception, e: raise RuntimeError("Couldn't open log file %s for appending: %s" % (self.filename, e))
def __init__(self, config_options={}): Logger.__init__(self, config_options) try: self.filename = config_options["filename"] self.header = config_options["header"] self.footer = config_options["footer"] self.folder = config_options["folder"] except Exception: print "Missing required value for HTML Logger"
def __init__(self, itemID, holdings, instructions, queryObject):
    """Build one MARC record for *itemID* from the build instructions."""
    Logger.__init__(self, sys.stdout, DEBUGMODE, 'RecBuild')
    self.ItemID = itemID
    self.HoldingIDs = holdings  # a list of holdingIDs
    self.Instructions = instructions
    self.qo = queryObject
    self.Fields = []
    self.Log("\nProcessing Record %s" % itemID)
    self._build()
def __init__(self, config_options):
    """Network monitor logger.

    Requires "host" and "port" entries in *config_options*; the local
    hostname is resolved once at construction time.

    Raises:
        RuntimeError: when "host"/"port" are missing or "port" is not
            an integer.
    """
    Logger.__init__(self, config_options)
    try:
        self.host = config_options["host"]
        self.port = int(config_options["port"])
        self.hostname = socket.gethostname()
    except (KeyError, ValueError):
        # The former bare except also masked unrelated failures
        # (including KeyboardInterrupt); only configuration problems
        # should be reported as this RuntimeError.
        raise RuntimeError("missing config options for network monitor")
def __init__(self, host = 'localhost', port = 10000, name = "UDP server", debug = False): Logger.__init__(self, debug) self.name = name self.address = (host, port) self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #UDP -> SOCK_DGRAM self.socket.bind(self.address) socket.setdefaulttimeout(1) print "%s is up and running" % (self.name)
def __init__(self):
    """Load users, instances and permissions from the Google spreadsheet."""
    self.pem_dir = os.environ['PEM_DIR']
    Logger.__init__(self)
    sheet = GoogleSpreadsheet()
    self.spreadsheet = sheet
    self.logger.info('Downloading spreadsheets')
    self.users = sheet.get_users()
    self.instances = sheet.get_instances()
    self.permissions = sheet.get_permissions()
    self.logger.info('Spreadsheets are downloaded')
def __init__(self, config, storage, replay_buffer, state=None):
    """Learner worker: network, optimizer and training bookkeeping.

    Seeds all RNGs, selects the training device, builds the network,
    optimizer, LR scheduler and loss functions, and optionally restores
    a previously checkpointed training state.

    Args:
        config: experiment configuration; deep-copied so later mutation
            by the caller cannot affect this learner.
        storage: shared storage handle for weights/metrics.
        replay_buffer: source of training batches.
        state: optional checkpoint restored via load_state().
    """
    set_all_seeds(config.seed)
    self.run_tag = config.run_tag
    self.group_tag = config.group_tag
    self.worker_id = 'learner'
    self.replay_buffer = replay_buffer
    self.storage = storage
    self.config = deepcopy(config)
    # Device selection: use a GPU (optionally a specific device id)
    # only when the learner is configured for GPU AND CUDA is actually
    # available; a GPU request without CUDA is a hard error rather than
    # a silent CPU fallback.
    if "learner" in self.config.use_gpu_for:
        if torch.cuda.is_available():
            if self.config.learner_gpu_device_id is not None:
                device_id = self.config.learner_gpu_device_id
                self.device = torch.device("cuda:{}".format(device_id))
            else:
                self.device = torch.device("cuda")
        else:
            raise RuntimeError(
                "GPU was requested but torch.cuda.is_available() is False."
            )
    else:
        self.device = torch.device("cpu")
    self.network = get_network(config, self.device)
    self.network.to(self.device)
    self.network.train()
    self.optimizer = get_optimizer(config, self.network.parameters())
    self.lr_scheduler = get_lr_scheduler(config, self.optimizer)
    self.scalar_loss_fn, self.policy_loss_fn = get_loss_functions(config)
    self.training_step = 0
    # Running per-component loss sums used for periodic logging.
    self.losses_to_log = {'reward': 0., 'value': 0., 'policy': 0.}
    # Throughput bookkeeping: cumulative frame/game totals plus
    # updates-per-second and frames-per-second estimates.
    self.throughput = {
        'total_frames': 0,
        'total_games': 0,
        'training_step': 0,
        'time': {'ups': 0, 'fps': 0}
    }
    if self.config.norm_obs:
        # obs_range is a flat [min0, max0, min1, max1, ...] list:
        # even indices are the minima, odd indices the maxima.
        self.obs_min = np.array(self.config.obs_range[::2], dtype=np.float32)
        self.obs_max = np.array(self.config.obs_range[1::2], dtype=np.float32)
        self.obs_range = self.obs_max - self.obs_min
    if state is not None:
        self.load_state(state)
    Logger.__init__(self)
def activateLogDir(self):
    """Create the log directory and attach the logger to it.

    The log dir should be activated only once this object has been
    validated as a proper toad subject; see Validation.isAToadSubject().
    """
    if not os.path.exists(self.__logDir):
        message = "creating log dir {}".format(self.__logDir)
        self.info(message)
        os.mkdir(self.__logDir)
    Logger.__init__(self, self.__logDir)
def __init__(self, qo, id, line):
    """Prepare a Query, normalising the quoting of every mapped value."""
    Logger.__init__(self, sys.stdout, DEBUGMODE, 'Query')
    self.qo = qo
    self.id = id
    self.sql = None  # useful for accessing the sql externally
    # Standardize on single quotes across all values.
    self.map = dict((key, line[key].replace('"', "'")) for key in line)
def __init__(self, itemID, instructions, queryObject):
    """Build a fixed field for *itemID* from the build instructions."""
    Logger.__init__(self, sys.stdout, DEBUGMODE, 'FFieldBuild')
    self.ItemID = itemID
    self.DataChar = {}
    self.Tag = instructions[0]['Tag']  # tag comes from the first line
    self.qo = queryObject
    self.Instructions = instructions
    self._build()
def __init__(self, func, cachedir, ignore=None, mmap_mode=None,
             compress=False, verbose=1, timestamp=None):
    """Callable wrapper that memoizes *func* results on disk.

    Parameters
    ----------
    func: callable
        The function to decorate
    cachedir: string
        The path of the base directory to use as a data store
    ignore: list or None
        List of variable names to ignore.
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
        The memmapping mode used when loading from cache
        numpy arrays. See numpy.load for the meaning of the
        arguments.
    verbose: int, optional
        Verbosity flag, controls the debug messages that are issued
        as functions are re-evaluated. The higher, the more verbose.
    timestamp: float, optional
        The reference time from which times in tracing messages are
        reported.
    """
    Logger.__init__(self)
    self._verbose = verbose
    self.cachedir = cachedir
    self.func = func
    self.mmap_mode = mmap_mode
    self.compress = compress
    if compress and mmap_mode is not None:
        # Mutually exclusive options: zipped blobs cannot be
        # memory-mapped, so warn early.
        warnings.warn('Compressed results cannot be memmapped',
                      stacklevel=2)
    if timestamp is None:
        timestamp = time.time()
    self.timestamp = timestamp
    if ignore is None:
        ignore = []
    self.ignore = ignore
    mkdirp(self.cachedir)
    try:
        # Make the wrapper look like *func* (__name__, __doc__, ...).
        functools.update_wrapper(self, func)
    except:
        " Objects like ufunc don't like that "
    if inspect.isfunction(func):
        # Insert a blank line after the signature rendered by pydoc.
        doc = pydoc.TextDoc().document(func).replace('\n', '\n\n', 1)
    else:
        # Pydoc does a poor job on other objects
        doc = func.__doc__
    self.__doc__ = 'Memoized version of %s' % doc
def __init__(self, arguments):
    """Schedule and execute pipeline tasks.

    Args:
        arguments: command line arguments specified by the user
    """
    self.arguments = arguments
    cfg = Config(arguments).getConfig()
    self.config = cfg
    self.studyDir = cfg.get('arguments', 'studyDir')
    Logger.__init__(self)
def __init__(self, config, path=None):
    """File-backed logger; marks itself invalid when no path is given."""
    Logger.__init__(self, config)
    if not path:
        # Nothing to write to: flag the instance and bail out early.
        self.is_valid = False
        return
    self.is_valid = True
    self.file_path = os.path.abspath(path)
    self.file = open(self.file_path, 'w+')
    self.write_headers()
def __init__(self, control):
    """T32/PTI tracer setup: paths, kernel module params, CLI options.

    Bug fix: the base-class initialiser was previously invoked as
    ``Logger.__init__(control)`` — without *self* — so this instance
    was never initialised as a Logger (compare the sibling tracer,
    which correctly calls ``Logger.__init__(self, control)``).
    """
    # Base class init call
    Logger.__init__(self, control)
    self._sat_home_path = envstore.store.get_sat_home()
    self._t32_dir = os.path.join(self._sat_home_path, 'tracer')
    self._t32_scripts_directory = os.path.join(self._sat_home_path, 'tracer', 't32_scripts')
    # Add default kernel module parameter for PTI-tracing
    self._kernel_module_parameters += " trace_method=0 "
    # Add more option to command line input
    self._parser.add_argument('-a', '--autofocus', action='store_true',
                              help='Run Autofocus before tracing', required=False)
    self._parser.add_argument('-i', '--init', action='store_true',
                              help='Init target, configure target for tracing', required=False)
    self._parser.add_argument('-w', '--pti_width', action='store',
                              help='Set PTI width to 8,12,16, defaults to 16 bit', required=False)
    self._parser.add_argument('-r', '--rawstp', action='store_true',
                              help='Fetch raw stp stream', required=False)
    self._parser.add_argument('-s', '--sbpti', action='store_true',
                              help='Send Sideband data trought PTI', required=False)
    self._parser.add_argument('-d', '--disable_sleep', action='store_true',
                              help='Disable C6 sleep state', required=False)
    self.args = self._parser.parse_args()
    # Initialize T32 API; tracing proceeds without it when unavailable.
    try:
        t32 = __import__('satt.trace.t32')
    except ImportError:
        self._t32api = None
    else:
        self._t32api = t32.T32Api()
def __init__(self, arguments, xmlSoftwareVersions):
    """Schedule and execute pipeline tasks.

    Args:
        arguments: command line arguments specified by the user
        xmlSoftwareVersions: a minidom xml structure containing versions
            of various softwares; delegated later to the Subject class.
    """
    self.arguments = arguments
    cfg = Config(arguments).getConfig()
    self.config = cfg
    self.softwareVersions = xmlSoftwareVersions
    Logger.__init__(self)
def __init__(self):
    """Initialise a schedulable task with an empty timing configuration."""
    Logger.__init__(self)
    self.running = False
    # Once self.stop is True, this task is never executed again.
    self.stop = False
    # Schedule parameters: all unset until configured.
    self.interval = None
    self.date = None
    self.week = None
    self.hour = 0
    self.minute = 0
    self.timeout = None
    # Execution bookkeeping.
    self.runtime = 0
    self.starttime = 0
def __init__(self, host="localhost", port=10001, name="TCP server", debug=False): Logger.__init__(self, debug) self.name = name self.address = (host, port) self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.bind(self.address) self.socket.listen(1) print "%s is up and running" % (self.name)
def __init__(self, session_bus, arguments):
    """Register the DBus frontend service and parse CLI flags."""
    dbus.service.Object.__init__(self, session_bus, DSC_FRONTEND_PATH)
    Logger.__init__(self)
    global debug_flag
    self.simulate = "--simulate" in arguments
    debug_flag = "--debug" in arguments
    self.in_wizard_showing = False
    self.init_hide = False
def __init__(self, persist=None, cachefile=None):
    """URL cache backed by *cachefile* with a *persist* lifetime.

    Both arguments must be supplied together.

    Bug fix: the coherence test used to read
    ``if persist is None != cachefile is None`` which, through Python
    comparison chaining, evaluates as
    ``(persist is None) and (None != cachefile) and (cachefile is None)``
    — a condition that can never be True, so the check never fired.
    The intended XOR is made explicit with parentheses.

    Raises:
        Exception: when only one of persist/cachefile is given, or the
            cache file's directory does not exist.
    """
    Logger.__init__(self)
    if (persist is None) != (cachefile is None):
        raise Exception("arguments coherence error")
    tgtdir = os.path.dirname(cachefile)
    if tgtdir != "" and not os.path.exists(tgtdir):
        self.logger.error("directory (%s) not found" % tgtdir)
        raise Exception("directory doesn't exist")
    self.cachefile = cachefile
    self.persist = persist
    self.load()
def __init__(self, client_addr = ('localhost', 10000), server_addr = ('localhost', 10001), name = "UDP to TCP proxy", debug = False): Logger.__init__(self, debug) self.name = name self.address = client_addr self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.bind(self.address) self.socket.listen(1) self.udp_server_address = server_addr self.udp_server_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.handlers = [self.socket, sys.stdin] print "%s is up and running" % (self.name)
def __init__(self, func, cachedir, ignore=None, mmap_mode=None,
             compress=False, verbose=1, timestamp=None):
    """Callable wrapper that memoizes *func* results on disk.

    Parameters
    ----------
    func: callable
        The function to decorate
    cachedir: string
        The path of the base directory to use as a data store
    ignore: list or None
        List of variable names to ignore.
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
        The memmapping mode used when loading from cache
        numpy arrays. See numpy.load for the meaning of the
        arguments.
    verbose: int, optional
        Verbosity flag, controls the debug messages that are issued
        as functions are re-evaluated. The higher, the more verbose.
    timestamp: float, optional
        The reference time from which times in tracing messages are
        reported.
    """
    Logger.__init__(self)
    self._verbose = verbose
    self.cachedir = cachedir
    self.func = func
    self.mmap_mode = mmap_mode
    self.compress = compress
    if compress and mmap_mode is not None:
        # Mutually exclusive options: zipped blobs cannot be
        # memory-mapped, so warn early.
        warnings.warn('Compressed results cannot be memmapped',
                      stacklevel=2)
    if timestamp is None:
        timestamp = time.time()
    self.timestamp = timestamp
    if ignore is None:
        ignore = []
    self.ignore = ignore
    mkdirp(self.cachedir)
    try:
        # Make the wrapper look like *func* (__name__, __doc__, ...).
        functools.update_wrapper(self, func)
    except:
        " Objects like ufunc don't like that "
    if inspect.isfunction(func):
        # Insert a blank line after the signature rendered by pydoc.
        doc = pydoc.TextDoc().document(func).replace('\n', '\n\n', 1)
    else:
        # Pydoc does a poor job on other objects
        doc = func.__doc__
    self.__doc__ = 'Memoized version of %s' % doc
def __init__(self, itemID, instructions, queryObject):
    """Build a subfield-based MARC field for *itemID*."""
    Logger.__init__(self, sys.stdout, DEBUGMODE, 'SFieldBuild')
    self.ItemID = itemID
    # eg. ['a', 'the title', 'c', 'the author']
    self.SubFields = []
    # eg. ['0', '1']
    self.Indicators = []
    # Tag comes from the first instruction line.
    self.Tag = instructions[0]['Tag']
    # A list of SubField lists, used when this field is repeatable.
    self.ListOfSubFields = []
    self.qo = queryObject
    self.Instructions = instructions
    self._build()
def __init__(self, control):
    """RTIT tracer setup: kernel module parameters and CLI options."""
    # Base class init call
    Logger.__init__(self, control)
    self._sat_home_path = envstore.store.get_sat_home()
    # Default kernel module parameters for RTIT tracing.
    self._kernel_module_parameters += " trace_method=1 sideband_log_method=1"
    # Extra command line option.
    self._parser.add_argument('-b', '--buffers', action='store',
                              help='Amount of RTIT buffers to use',
                              required=False)
    self.args = self._parser.parse_args()
    if self.args.buffers:
        self._kernel_module_parameters += " max_trace_buffers=" + self.args.buffers
def __init__(self, itemID, holdingIDs, instructions, queryObject):
    """Build holdings fields for *itemID*; a title may have several holdings."""
    Logger.__init__(self, sys.stdout, DEBUGMODE, 'HoldBuild')
    self.ItemID = itemID
    self.HoldingIDs = holdingIDs
    # Intentionally left empty and not used: only referenced in
    # GetMarcFields.
    self.SubFields = []
    # Tag comes from the first instruction line.
    self.Tag = instructions[0]['Tag']
    # One SubField list (eg. ['a', 'the title', 'c', 'the author'])
    # per holding, since a title may have more than one holding.
    self.ListOfSubFields = []
    self.qo = queryObject
    self.Instructions = instructions
    self._build()
def __init__(self, data_name, agent_nums, use_localhost=True): Logger.__init__(self) # get model and train args by "data_name" self.switch = Switch(data_name=data_name) self.model = self.switch.get_model(is_server=True) self.train_args = self.switch.get_train_args() self.data_name = data_name self.train_args.dataSet = data_name self.train_args.agent_nums = agent_nums # server socket setting self.server_port_begin = 8080 self.server_socks = [] # agent host port list for testing self.agent_port_begin = 2048 self.agents_attrs = [] # stored data from agent self.train_data_nums = [0] * self.train_args.agent_nums self.test_data_nums = [0] * self.train_args.agent_nums self.all_train_data_nums = 0 self.all_test_data_nums = 0 # training setting self.train_args.start_epoch = 1 self.use_localhost = use_localhost self.is_simulate = self.train_args.is_simulate self.is_first_training = True self.train_args.cuda = not self.train_args.no_cuda and torch.cuda.is_available() torch.manual_seed(self.train_args.seed) # seeding the CPU for generating random numbers so that the results are # deterministic # save date = time.strftime("%m-%d_%H-%M-%S", time.localtime()) self.train_args.save_path = "record/"+self.train_args.dataSet+"/"+date+"/" self.preds = [] self.targets = [] if self.train_args.cuda: torch.cuda.manual_seed(self.train_args.seed) # set a random seed for the current GPU self.model.cuda() # move all model parameters to the GPU self.optim = optim.Adam(self.model.parameters(), lr=self.train_args.lr)
def __init__(self, data_dir="data", log=None, debug=False,
             dict_obj="FreqSentDictObj", word_vec="word_vec.json",
             weight_func_type='tfidf', range_val=None):
    """Feature extractor built from a word-vector vocabulary and a
    pickled frequency dictionary.

    Args:
        data_dir: directory holding the resource files.
        log, debug: forwarded to the Logger base class.
        dict_obj: pickled frequency-dictionary file name.
        word_vec: JSON word-vector vocabulary file name.
        weight_func_type: 'tfidf' or 'senti_trigram'.
        range_val: window size, required for 'senti_trigram'.
    """
    Logger.__init__(self, log, debug)
    self.not_found_words = {}
    self.stat = {'not_found': 0, 'all': 0}
    self.range_val = range_val
    # NOTE(review): hard-coded corpus document count — confirm source.
    self.doc_cnt = 38369
    self.feature_schema = [
        'pos_sent', 'neg_sent', 'pos_weight', 'neg_weight'
    ]
    if weight_func_type == 'tfidf':
        self.weight_func = FeatureGetter.tfidf_weights
    elif weight_func_type == 'senti_trigram':
        # senti_trigram needs an explicit window size.
        if self.range_val is None:
            self.__print__('ERR', "range val is not setted")
            sys.exit(1)
        self.weight_func = FeatureGetter.senti_freq_weights
    else:
        # Unknown type is reported but, unlike above, does not exit.
        self.__print__(
            'ERR',
            "unknown weight function type ('{}')".format(weight_func_type))
    try:
        f = open(data_dir + '/' + word_vec, 'r')
        self.word_vec = json.load(f)
        f.close()
        # Replace each vocabulary entry with its enumeration index.
        index = 0
        for w in self.word_vec.keys():
            self.word_vec[w] = index
            index += 1
        self.__print__(
            'DEB',
            'word vec dimension is {}'.format(len(self.word_vec.keys())))
        f = open(data_dir + '/' + dict_obj, 'r')
        self.freq_dict = pickle.load(f)
        f.close()
    except Exception as e:
        self.__print__('ERR', "unable to init: {}".format(str(e)))
        sys.exit(1)
def __init__(self, config):
    """A valid individual with the capability to run tasks.

    This class is responsible for writing a document of the software
    names and versions into the log directory.  Must be validated as
    a prerequisite.

    Args:
        config: a ConfigParser object.
    """
    self.__config = config
    self.__subjectDir = self.__config.get('arguments', 'subjectDir')
    self.__name = os.path.basename(self.__subjectDir)
    self.__logDir = os.path.join(self.__subjectDir, self.__config.get('dir', 'log'))
    # The subject logger must be called without file information
    # during initialization.
    Logger.__init__(self)
    Lock.__init__(self, self.__logDir, self.__name)
    Validation.__init__(self, self.__subjectDir, self.__config)
def __init__(self, client_addr=('localhost', 10000), server_addr=('localhost', 10001), name="TCP to TCP proxy", debug=False): Logger.__init__(self, debug) self.name = name self.address = client_addr self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) # toplevel import... socket wtf? self.socket.bind(self.address) self.socket.listen(1) self.tcp_server_addr = server_addr self.tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.tcp_server_socket.connect(self.tcp_server_addr) self.handlers = [self.socket, sys.stdin] print "%s is up and running" % (self.name)
def __init__(self, data_dir="data", log=None, debug=False,
             dict_obj="FreqSentDictObj", word_vec="word_vec.json",
             weight_func_type='tfidf', range_val=None):
    """Feature extractor built from a word-vector vocabulary and a
    pickled frequency dictionary.

    Args:
        data_dir: directory holding the resource files.
        log, debug: forwarded to the Logger base class.
        dict_obj: pickled frequency-dictionary file name.
        word_vec: JSON word-vector vocabulary file name.
        weight_func_type: 'tfidf' or 'senti_trigram'.
        range_val: window size, required for 'senti_trigram'.
    """
    Logger.__init__(self, log, debug)
    self.not_found_words = {}
    self.stat = {'not_found': 0, 'all': 0}
    self.range_val = range_val
    # NOTE(review): hard-coded corpus document count — confirm source.
    self.doc_cnt = 38369
    self.feature_schema = ['pos_sent', 'neg_sent', 'pos_weight', 'neg_weight']
    if weight_func_type == 'tfidf':
        self.weight_func = FeatureGetter.tfidf_weights
    elif weight_func_type == 'senti_trigram':
        # senti_trigram needs an explicit window size.
        if self.range_val is None:
            self.__print__('ERR', "range val is not setted")
            sys.exit(1)
        self.weight_func = FeatureGetter.senti_freq_weights
    else:
        # Unknown type is reported but (as before) does not exit.
        self.__print__('ERR', "unknown weight function type ('{}')".format(weight_func_type))
    try:
        # Context managers guarantee the files are closed even when
        # json/pickle raises (the original open()/close() pairs leaked
        # the handle on error).
        with open(data_dir + '/' + word_vec, 'r') as f:
            self.word_vec = json.load(f)
        # Replace each vocabulary entry with its enumeration index.
        for index, w in enumerate(self.word_vec.keys()):
            self.word_vec[w] = index
        self.__print__('DEB', 'word vec dimension is {}'.format(len(self.word_vec.keys())))
        with open(data_dir + '/' + dict_obj, 'r') as f:
            self.freq_dict = pickle.load(f)
    except Exception as e:
        self.__print__('ERR', "unable to init: {}".format(str(e)))
        sys.exit(1)
def __init__(self, mainloop, session_bus):
    """Update-list daemon: register on DBus and reset all state flags."""
    self.session_bus = session_bus
    dbus.service.Object.__init__(self, self.session_bus)
    Logger.__init__(self)
    self.mainloop = mainloop
    # Run-mode flags.
    self.is_run_in_daemon = True
    self.exit_flag = False
    self.is_in_update_list = False
    self.update_status = None
    # Lazily-initialised bus handles.
    self.system_bus = None
    self.bus_interface = None
    self.delay_update_id = None
    # Counters surfaced to the UI.
    self.update_num = 0
    self.remind_num = 0
    self.net_detector = NetworkDetector()
    self.loginfo("Start Update List Daemon")
def __init__(self, config_options={}):
    """JSON logger; requires a "filename" configuration entry.

    Raises:
        RuntimeError: when "filename" is absent from *config_options*.
    """
    Logger.__init__(self, config_options)
    if "filename" in config_options:
        self.filename = config_options["filename"]
    else:
        # Fixed garbled message: it previously read
        # "Missing filename for json filename".
        raise RuntimeError("Missing filename for json logger")
def __init__(self, persist=24 * 3600, cachefile=".cacheurl.db"):
    """Thin wrapper constructing a CacheUrl with one-day persistence."""
    Logger.__init__(self)
    self.cache = CacheUrl(cachefile=cachefile, persist=persist)
def __init__(self, libpath, javaparams, isverbose=False):
    """Record the XSLT library path and the Java parameters."""
    Logger.__init__(self, sys.stdout, isverbose, 'XSLT')
    self._params = javaparams
    self._libpath = libpath