def addRules(self, ruleList):
    """Parse textual rule lines into self.rules and self.hostMap.

    Each line may carry a trailing '#' comment.  'host' lines update the
    host map; other lines are tokenized and kept only when the first token
    (upper-cased) is one of SA / ID / HPAR; anything else is logged as bad.
    """
    for line in ruleList:
        # Strip inline comments and surrounding whitespace.
        r = line.split("#")[0].strip()
        if r == "":
            continue
        if r.lower().startswith("host"):
            # host <name> <address>: updates the host map, not the rules.
            rs = r.split()
            if rs[1].lower() == "localhost":
                self.hostMap.setLocalhost(rs[2])
            else:
                self.hostMap.setHostAddress(rs[1], rs[2])
            continue
        rs = r.split()
        if len(rs) < 2:
            continue
        if len(rs) > 3:
            # Collapse everything after the first two tokens into quoted params.
            pars = quoteParams(rs[2:])
            rs = rs[:2] + pars
        rs[0] = rs[0].upper()
        # Non-empty exactly when the rule type is one of the known kinds.
        goodrule = [1 for desc in ['SA', 'ID', 'HPAR'] if rs[0] == desc]
        if len(goodrule) > 0:
            # NOTE(review): a SA/ID/HPAR rule with exactly two tokens would
            # raise IndexError on rs[2] here — confirm such rules always
            # carry at least three tokens.
            if rs[2].startswith("[") and rs[2].endswith("]"):
                # TODO: Should leave hosts as they are; let
                # _update_cast_hostnames take care of translation
                host = rs[2].strip(" []")
                if self.hostMap.isKnownHost(host):
                    rs[2] = self.hostMap.expandHostName(host)
            self.rules.append(rs)
            continue
        logger.get().warn("Bad rule: '%s'" % line)
def prepareServerConfig(self):
    """Write the server-side log4j configuration file and (re)point the
    server log-properties symlink at it.  Errors are logged, not raised."""
    result = []
    opts = options.getCastOptions()
    # define appenders
    sm = self.servers[self.selectedServer]
    appenders = sm['appenders']
    for k, appndr in appenders.items():
        result.append("#+ Section: " + appndr[0])
        level = self.serverConsoleLevel
        if k == 'console' and level != 'OFF':
            conf = opts.getSection(appndr[0])
            for ln in conf:
                ln = ln.replace('${LEVEL}', level)
                result.append(ln)
        level = self.serverXmlFileLevel
        if k == 'xmlfile' and level != 'OFF':
            # The XML appender needs the log file (with header) to exist.
            self._prepareLogFile()
            conf = opts.getSection(appndr[0])
            for ln in conf:
                ln = ln.replace('${LEVEL}', level)
                ln = ln.replace('${LOGFILE}', self.logFile)
                result.append(ln)
    # cleanup: drop rootLogger lines; a single one is re-inserted below.
    result = [ln.strip() for ln in result if not ln.strip().startswith("log4j.rootLogger=")]
    result = self._removeComments(result)
    # setup rootLogger
    self._insertRootLogger(result)
    self._prepareLogDir()
    try:
        f = open(self.serverConfigFile, "w")
        f.write("\n".join(result))
        f.close()
    except Exception as e:
        logger.get().error("%s" % e)
    serverLink = os.path.join(self.logServerDir, self.logPropLink)
    try:
        if os.path.exists(serverLink):
            st = os.lstat(serverLink)
            if not stat.S_ISLNK(st.st_mode):
                # A real file sits where the symlink should be: move it aside.
                # NOTE(review): this renames self.logPropLink, not serverLink —
                # the client-side variant renames the path it is about to
                # remove; confirm this is not a copy/paste bug.
                # NOTE(review): os.tempnam was removed in Python 3.
                os.rename(self.logPropLink, os.tempnam(self._logDir, "srv." + self.logPropLink))
            os.remove(serverLink)
        os.symlink(self.serverConfigFile, serverLink)
    except Exception as e:
        logger.get().error("%s" % e)
def __init__(self):
    """Initialise config, database handle and logging for the gerrit listener.

    The gerrit connection (`self.g`) starts unset and is established later.
    """
    cfg = config.Config()
    self.ci_users = {}
    self.cfg = cfg
    self.db = db_helper.DBHelper(cfg).get()
    # Logging must be initialised from the config before a named logger
    # can be fetched.
    logger.init(cfg)
    self.log = logger.get('scoreboard-gerrit-listener')
    self.g = None
def __init__(self, num_cls=10, filename=None, name=None, cmap='gray'):
    """Plotter for confusion matrices.

    Args:
        num_cls: int, number of classes on each matrix axis.
        filename: optional output filename, handled by the base class.
        name: optional plot name, handled by the base class.
        cmap: matplotlib colour-map name used when rendering.
    """
    super(ConfusionMatrixPlotter, self).__init__(
        filename=filename, name=name)
    self.cmap = cmap
    self.num_cls = num_cls
    self.log = logger.get()
def __init__(self, num, batch_size=1, progress_bar=False, log_epoch=10,
             get_fn=None, cycle=False, shuffle=True, stagnant=False):
    """Construct a batch iterator over `num` examples.

    Args:
        num: int, total number of examples.
        batch_size: int, batch size.
        progress_bar: bool, whether to display a progress bar.
        log_epoch: int, stored epoch interval used for logging.
        get_fn: optional callable stored for fetching batch data.
        cycle: bool, stored flag for wrapping around after the last batch.
        shuffle: bool, stored flag for shuffled iteration order.
        stagnant: bool, stored flag used by the iteration logic.
    """
    self._num = num
    self._batch_size = batch_size
    self._step = 0
    # Number of batches needed to cover all examples (last may be short).
    self._num_steps = int(np.ceil(self._num / float(batch_size)))
    self._pb = None
    self._variables = None
    self._get_fn = get_fn
    self.get_fn = get_fn  # NOTE(review): public alias of _get_fn; confirm both are needed
    self._cycle = cycle
    self._shuffle_idx = np.arange(self._num)
    self._shuffle = shuffle
    # Fixed seed -> deterministic shuffling across runs.
    self._random = np.random.RandomState(2)
    # NOTE(review): unlike the seeded variant of this constructor, no initial
    # shuffle is performed here and the flag starts equal to `shuffle`
    # (the seeded variant shuffles up front and starts the flag False) —
    # confirm this asymmetry is intentional.
    self._shuffle_flag = shuffle
    self._stagnant = stagnant
    self._log_epoch = log_epoch
    self._log = logger.get()
    self._epoch = 0
    if progress_bar:
        self._pb = pb.get(self._num_steps)
        pass
    # Guards concurrent access to iterator state.
    self._mutex = threading.Lock()
    pass
def saveCharSelection(self):
    """Persist the currently selected character into the save file.

    The save file is a single whitespace-separated line:
    ``<level> <character> <x> <y>``.  The character slot is only written
    when it is still 0 (i.e. no character has been recorded yet).
    """
    # 'with' guarantees the handles are closed even if parsing/writing fails
    # (the original left files open on exception).
    with open('save', 'r') as f:
        data = f.readline().split()
    if int(data[1]) == 0:
        with open('save', 'w') as f:
            f.write(data[0] + " " + str(logger.get().char) + " "
                    + data[2] + " " + data[3])
def __init__(self, folder, name):
    """Store results under ``folder/name.yaml``, creating the folder if needed.

    Args:
        folder: str, directory that will hold the YAML file.
        name: str, base name of the YAML file (without extension).
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    self.log = logger.get()
    self.folder = folder
    self.fname = os.path.join(folder, name + '.yaml')
def prepareClientConfig(self):
    """Write the client-side log4j configuration file and (re)point the
    client log-properties symlink at it.  Errors are logged, not raised."""
    opts = options.getCastOptions()
    sm = self.servers[self.selectedServer]
    result = []
    appndr = sm['client-connect']
    conf = opts.getSection(appndr[0])
    result.append("#+ Section: " + appndr[0])
    for ln in conf:
        if sm['id'] == 'console':
            ln = ln.replace('${LEVEL}', self.serverConsoleLevel)
        else:
            # Socket appender: point the client at the logging server.
            ln = ln.replace('${PORT}', self.serverPort)
            ln = ln.replace('${HOST}', self.serverHost)
        result.append(ln)
    result += self.readCustomLogLevels()
    # cleanup: drop rootLogger lines; a single one is re-inserted below.
    result = [ln.strip() for ln in result if not ln.strip().startswith("log4j.rootLogger=")]
    result = self._removeComments(result)
    # setup rootLogger
    self._insertRootLogger(result)
    clientfile = self.clientConfigFile
    self._prepareLogDir()
    try:
        f = open(clientfile, "w")
        f.write("\n".join(result))
        f.close()
    except Exception as e:
        logger.get().error("%s" % e)
    try:
        if os.path.exists(self.logPropLink):
            st = os.lstat(self.logPropLink)
            if not stat.S_ISLNK(st.st_mode):
                # A real file sits where the symlink should be: move it aside.
                # NOTE(review): os.tempnam was removed in Python 3; also, after
                # the rename the os.remove below targets a path that no longer
                # exists — confirm the except is meant to absorb that.
                os.rename(self.logPropLink, os.tempnam(self._logDir, "cli." + self.logPropLink))
            os.remove(self.logPropLink)
        os.symlink(clientfile, self.logPropLink)
    except Exception as e:
        logger.get().error("%s" % e)
def __init__(self):
    """Top-level game object: window, camera, current level, menus and HUD."""
    #center screen
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    #initialize pygame lib
    pygame.init()
    #creates window
    self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    pygame.display.set_caption('The Avengers - Six Guys')
    # The global logger is wired up to most game objects below so it can
    # record/replay game state.
    logger.get().setScreen(self.screen)
    #Make a camera (this might need to go inside the level object, but that's ok)
    self.camera = camera.Camera(self.screen)
    logger.get().setCamera(self.camera)
    #number of the current level
    self.levelNumber = 0  #default 1, change for debugging
    self.currLevel = self.getCurrentLevel()
    logger.get().setLevel(self.levelNumber)
    #player starts with 3 lives
    self.player_lives = constants.PLAYER_LIVES
    self.invincible = False
    #menus
    self.startMenu = startmenu.StartMenu()
    self.pauseMenu = pausemenu.PauseMenu()
    logger.get().setMenu(self.startMenu)
    logger.get().setAvengersObj(self)
    #the hud
    self.hud = hud.HUD()
    #I wanna listen to my music while I develop dammit!
    # "-m" mutes music and sound effects.
    if "-m" in sys.argv:
        sound.set_bgm_vol(0)
        sound.set_sfx_vol(0)
        self.hud.vol = False
    #Skip all that clicking, ain't nobody got time for that
    # "-p" jumps straight into gameplay, bypassing the start menu.
    if "-p" in sys.argv:
        self.startMenu.playing = True
def _prepareLogFile(self):
    """Write the XMLLayout header into the server log file.

    Substitutes the ${USER} and ${NOW} placeholders in the configured
    header template and writes the result to ``self._logDir/self._logFile``.
    Errors are logged rather than raised (best-effort behaviour).
    """
    opts = options.getCastOptions()
    self._prepareLogDir()
    head = opts.getSection("LOG4J.SimpleSocketServer.XMLLayout.head")
    user = "******"  # NOTE(review): user value appears intentionally masked
    now = time.strftime("%H:%M:%S", time.localtime(time.time()))
    result = [ln.replace('${USER}', user).replace('${NOW}', now)
              for ln in head]
    result = self._removeComments(result)
    try:
        # 'with' guarantees the file is closed even when write() fails
        # (the original leaked the handle on exception).
        with open(os.path.join(self._logDir, self._logFile), 'w') as f:
            f.write("\n".join(result))
    except Exception as e:
        logger.get().error("%s" % e)
def loadLevel(self, state=1):
    """Restore game state from the 'save' (state == 1) or 'replay' file.

    The file holds one whitespace-separated line:
    ``<level> <character> <x> <y>``.  For a replay (state != 1) the current
    level is kept and no logger start position is recorded.
    """
    fname = 'save' if state == 1 else 'replay'
    # 'with' closes the handle even if parsing fails (original leaked it).
    with open(fname, 'r') as f:
        data = f.readline().split()
    #get level number from save file
    self.levelNumber = int(data[0])
    #set current level -> replay doesn't restart level
    if state == 1:
        self.currLevel = self.getCurrentLevel()
    #set chosen player: map the stored index to its character class
    choice = int(data[1])
    character_classes = {
        1: player.Hulk,
        2: player.Thor,
        3: player.CaptainAmerica,
        4: player.IronMan,
        5: player.Hawkeye,
        6: player.BlackWidow,
    }
    if choice in character_classes:
        self.currLevel.player = character_classes[choice](0, 0, self.currLevel)
    self.currLevel.charsel.setChar(choice)
    #set player coords
    self.currLevel.player.rect.x = int(data[2])
    self.currLevel.player.rect.y = int(data[3])
    #logger
    if state == 1:
        logger.get().setStart(self.currLevel.player.rect.x,
                              self.currLevel.player.rect.y)
    self.currLevel.charSelected = True
    self.currLevel.plotOver = True
    #begin playing level
    self.startMenu.loadLevel = False
    self.startMenu.playing = True
def __init__(self, folder, fname='model', var_dict=None):
    """Checkpoint saver rooted at `folder` (created if missing).

    Args:
        folder: str, directory for checkpoints.
        fname: str, base checkpoint filename.
        var_dict: optional mapping of variables to save; defaults to all
            TensorFlow variables.
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    self.folder = folder
    self.log = logger.get()
    self.fname = fname
    # Not created here; set elsewhere before saving.
    self.tf_saver = None
    if var_dict is None:
        # NOTE(review): tf.all_variables() is deprecated/removed in newer
        # TensorFlow (tf.global_variables is the replacement) — confirm the
        # pinned TF version.
        self.var_dict = tf.all_variables()
    else:
        self.var_dict = var_dict
    pass
def readCustomLogLevels(self):
    """Read per-logger level overrides and return them as log4j config lines.

    File format: optional ``[section]`` headers (prefixed to names with a
    dot), then ``name=LEVEL`` lines; a ``*`` in the name makes it a wildcard
    matched against ``self.componentNames``.

    Returns:
        list of ``log4j.logger.<name>=<LEVEL>`` strings; empty when no
        overrides file is configured or it does not exist.
    """
    result = []
    section = ""
    reSection = re.compile(r"^\[([-a-zA-Z0-9.]*)\]")
    # No overrides file configured: nothing to do (silent, as before).
    if self.loggerLevelsFilename is None or self.loggerLevelsFilename == "":
        return result
    if not os.path.exists(self.loggerLevelsFilename):
        logger.get().error(
            "File '%s' not found. Using default logging levels."
            % self.loggerLevelsFilename)
        # Fixes a bug: the original fell through to the parse loop with
        # 'lines' undefined (NameError) when the file was missing.
        return result
    with open(self.loggerLevelsFilename) as f:
        lines = self._removeComments(f.readlines())
    for ln in lines:
        ln = ln.strip()
        if ln == "":
            continue
        if ln.startswith("#"):
            continue
        mo = reSection.match(ln)
        if mo is not None:
            section = mo.group(1).strip()
            if section != "":
                section += "."
            continue
        if ln.find("*") < 0:
            # Plain name: emit directly under the current section.
            result.append("log4j.logger." + section + ln)
        else:
            # Wildcard name: expand against the known component names.
            # maxsplit=1 keeps any '=' inside the level value intact.
            parts = ln.split("=", 1)
            if len(parts) < 2:
                continue
            reName = (section + parts[0]).replace(".", r"\.").replace("*", ".*")
            reName = re.compile("^" + reName + "$")
            for cname in self.componentNames:
                if reName.match(cname) is None:
                    continue
                result.append("log4j.logger." + cname + "=" + parts[1])
    return result
def test_mnist_model(self):
    """Download the MNist torch model from the test repo and verify it on a sample image."""
    self._logger = logger.get()
    self._logger.info("----------- test_mnist_model")
    base_model_uri = self.get_test_model_repo()
    filename = os.path.join(base_model_uri, "models", "mnist", "mnist.zip")
    local_torch_file = download_and_extract_model(
        filename, model_extension=".pth")
    # Fixed MNIST geometry: 28x28 single-channel in, 10 class outputs.
    input_shape = (28, 28, 1)
    output_shape = (1, 1, 10)
    model = MNist(input_shape, output_shape)
    verifier = torch_verifier.TorchModelVerifier(
        model, local_torch_file, input_shape, output_shape)
    verifier.load_image(os.path.join(script_path, "eight.jpg"))
    # bugbug: compiled model is crashing on Linux. llvm-8 bug?
    # Only verify the compiled model on Windows because of the above.
    verify = os.name == 'nt'
    verifier.verify(verify_compiled=verify)
def __init__(self):
    """Holds parsed arguments and state for the model-markdown generator."""
    self.arg_parser = argparse.ArgumentParser(
        "This script takes a model directory and generates a basic markdown file that describes the model\n")
    # Inputs/outputs resolved later from the parsed arguments.
    self.modeldir = None
    self.model = None
    self.outfile = None
    self.template = None
    self.model_data = {}
    self.printexe = None
    # Display names for the supported benchmark platforms.
    self.platforms = {
        "pi3": "Raspberry Pi 3 (Raspbian) @ 700MHz",
        "pi3_64": "Raspberry Pi 3 (OpenSUSE) @ 600MHz",
        "aarch64": "DragonBoard 410c @ 1.2GHz"
    }
    self.user = _get_default_user()
    self.logger = logger.get()
def test_onnx_model(self):
    """Round-trip a randomly initialised TestNet through a saved .pth file and verify it."""
    self._logger = logger.get()
    self._logger.info("----------- test_onnx_model")
    input_shape = (32, 32, 3)
    output_shape = (1, 1, 20)
    model = TestNet(input_shape, output_shape)
    # Same image, NCHW ordering with a batch dimension for torch.
    torch_shape = (1, 3, 32, 32)
    tensor = torch.randn(torch_shape)
    model.forward(tensor)
    torch.save(model.state_dict(), "model.pth")
    verifier = torch_verifier.TorchModelVerifier(
        model, "model.pth", input_shape, output_shape)
    verifier.add_input(tensor)
    # bugbug: since we have random input, sometimes the ELL compiled model fails, so the verification
    # of compiled model is disabled in this case until we resolve this
    verifier.verify(verify_compiled=False)
def __init__(self, folder, model_opt=None, data_opt=None):
    """Persist model/data option dictionaries into `folder`.

    Args:
        folder: str, destination directory (created when missing).
        model_opt: optional model options to write out.
        data_opt: optional dataset options to write out.
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
    self.log = logger.get()
    self.folder = folder
    self.tf_saver = None
    # Write out whichever option dictionaries were supplied.
    for opt, opt_fname in ((model_opt, kModelOptFilename),
                           (data_opt, kDatasetOptFilename)):
        if opt is not None:
            self.save_opt(os.path.join(folder, opt_fname), opt)
def get_filtered_layers_list(modelLayers, maxLayerCount=None):
    """Returns a relevant list of CNTK layers and layer objects

    Args:
        modelLayers: iterable of CNTK layer/function objects.
        maxLayerCount: optional int cap on how many layers to keep.
    """
    _logger = logger.get()
    # Go through the layers and append layer objects to the relevantLayers list
    relevantLayers = []
    lastSoftmaxLayer = None
    for currentLayer in modelLayers:
        if (isinstance(currentLayer, cntk_py.Function)):
            if (LayerFactory.has_inputs(currentLayer)):
                layerObject = LayerFactory.get_layer_object(currentLayer)
                if (layerObject is not None):
                    relevantLayers.append(layerObject)
            elif currentLayer.op_name == 'CrossEntropyWithSoftmax':
                # ugly hack for CrossEntropyWithSoftmax
                # CrossEntropyWithSoftmax pops up in the beginning of the layers list
                # because the input is connected to it (it's used for evaluating training)
                lastSoftmaxLayer = SoftmaxLayer(currentLayer)
            else:
                _logger.warning("Will not process " + currentLayer.op_name +
                                " - empty input shape.")
    if (lastSoftmaxLayer is not None):
        # Retroactively insert a softmax layer
        relevantLayers.append(lastSoftmaxLayer)
    if (maxLayerCount is not None):
        maxLayerCount = min(maxLayerCount, len(relevantLayers))
        relevantLayers = relevantLayers[0:maxLayerCount]
    # Go through the layers and set the output characteristics:
    # - padding parameters for output, based on the next layer's input
    # - output shape, which is adjusted to include the padding
    currentLayer = None
    for i in range(len(relevantLayers)):
        currentLayer = relevantLayers[i]
        if (i < (len(relevantLayers) - 1)):
            # Use the next layer's input characteristics to set the output for this layer
            nextLayer = relevantLayers[i + 1]
            currentLayer.set_output_characteristics(nextLayer)
        else:
            # This is the last layer, so the output characteristics are known
            currentLayer.set_output_characteristics(None)
        _logger.info(currentLayer)
    return relevantLayers
def __init__(self, num, batch_size=1, progress_bar=False, log_epoch=10,
             get_fn=None, cycle=False, shuffle=True, stagnant=False, seed=2,
             num_batches=-1):
    """Construct a batch iterator over `num` examples.

    Args:
        num: int, total number of examples.
        batch_size: int, batch size.
        progress_bar: bool, whether to display a progress bar.
        log_epoch: int, stored epoch interval used for logging.
        get_fn: optional callable stored for fetching batch data.
        cycle: bool, stored flag for wrapping around after the last batch.
        shuffle: bool, shuffle the iteration order up front.
        stagnant: bool, stored flag used by the iteration logic.
        seed: int, seed for the internal RandomState.
        num_batches: int, optional cap on steps (<= 0 means no cap).
    """
    self._num = num
    self._batch_size = batch_size
    self._step = 0
    # Number of batches needed to cover all examples (last may be short).
    self._num_steps = int(np.ceil(self._num / float(batch_size)))
    if num_batches > 0:
        self._num_steps = min(self._num_steps, num_batches)
    self._pb = None
    self._variables = None
    self._get_fn = get_fn
    self.get_fn = get_fn  # NOTE(review): public alias of _get_fn; confirm both are needed
    self._cycle = cycle
    self._shuffle_idx = np.arange(self._num)
    self._shuffle = shuffle
    self._random = np.random.RandomState(seed)
    if shuffle:
        # Shuffle once up front so the first epoch is already randomised.
        self._random.shuffle(self._shuffle_idx)
    self._shuffle_flag = False
    self._stagnant = stagnant
    self._log_epoch = log_epoch
    self._log = logger.get()
    self._epoch = 0
    if progress_bar:
        self._pb = pb.get(self._num_steps)
        pass
    # Guards concurrent access to iterator state.
    self._mutex = threading.Lock()
    pass
def test_raspberryPi(self):
    """Run the drive test against every configured target platform in `targets`."""
    global cluster
    log = logger.get()
    for target in targets:
        log.info(
            "=============== Testing platform: {} ===================".
            format(target))
        # DriveTest is used as a context manager so its teardown runs per target.
        with drivetest.DriveTest(cluster=cluster, target=target,
                                 target_dir="/home/pi/test",
                                 username="******", password=password,
                                 expected="coffee mug", timeout=300,
                                 apikey=key, gitrepo=gitrepo) as driver:
            driver.run_test()
def print_layer(layer):
    """Log a one-line, human-readable summary of a darknet layer dict.

    Layers whose type has no display name are silently skipped.
    """
    _logger = logger.get()
    layer_type = layer.get("type")
    if layer_type == "convolutional":
        # xnor-marked convolutions are the binarised variant.
        pretty_type = ("BinaryConvolution" if layer.get("xnor") == 1
                       else "Convolution")
    else:
        pretty_type = {
            "connected": "FullyConnected",
            "maxpool": "MaxPooling",
            "avgpool": "AveragePooling",
            "softmax": "Softmax",
            "region": "RegionDetection",
        }.get(layer_type)
    if pretty_type is None:
        return
    template = ("{} : {h}x{w}x{c} -> {out_h}x{out_w}x{out_c}"
                " | input padding {inputPadding} output padding {outputPadding}")
    _logger.info(template.format(pretty_type, **layer))
def __init__(self, cluster=None, ipaddress=None, username=None, password=None,
             source_dir=None, target_dir=None, copyback_files=None,
             copyback_dir=None, command=None, logfile=None, start_clean=True,
             cleanup=True, timeout=None, all=None, source_files=None,
             apikey=None):
    """Configure a remote command runner.

    Either `cluster` (URL string or board table object) or `ipaddress`
    must be supplied; raises Exception when both are missing.
    """
    self.cluster = cluster
    # A string cluster argument is treated as a URL for the board table.
    if isinstance(cluster, str):
        self.cluster = picluster.PiBoardTable(cluster, apikey)
    self.ipaddress = ipaddress
    self.username = username
    self.password = password
    self.source_dir = source_dir
    self.source_files = source_files
    self.target_dir = target_dir
    self.copyback_files = copyback_files
    self.copyback_dir = copyback_dir
    self.command = command
    self.start_clean = start_clean
    self.cleanup = cleanup
    self.logfile = logfile
    self.timeout = timeout
    self.all = all
    # Filled in while running.
    self.machine = None
    self.ssh = None
    self.buffer = None
    # global logger is hooked up to parent modules by module name and this
    # logger can see all the remote command output from all commands, which
    # will be formatted differently with "ThreadId: " prefix so user can
    # make sense of the combined output when remote commands are running in
    # parallel.
    if self.logfile:
        self.logger = logger.setup(self.logfile)
    else:
        self.logger = logger.get()
    if not cluster and not ipaddress:
        raise Exception("Error: required ipaddress or cluster or both")
    # Sanity-check parameters
    # NOTE(review): os.path.pathsep is the PATH-list separator (':'/';'),
    # not the directory separator (os.sep) — confirm this check really
    # rejects multilevel directories.
    if self.target_dir and os.path.pathsep in self.target_dir:
        raise Exception("Error: multilevel target directories not supported")
def __init__(self, height, width, output_fname, semantic_only=True):
    """H5 dataset builder; reads and shuffles the image ID list.

    Args:
        height: int, stored target image height.
        width: int, stored target image width.
        output_fname: str, path of the output h5 file.
        semantic_only: bool, stored flag.
    """
    self.height = height
    self.width = width
    self.semantic_only = semantic_only
    self.log = logger.get()
    self.output_fname = output_fname
    self.log.info("Output h5 dataset: {}".format(self.output_fname))
    self.log.info("Reading image IDs")
    self.img_ids = self.read_ids()
    # Shuffle sequence.
    # Fixed seed so the shuffled order is reproducible across runs.
    random = np.random.RandomState(2)
    shuffle = np.arange(len(self.img_ids))
    random.shuffle(shuffle)
    self.img_ids = [
        self.img_ids[shuffle[idx]] for idx in range(len(self.img_ids))
    ]
    pass
def get_layer_object(cntkLayer):
    """Factory: wrap a CNTK layer in the matching importer layer object.

    Returns:
        The wrapper instance, or None for irrelevant/invalid layers
        (a message is logged in either case).
    """
    _logger = logger.get()
    # Dispatch table: CNTK op_name -> wrapper class (replaces a long
    # if/elif chain; behaviour unchanged).
    simple_layers = {
        'Activation': ActivationLayer,
        'AveragePooling': AveragePoolingLayer,
        'BatchNormalization': BatchNormalizationLayer,
        'Dense': DenseLayer,
        'ElementTimes': ElementTimesLayer,
        'LeakyReLU': LeakyReLULayer,
        'linear': LinearLayer,  # Note: this op_name is lowercase
        'MaxPooling': MaxPoolingLayer,
        'Minus': NegativeBiasLayer,
        'Plus': BiasLayer,
        'Pooling': PoolingLayer,
        'PReLU': PReLULayer,
        'ReLU': ReLULayer,
        'Softmax': SoftmaxLayer,
    }
    try:
        op = cntkLayer.op_name
        if op == 'Convolution':
            # Block convolutions map to the regular layer, primitive ones
            # to the binary variant (behaviour preserved from the original).
            if cntkLayer.is_block:
                return ConvolutionLayer(cntkLayer)
            return BinaryConvolutionLayer(cntkLayer)
        if op in simple_layers:
            return simple_layers[op](cntkLayer)
        _logger.warning("Will not process " + op +
                        "- skipping this layer as irrelevant.")
    except (ValueError, AttributeError) as e:
        # raised if a layer contains invalid characteristics
        # Fixed: the original passed several positional args to info(),
        # which logging treats as %-format arguments, so the message was
        # never rendered correctly.
        _logger.info("\nWill not process %s - %s" % (cntkLayer.op_name, str(e)))
    return None
def __init__(self, status_func=None):
    """Video-player wrapper configured from space_window.conf."""
    self._status_func = status_func
    self.playing = False
    config = Config('space_window.conf', __file__)
    self._player = config.get('player', 'player', _player)
    self._player_args = config.get('player', 'player_args', _player_args)
    self._player_pl_args = config.get('player', 'playlist_player_args',
                                      _player_args)
    # Full command lines for single files, playlists and streams.
    self._player_cmd = self._player + ' ' + self._player_args
    self._player_pl_cmd = self._player + ' ' + self._player_pl_args
    self._player_stream_cmd = self._player_pl_cmd
    self._player_fbcp = ''
    fbdev = config.get('pygame', 'fbdev', 'None')
    if fbdev != 'None':
        # Mirror the framebuffer with fbcp before launching the player.
        self._player_fbcp = 'fbcp&'
        self._player_cmd = 'fbcp& ' + self._player_cmd
        self._player_pl_cmd = 'fbcp& ' + self._player_pl_cmd
    # NOTE(review): bound to a local name, so this logger is discarded when
    # __init__ returns — probably intended as self._log; confirm.
    _log = logger.get(__name__)
def __init__(self):
    """State for plotting cost-vs-accuracy curves across an ELL-models tree."""
    self.logger = logger.get()
    self.arg_parser = argparse.ArgumentParser(
        "This script takes a path to an ELL-models model folder hierarchy and plots cost-accuracy curves\n"
        "that can be used to select the 'best' models")
    # model: discovered paths and per-model statistics.
    self.models_root = None
    self.model_paths = []
    self.model_stats = []
    self.frontier_models = []
    # output
    self.output_figure = "model_speed_accuracy.png"
    self.output_format = "png"
    self.output_frontier_json_file = "frontier_models.json"
    self.output_all_models_json_file = "all_models.json"
    # plot: axis limits and which targets to draw.
    self.plot_series = []
    self.plot_max_secs_per_frame = 1.5
    self.plot_min_top1_accuracy = 20
    self.plot_targets = ["pi3"]
    # reference: https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
    self.platforms_symbols = {
        "pi3": "ro",  # red dot
        "pi3_64": "bs",  # blue square
        "aarch64": "g^"  # green triangle
    }
    self.platforms_lines = {
        "pi3": "r-",  # red line
        "pi3_64": "b-",  # blue line
        "aarch64": "g-"  # green line
    }
    self.platforms_legend = {
        "pi3": "Raspberry Pi3/Raspbian",
        "pi3_64": "Raspberry Pi3/SUSE",
        "aarch64": "Dragonboard D410c"
    }
def __init__(self, batch_iter, max_queue_size=10, num_threads=5,
             log_queue=20, name=None):
    """ Data provider wrapper that supports concurrent data fetching. """
    super(ConcurrentBatchIterator, self).__init__()
    self.max_queue_size = max_queue_size
    self.num_threads = num_threads
    # Bounded queue: fetchers block once it holds max_queue_size batches.
    # NOTE(review): `Queue.Queue` is the Python 2 module name ('queue' in
    # Python 3) — this module appears to target Python 2.
    self.q = Queue.Queue(maxsize=max_queue_size)
    self.log = logger.get()
    self.batch_iter = batch_iter
    self.fetchers = []
    # Spawn the fetcher threads immediately.
    self.init_fetchers()
    self.counter = 0
    self.relaunch = True
    self.log_queue = log_queue
    self.name = name
    pass
def __init__(self):
    """Default settings for the model compilation driver."""
    self.config = None
    self.files = []
    self.includes = []
    self.tcc = []
    self.tools = None
    # Code-generation language and target platform.
    self.language = "python"
    self.target = "host"
    self.verbose = False
    self.llvm_format = None
    # Tool-chain toggles.
    self.no_opt_tool = False
    self.no_llc_tool = False
    self.profile = False
    self.blas = True
    self.optimize = True
    self.optimization_level = None
    self.fuse_linear_ops = True
    self.debug = False
    # Naming for the generated model and its entry point.
    self.model_name = ""
    self.func_name = "Predict"
    self.objext = "o"
    self.logger = logger.get()
def process_network(network, weightsData, convolutionOrder):
    """Returns an ell.neural.NeuralNetworkPredictor as a result of parsing the network layers

    Args:
        network: iterable of darknet layer dicts (each with a 'type' key).
        weightsData: raw weights passed through to the layer converters.
        convolutionOrder: ordering hint passed to the convolution converter.
    """
    ellLayers = []
    _logger = logger.get()
    for layer in network:
        if layer['type'] in ['net', 'cost', 'dropout']:
            # Known layers that can be safely ignored or don't require processing
            pass
        elif layer['type'] == 'convolutional':
            # May expand into several ELL layers, hence +=.
            ellLayers += process_convolutional_layer(layer, weightsData,
                                                     convolutionOrder)
        elif layer['type'] == 'connected':
            ellLayers += process_fully_connected_layer(layer, weightsData)
        elif layer['type'] == 'maxpool':
            ellLayers.append(
                get_pooling_layer(layer, ell.neural.PoolingType.max))
        elif layer['type'] == 'avgpool':
            ellLayers.append(
                get_pooling_layer(layer, ell.neural.PoolingType.mean))
        elif layer['type'] == 'softmax':
            ellLayers.append(get_softmax_layer(layer))
        elif layer['type'] == 'region':
            ellLayers.append(get_region_detection_layer(layer))
        else:
            _logger.warning(
                "Skipping unrecognized layer %s. The model may still work without it",
                layer['type'])
    if ellLayers:
        # Darknet expects the input to be between 0 and 1, so prepend
        # a scaling layer with a scale factor of 1/255
        parameters = ellLayers[0].parameters
        ellLayers = [get_first_scaling_layer(parameters)] + ellLayers
    predictor = ell.neural.NeuralNetworkPredictor(ellLayers)
    return predictor
def predictor_from_cntk_model_using_new_engine(modelFile, plotModel=True):
    """Loads a CNTK model and returns an ell.neural.NeuralNetworkPredictor

    Args:
        modelFile: path to the CNTK model file.
        plotModel: when True, also writes an SVG plot next to the model.

    Raises:
        Whatever the conversion pipeline raises (after logging it).
    """
    _logger = logger.get()
    _logger.info("Loading...")
    z = load_model(modelFile)
    _logger.info("\nFinished loading.")
    if plotModel:
        filename = os.path.join(os.path.dirname(modelFile),
                                os.path.basename(modelFile) + ".svg")
        cntk_utilities.plot_model(z, filename)
    try:
        _logger.info("Pre-processing...")
        # Get the relevant nodes from CNTK that make up the model
        importer_nodes = cntk_utilities.Utilities.get_model_nodes(z)
        _logger.info("\nFinished pre-processing.")
        # Create an ImporterModel from the CNTK nodes
        importer_model = import_nodes(importer_nodes)
        # Use the common importer engine to drive conversion of the
        # ImporterModel to ELL layers
        importer_engine = common.importer.ImporterEngine()
        ell_layers = importer_engine.convert(importer_model)
        # Create an ELL neural network predictor from the layers
        predictor = ell.neural.NeuralNetworkPredictor(ell_layers)
    except BaseException as exception:
        _logger.error(
            "Error occurred attempting to convert cntk layers to ELL layers: "
            + str(exception))
        # Bare re-raise preserves the original traceback; the original
        # 'raise exception' re-bound it and lost the inner frames.
        raise
    return predictor
def __init__(self):
    """Holds arguments and state for the remote validation pass."""
    self.arg_parser = argparse.ArgumentParser(
        "This script performs a validation pass on a given model\n"
        "on a target device (such as a Raspberry Pi) using scp, and retrieves the validation result\n"
        "Pre-requisites:\n"
        " 1) the model files, deployed to /home/pi/pi3 (or similar) using drivetest.py\n"
        " 2) the validation set, copied to /home/pi/validation (or similar) using copy_validation_set.py\n")
    # Connection settings (credentials appear intentionally masked here).
    self.ipaddress = None
    self.cluster = None
    self.username = "******"
    self.password = "******"
    # Model and validation-set locations on the device.
    self.model_name = None
    self.labels = "categories.txt"
    self.maxfiles = 200
    self.ssh = None
    self.target_dir = "/home/pi/pi3"
    self.target = "pi3"
    self.truth = "/home/pi/validation/val_map.txt"
    self.images = "/home/pi/validation"
    # Filled in while running.
    self.test_dir = None
    self.output_dir = None
    self.machine = None
    self.logger = logger.get()
def perform_logging():
    """ perform the logging task loop reading the logging queue and write messages to output log file """
    # NOTE: Python 2 print-chevron syntax throughout; this function is
    # Python 2 only.  The bare excepts appear intentional: the logging loop
    # must survive any single bad line or checkpoint failure.
    start_time = time.time()
    # Loop until another thread raises the stop flag.
    while not stopped():
        try:
            line = get()
            if line:
                try:
                    # Append to the local log; echo to stderr when verbose.
                    # NOTE(review): the handle from open(..., "a+") is never
                    # closed explicitly (relies on refcounting).
                    print >>open(local_log_name, "a+"), line
                    if verbose:
                        print >>sys.stderr, line
                except:
                    print >>sys.stderr, "Invalid Log Line!"
            try:
                # every 5 minutes checkpoint the log file to the server for safe keeping
                if time.time() - start_time > 300:
                    start_time = time.time()
                    if not dryrun:
                        fs_mod.fs_put(local_log_name, remote_log_name, verbose=verbose)
            except:
                print >>sys.stderr, "Error checkpointing log file!"
        except:
            print >>sys.stderr, "Exception while logging!"
            continue
def logConf(d):
    """Log every key/value pair of the configuration dict `d` at INFO level.

    NOTE(review): dict.iteritems() exists only on Python 2; this function
    breaks under Python 3 (use items() there) — confirm the target runtime.
    """
    import logger
    log = logger.get()
    for k, v in d.iteritems():
        log.info("%s: %s" % (k, v))
# -*- coding: utf-8 -*- """Argument Parser for Linot input commands This module rewrites or extends functions of `argparse` in the Python Standard Library. It simplifies the command interface so that services can be developed without worrying about the complexity of user inputs """ from __future__ import print_function from argparse import SUPPRESS, ArgumentParser import re from io import BytesIO import logger logger = logger.get().getLogger(__name__) class LinotParser(ArgumentParser): """Extends the usability of ArgumentParser Attributes: (same with ArgumentParser in standard library) """ def __init__(self, *args, **kwargs): ArgumentParser.__init__(self, *args, **kwargs) self._sub_parser = None self._direct_commands = [] def get_sub_parser(self):
import asyncio from enum import IntEnum import json import os import aiohttp import database as db import util import logger import perf import retry log = logger.get("ARCHIVER") ERROR_MISSING_ACCESS = 50001 class HttpError(RuntimeError): def __init__(self, response): super().__init__("HTTP status {0}".format(response.status)) self.response = response class ChannelType(IntEnum): GUILD_TEXT = 0 DM = 1 GUILD_VOICE = 2 GROUP_DM = 3 GUILD_CATEGORY = 4 def response_is_error(response):
def __init__(self, ipaddress=None, cluster=None, outdir=None, profile=False,
             model=None, labels=None, target="pi3",
             target_dir="/home/pi/pi3", username="******",
             password="******", iterations=1, expected=None, blas=True,
             compile=COMPILE_INCREMENTAL, test=True, verbose=True,
             timeout=None):
    """Configure a drive-test run against a remote device.

    Raises:
        Exception: when compile is COMPILE_NONE but the output directory
            does not already exist.
    """
    self.ipaddress = ipaddress
    self.build_root = find_ell.find_ell_build()
    self.ell_root = os.path.dirname(self.build_root)
    self.output_dir = outdir
    self.target_dir = target_dir
    self.labels_file = labels
    self.ell_model = model
    self.username = username
    self.password = password
    self.target = target
    self.cluster = cluster
    self.blas = blas
    self.expected = expected
    self.profile = profile
    self.compile = compile
    self.test = test
    self.verbose = verbose
    self.logger = logger.get()
    if timeout:
        self.timeout = int(timeout)
    else:
        self.timeout = None
    self.iterations = iterations
    # local state.
    self.model_name = None
    self.machine = None
    self.ell_json = None
    self.created_dirs = []
    self.gallery_url = "https://github.com/Microsoft/ELL-models/raw/master/models/ILSVRC2012/"
    # initialize state from the args
    if not self.output_dir:
        self.output_dir = "test"
    self.test_dir = os.path.abspath(self.output_dir)
    if os.path.isdir(self.test_dir):
        if self.compile == COMPILE_FULL:
            # Full recompile: start from a clean output tree.
            rmtree(self.test_dir)
    else:
        if self.compile == COMPILE_NONE:
            raise Exception(
                "Test only usage requires outdir '{}' to exist already".
                format(self.output_dir))
        os.makedirs(self.test_dir)
    if self.compile:
        self.extract_model_info(self.ell_model, self.labels_file)
    # Per-target output goes in a subdirectory of the test dir.
    self.output_dir = os.path.join(self.test_dir, self.target)
    if self.test:
        self.resolve_address(self.ipaddress, self.cluster)
from streamlink import Streamlink from cache import SynchronisedCache as Cache import time import os from functools import partial from itertools import chain from player_base import PlayerBase import threading import logger _default_res = '360p' _chunk_size = 10240 _cache_size = 50 _thread_id = 0 _cnt = 1 _log = logger.get(__name__) def _next_thread_id(): global _thread_id _thread_id += 1 return _thread_id class Streamer(PlayerBase): def __init__(self, status_func=None): PlayerBase.__init__(self, status_func) self.qualities_cache = Cache(_cache_size) self.streamlink = Streamlink() #check if this works for multiple streams self._output = self._create_output()
def _update_cast_hostnames(self, lines):
    """Rebuild the HOSTNAME declarations in a CAST config (in place).

    Collects existing HOSTNAME lines, merges in hosts from the host map,
    orders them so that a host referencing another (via "[name]" or
    localhost) comes after its target, re-inserts them, disables HOST
    declarations that reference unknown hosts, and ensures a default
    "HOST localhost" exists.
    """
    self._cast_hostnames = {}
    ordHosts = []
    removelines = []
    # Collect and remember every existing HOSTNAME line.
    for i, line in enumerate(lines):
        mo = CCastConfig.reHostName.match(line)
        if mo != None:
            self._cast_hostnames[mo.group(1)] = mo.group(2)
            removelines.append(i)
            ordHosts.append(mo.group(1))
    # Index at which the regenerated block will be re-inserted.
    lastremoved = removelines[-1] - len(removelines) + 1 if len(removelines) else 0
    for i in reversed(removelines):
        lines.pop(i)
    # add localhost definition
    if not "localhost" in self._cast_hostnames:
        self._cast_hostnames["localhost"] = self.hostMap.fixLocalhost("localhost")
        ordHosts.append("localhost")
    # add additional hosts from hconf
    for k, v in self.hostMap.items():
        if k in self._cast_hostnames:
            # update the value
            self._cast_hostnames[k] = v
            continue
        # Only identifier-like names become HOSTNAME entries.
        mo = re.match("^[a-zA-Z][a-zA-Z0-9_]*$", k)
        if not mo:
            continue;
        ordHosts.append(k)
        self._cast_hostnames[k] = v
    # topological sorting of hosts
    # if host uses target, target must be defined first
    def uses(host, target):
        if target == "localhost" and self._cast_hostnames[host] == target:
            return True
        if self._cast_hostnames[host] == "[%s]" % target:
            return True
        return False
    # Repeated bubble-style passes; retries bounds the work so a reference
    # cycle cannot loop forever.
    changed = True
    retries = 20
    while changed and retries > 0:
        neword = []
        changed = False
        while len(ordHosts) > 0:
            host = ordHosts.pop(0)
            i = 0
            while i < len(ordHosts):
                if uses(host, ordHosts[i]):
                    # Move the referenced target ahead of this host.
                    neword.append(ordHosts.pop(i))
                    changed = True
                    continue
                i += 1
            neword.append(host)
        ordHosts = neword
        retries -= 1
    if retries < 1:
        logger.get().warn("HOSTNAME-s COULD NOT BE SORTED.")
    # Re-insert in reverse so the final order matches ordHosts.
    for h in reversed(ordHosts):
        lines.insert(lastremoved, "HOSTNAME %s %s" % (h, self._cast_hostnames[h]))
    last = len(ordHosts) + lastremoved
    # remove invalid HOST declarations
    defaultHostFound = False
    otherHeaderFound = False
    for i, line in enumerate(lines):
        mo = CCastConfig.reHost.match(line)
        if mo != None:
            host = mo.group(1).strip(" []")
            valid = host in self._cast_hostnames
            if not valid:
                # Comment out rather than delete, so the user can see it.
                lines[i] = "# disabled " + line
            if valid and not defaultHostFound:
                # Only a HOST seen before any other section header can act
                # as the default.
                defaultHostFound = not otherHeaderFound
            continue
        if not otherHeaderFound and self._isHeader(line):
            otherHeaderFound = True
            continue
    # add the missing HOST
    if not defaultHostFound:
        lines.insert(last, "HOST localhost")
        last = last + 1
# # Requires: Python 3.5+ # #################################################################################################### import argparse import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../utilities/pythonlibs')) sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib')) import find_ell # noqa 401 import ell import logger _logger = logger.get() import cntk_to_ell import ziptools # This function is also used from cntk_to_ell_importer_test.py which is why it takes args def main(args): arg_parser = argparse.ArgumentParser( description="Converts CNTK model to ELL model\n" "Example:\n" " cntk_import.py model.cntk\n" "This outputs 'model.ell' which can be compiled with ELL's 'wrap' tool\n") arg_parser.add_argument( "cntk_model_file", help="path to a CNTK model file, or a zip archive of a CNTK model file")
chr(194), chr(160) ]) # Special characters that may appear in the class name. charClassName = set(['.', ',', '$', '\'', '`', ':', '-', '#']) position = set([ 'in', 'on', 'under', 'below', 'above', 'over', 'on', 'at', 'behind', 'near' ]) # WordNet lemmatizer. lemmatizer = WordNetLemmatizer() # Logger log = logger.get() class TreeNode: """Parse tree.""" def __init__(self, className, text, children, level): """Construct a tree. """ self.className = className self.text = text self.children = children self.level = level pass def __str__(self): """To string (with tree structure parentheses)."""
def update(self): #Game loop
    """Main game loop: poll events, advance game state, draw, and cap
    the frame rate. Never returns; runs one iteration per frame.
    """
    wasplaying = True #Hack to figure out when we need to change sounds
    self.timer = 0
    self.frameCount = 0 #Timer logic: after 30 frames, increment hud timer
    self.didTeleport = False
    while True:
        #Start timer and handle events
        milliStart = pygame.time.get_ticks()
        events = pygame.event.get()
        logEvents = eventmanager.get().handleEvents(events)
        #if event = r or pause --> don't log
        if logEvents :
            logger.get().add(logger.LogNode(events))
        # Invincibility window lasts 2 seconds (NOTE(review): compares
        # time.clock() against self.timer — presumably set elsewhere when
        # invincibility starts; confirm).
        if (time.clock() - self.timer > 2) and self.invincible :
            self.stopInvincibility()
        elif eventmanager.get().REPLAYPRESSED == True :
            #save game before hand
            self.saveState()
            logger.get().replay()
            self.saveCharSelection() # for some reason, replay resets the save file's char to 0
            self.loadLevel(0)
            #give temporary invincibility -> for 2 seconds
            self.tempInvincibility()
        #player is playing the game
        if self.startMenu.isPlaying():
            if not wasplaying:
                sound.play_bgm(self.currLevel.bgm)
            #player died
            if not self.currLevel.player_alive:
                self.player_lives -= 1
                #reset logger
                logger.get().setStart(self.currLevel.player.rect.x, self.currLevel.player.rect.y)
                logger.get().clear()
                #reload level
                self.loadLevel()
                #check if died due to teleportation
                if self.didTeleport:
                    sound.play_bgm(self.currLevel.bgm)
                    self.didTeleport = False
                #game over
                if self.player_lives < 1:
                    self.currLevel.charSelected = False
                    self.currLevel.charsel.char = 0
                    self.screen.fill(0)
                    gameover = StaticImage( "images/gameover.jpg", 0, 0 )
                    gameover.rect.topleft = self.camera.window.centerx - gameover.rect.width/2,\
                        self.camera.window.centery - gameover.rect.width/2
                    gameover.draw(self.camera)
                    pygame.display.flip()
                    time.sleep(3)
                    self.player_lives = constants.PLAYER_LIVES
                    logger.get().setStart(0, 500)
            #select bg color depending on level
            #hopefully to cut down on image size
            if self.currLevel.levelNumber == -1:
                self.screen.fill(constants.LEVELNEG1_BGCOLOR)
            elif self.currLevel.levelNumber == 0:
                self.screen.fill(constants.LEVEL0_BGCOLOR)
            elif self.currLevel.levelNumber == 1:
                self.screen.fill(constants.LEVEL1_BGCOLOR)
            elif self.currLevel.levelNumber == 3:
                self.screen.fill(constants.LEVEL3_BGCOLOR)
            else:
                self.screen.fill(constants.DEFAULT_BGCOLOR)
            self.currLevel.draw(self.camera)
            if self.currLevel.charSelected and self.currLevel.plotOver:
                #Hud timer logic
                self.frameCount = self.frameCount + 1
                if self.frameCount > 30:
                    self.hud.incTime()
                    self.frameCount = 0
                self.hud.draw(self.camera, self)
            #Update player and enemies positions/current actions
            if not eventmanager.get().isPaused():
                self.currLevel.update()
                if self.currLevel.charSelected:
                    self.hud.update()
                #check for telportation
                if constants.TELEPORT == True:
                    wasplaying = False
                    self.handleTeleport()
                    constants.TELEPORT = False
                #check for level completion
                if self.currLevel.levelCompleted:
                    wasplaying = False
                    self.levelNumber += 1
                    self.hud.resetTime()
                    self.currLevel = self.getCurrentLevel()
                    logger.get().clear()
                else:
                    wasplaying = True
            else:
                #show pause menu
                self.pauseMenu.draw(self.camera)
                self.pauseMenu.update()
                #'quit to main' clicked
                if self.pauseMenu.showMainMenu:
                    self.startMenu.playing = False
                    self.startMenu.show_level = False
                    self.currLevel = self.getCurrentLevel()
                    self.pauseMenu.showMainMenu = False
                    eventmanager.get().PAUSED = False
                elif self.pauseMenu.restartLevel:
                    #'restart level' clicked
                    self.hud.resetTime()
                    self.currLevel = self.getCurrentLevel()
                    self.pauseMenu.restartLevel = False
                    eventmanager.get().PAUSED = False
            #Update camera position using player's
            player_rect = self.currLevel.get_player_rect()
            self.camera.updatePosition(player_rect)
        else:
            #update inputs from startMenu
            if wasplaying:
                sound.play_bgm(self.startMenu.bgm)
            self.startMenu.update()
            self.camera.zeroPosition()
            self.startMenu.draw(self.camera)
            wasplaying = False
            if self.startMenu.isPlaying():
                self.levelNumber = self.startMenu.getLevel()
                self.hud.resetTime()
                self.currLevel = self.getCurrentLevel()
                logger.get().clear()
            #'Load Game' clicked
            if self.startMenu.loadLevel:
                self.loadLevel()
        #Fill the screen, draw level, flip the buffer
        pygame.display.flip()
        #Stop timer and sleep for remainder of time
        milliEnd = pygame.time.get_ticks()
        leftover = constants.mSPF - (milliEnd - milliStart)
        #sys.stdout.write('Time left in frame: %s\n' % leftover)
        if leftover > 0:
            pygame.time.wait(int(leftover))
def __init__(self):
    """Initialize with an empty registry and the shared logger instance."""
    self._reg = {}
    self.log = logger.get()
    # (removed a dead trailing `pass` — it had no effect after real statements)
return args if __name__ == '__main__': # Command-line arguments args = parse_args() # Model ID model_id = get_model_id('vae_mnist') # Log folder logs_folder = args.logs logs_folder = os.path.join(logs_folder, model_id) # Plain text logger log = logger.get(os.path.join(logs_folder, 'raw')) log.log_args() log_register(log.filename, 'plain', 'Raw logs') # Create time series loggers logp_logger = TimeSeriesLogger( os.path.join(logs_folder, 'logp.csv'), labels=['train', 'valid'], name='Log prob lowerbound', buffer_size=1) sparsity_logger = TimeSeriesLogger( os.path.join(logs_folder, 'sparsity.csv'), labels=['encoder', 'decoder'], name='Hidden layer sparsity', buffer_size=1) step_time_logger = TimeSeriesLogger(
import logger import database as db log = logger.get("FACEIT_DB") async def update_nickname(faceit_guid, api_player_name): async with db.transaction() as tx: await tx.execute( "INSERT INTO faceit_aliases (faceit_guid, faceit_nickname) VALUES ($1, $2)", faceit_guid, api_player_name) await tx.execute( "UPDATE faceit_player SET faceit_nickname = $1 WHERE faceit_guid = $2", api_player_name, faceit_guid) log.info("Updated nickname %s for user %s" % (api_player_name, faceit_guid)) async def add_nickname(faceit_guid, api_player_name): async with db.transaction() as tx: await tx.execute( "INSERT INTO faceit_aliases (faceit_guid, faceit_nickname) VALUES ($1, $2)", faceit_guid, api_player_name) log.info("Added new nickname %s for user %s" % (api_player_name, faceit_guid)) async def channels_to_notify_for_user(guid): rows = await db.fetch( """ SELECT channel_id, custom_nickname
return args if __name__ == '__main__': # Command-line arguments args = parse_args() # Model ID model_id = get_model_id('vae_mnist') # Log folder logs_folder = args.logs logs_folder = os.path.join(logs_folder, model_id) # Plain text logger log = logger.get(os.path.join(logs_folder, 'raw')) log.log_args() log_register(log.filename, 'plain', 'Raw logs') # Create time series loggers logp_logger = TimeSeriesLogger(os.path.join(logs_folder, 'logp.csv'), labels=['train', 'valid'], name='Log prob lowerbound', buffer_size=1) sparsity_logger = TimeSeriesLogger(os.path.join(logs_folder, 'sparsity.csv'), labels=['encoder', 'decoder'], name='Hidden layer sparsity', buffer_size=1) step_time_logger = TimeSeriesLogger(os.path.join(logs_folder, 'step_time.csv'),
def _prepareLogDir(self): try: if not os.path.exists(self._logDir): os.makedirs(self._logDir) except Exception as e: logger.get().error("%s" % e)
import asyncio import datetime import db_endpoints as db import faceit_api import logger import traceback from config import config log = logger.get("MAIN") faceit_config = config(section="faceit")['faceit'] API_KEY = faceit_config['api_key'] DEFAULT_HEADERS = {"accept": "application/json", "Authorization": "Bearer {0}".format(API_KEY)} MATCH_SEARCH_RANGE = int(faceit_config['match_search_range_s']) async def get_winner_and_loser_score(score_string): # Faceit api has score listed as "16 / 7" score1, score2 = score_string.replace(" ", "").split("/") score_list = [int(score1), int(score2)] return max(score_list), min(score_list) async def get_player_rank_in_team(players_list, player_dict): return sorted(players_list, reverse=True, key=lambda x: int(x.get("player_stats").get("Kills"))).index(player_dict) + 1 async def get_team_data(teams_list): for team in teams_list: if team.get("team_stats").get("Team Win") == "1": winner_team_id = team.get("team_id")
import aiohttp import asyncio import os import http_util import logger import retry log = logger.get("FACEIT_API") FACEIT_API_KEY = os.environ.get("FACEIT_API_KEY", None) AUTH_HEADER = {"Authorization": "Bearer {0}".format(FACEIT_API_KEY)} class NotFound(Exception): pass class UnknownError(Exception): def __init__(self, response): super().__init__("Unknown faceit error: HTTP Status {0}".format( response.status)) self.response = response async def player_history(player_id, offset=0, limit=20): query = { "offset": str(offset), "limit": str(limit), }
matplotlib.use('Agg') import matplotlib.cm as cm import matplotlib.pyplot as plt import matplotlib.patches as patches import progress_bar as pb import plot_utils as pu from deep_dashboard_utils import log_register, TimeSeriesLogger from build_conv_lstm_tracker import build_tracking_model # from tud import get_dataset from kitti import get_dataset import logger log = logger.get() def collect_draw_sequence(draw_raw_imgs, draw_raw_gt_bbox, seq_length, height, width): count_draw = 0 idx_draw_frame = 0 skip_empty = True draw_imgs = [] draw_gt_box = [] while count_draw <= seq_length: if draw_raw_gt_bbox[0, idx_draw_frame, 4] == 1: skip_empty = False if not skip_empty:
def setChar(self, thechar):
    """Store the chosen character and propagate the choice to the logger."""
    self.char = thechar
    active_log = logger.get()
    active_log.setChar(thechar)
py3 = 0 except ImportError: from tkinter import * py3 = 1 import tkMessageBox import PIL.Image import PIL.ImageTk # XXX: should import first or later it will override ttk from idlelib.ToolTip import * log = logger.get(__name__) def set_Tk_var(): # These are Tk variables used passed to Tkinter and must be # defined before the widgets using them are created. global var_chk_record, var_chk_repeat var_chk_record = IntVar() var_chk_repeat = IntVar() def vp_start_gui(): """ Starting point when module is the main routine. """ global val, w, root root = Tk() w = IPAGUI(root)
import os, time, requests, json, pickle, sys
from shutil import copyfile as copy_file

import chromedriver_autoinstaller
from seleniumwire import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait

from utils import *

import logger

log = logger.get(f"blinkistscraper.{__name__}")


def has_login_cookies():
    """Return True if a saved cookies file ("cookies.pkl") exists in the cwd."""
    return os.path.exists("cookies.pkl")


def get_login_cookies():
    """Unpickle and return the saved login cookies from "cookies.pkl".

    Uses a context manager so the file handle is closed deterministically
    (the previous `pickle.load(open(...))` leaked the handle).
    """
    with open("cookies.pkl", "rb") as f:
        return pickle.load(f)


def load_login_cookies(driver):
    """Add every saved login cookie to the given selenium driver session."""
    for cookie in get_login_cookies():
        # selenium doesn't like float-based cookies parameters
        # if 'expiry' in cookie:
        #     cookie['expiry'] = int(cookie['expiry'])
        driver.add_cookie(cookie)
def __init__(self, name=None, var_name=None):
    """Initialize the listener with an optional display name and variable name.

    :param name: human-readable listener name (optional)
    :param var_name: name of the variable this listener watches (optional)
    """
    super(CmdListener, self).__init__()
    self.var_name = var_name
    self.name = name
    self.log = logger.get()
    # (removed a dead trailing `pass` — it had no effect after real statements)
import paho.mqtt.client as mqtt
import logger

# Last-will-and-testament payloads advertised on the availability topic.
LWT_ONLINE = 'online'
LWT_OFFLINE = 'offline'

_LOGGER = logger.get(__name__)


class MqttClient:
    """Thin wrapper around paho-mqtt configured from a config object."""

    def __init__(self, config):
        """Build the underlying paho client and configure auth and LWT.

        NOTE(review): the client is stored as `self._mqttc` but read back as
        `self.mqttc` — presumably a `mqttc` property exists outside this view;
        confirm, otherwise these reads raise AttributeError.
        NOTE(review): `client_id`, `topic_prefix`, `username`, `password` and
        `availability_topic` are read from `self`, so they are presumably
        properties derived from `config` defined elsewhere in the class.
        """
        self._config = config
        self._mqttc = mqtt.Client(
            client_id=self.client_id,
            clean_session=False,
            userdata={'global_topic_prefix': self.topic_prefix})
        if self.username and self.password:
            self.mqttc.username_pw_set(self.username, self.password)
        if self.availability_topic:
            topic = self._format_topic(self.availability_topic)
            _LOGGER.debug("Setting LWT to: %s" % topic)
            # Broker publishes LWT_OFFLINE (retained) if this client drops.
            self.mqttc.will_set(topic, payload=LWT_OFFLINE, retain=True)

    def publish(self, messages):
        """Publish each message (topic is prefixed via _format_topic).

        No-op when `messages` is empty or None.
        """
        if not messages:
            return
        for m in messages:
            topic = self._format_topic(m.topic)
            self.mqttc.publish(topic, m.payload, retain=m.retain)
# NOTE: this module is Python 2 (uses `print` statements).
import os, sys, errno
import re
import time
import subprocess as subp
import tempfile
import select, signal, threading
import fcntl
import options, logger
from messages import CMessageTextQueue, CLogMessageSource, CMessage
import itertools
import legacy

# Name of the external log4j server process.
LOG4J_PROCESS = "log4jServer"
SINGLE_READER = False
# Module-wide logger; the helpers below fall back to stdout when it is None.
LOGGER = logger.get()

def log(msg):
    """Log an informational message, or print it when no logger is set."""
    global LOGGER
    if LOGGER != None:
        LOGGER.log(msg)
    else:
        print msg

def warn(msg):
    """Log a warning, or print it with a "!!!" prefix when no logger is set."""
    global LOGGER
    if LOGGER != None:
        LOGGER.warn(msg)
    else:
        print "!!!", msg

def error(msg):
    """Log an error, or print it with a "!!!" prefix when no logger is set."""
    global LOGGER
    if LOGGER != None:
        LOGGER.error(msg)
    else:
        print "!!!", msg