def encode(inarr):
    conf = Config()
    result = []
    for element in inarr:
        result.append(
            encoder(element, conf.get_threshold(), conf.get_encoding_length()))
    return torch.stack(result, dim=0)
def __init__(self, N, M):
    """Initialize the NTM Memory matrix.

    The memory's dimensions are (batch_size x N x M).
    Each batch has its own memory matrix.

    :param N: Number of rows in the memory.
    :param M: Number of columns/features in the memory.
    """
    super(NTMMemory, self).__init__()

    self.N = N
    self.M = M

    # The memory bias allows the heads to learn how to initially address
    # memory locations by content
    self.register_buffer('mem_bias', torch.Tensor(N, M))
    self.memory = None
    self.init_mem = None

    # Initialize memory bias
    stdev = 1 / (np.sqrt(N + M))
    nn.init.uniform_(self.mem_bias, -stdev, stdev)

    conf = Config()
    lower_bound, upper_bound = conf.output_range()
    threshold = conf.get_threshold()
    length = conf.get_encoding_length()
    mem = []
    for num in range(lower_bound, upper_bound + 1):
        mem.append(enc(num, threshold, length))
    self.memory = torch.stack(mem, dim=0).float()
    self.init_mem = torch.stack(mem, dim=0).float()
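A minimal sketch (not part of the original snippet) of how each batch might then receive its own copy of the pre-encoded memory built above; the reset helper below is hypothetical and assumes it lives on the same NTMMemory class:

def reset(self, batch_size):
    # Hypothetical helper: clone the pre-encoded init_mem for every batch
    # element so the working memory has shape (batch_size x N x M).
    self.batch_size = batch_size
    self.memory = self.init_mem.clone().unsqueeze(0).repeat(batch_size, 1, 1)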
def __init__(self):
    config = OpenIHMConfig()
    self.real_config = config
    self.test_dir = os.path.dirname(__file__)
    config_file = os.path.join(self.test_dir, '..', 'test_openihm.cfg')
    read = config.read(config_file)
    if len(read) != 1:
        m = 'Need test_openihm.cfg setup with database parameters'
        e = unittest.SkipTest(m)
        raise e
    Config.set_config(config)
    self.dbconfig = config.database_config()
    self.config = DbConfig(**self.dbconfig)
def create_config():
    return Config('config.json',
                  foldername='Config',
                  username='******',
                  oauth='oauth: OAUTH',
                  nick='NICK_NAME',
                  channel='CHANNEL')
class MSSSParams(object):
    # specified = attrib(default=Factory(specified_param))
    # @TODO: change them when testing
    conf = Config()
    M = conf.get_encoding_length()
    m_min, m_max = conf.output_range()
    N = m_max - m_min + 1
    # number of elements in the input sequence
    seq_length = conf.get_input_size() + 1
    var_seq_length = conf.var_input_size + 1
    # length of the vector of each element in the input sequence
    elem_size = conf.get_encoding_length()
    total_data_size = conf.get_train_data_size()
    # total_data_size = conf.get_test_data_size()
    batch_len = conf.batch_size
    batch_num = total_data_size / batch_len

    name = attrib(default="msss-task")
    controller_size = attrib(default=100, convert=int)
    controller_layers = attrib(default=conf.layer_size, convert=int)
    num_heads = attrib(default=1, convert=int)
    sequence_width = attrib(default=elem_size, convert=int)
    # sequence_min_len = attrib(default=1, convert=int)
    # sequence_max_len = attrib(default=20, convert=int)
    memory_n = attrib(default=N, convert=int)
    memory_m = attrib(default=M, convert=int)
    num_batches = attrib(default=batch_num, convert=int)
    batch_size = attrib(default=batch_len, convert=int)
    rmsprop_lr = attrib(default=1e-4, convert=float)
    rmsprop_momentum = attrib(default=0.9, convert=float)
    rmsprop_alpha = attrib(default=0.95, convert=float)
def Run(self, file_path, frame_delay=aob.DEFAULT_frameDelay,
        show_selections=aob.DEFAULT_showSelections):
    """Must be called from the UI event thread.
    Creates a worker thread with the _RunBegin() and _RunEnd() methods.

    Args:
        file_path (str): path to the GIF file to create
        frame_delay (float): delay in seconds between frames
        show_selections (bool): toggle display of selections
    """
    if Config.IsWindows():
        dialog = wx.GenericProgressDialog(
            'Save Animated Image', 'Initializing...',
            style=wx.PD_APP_MODAL | wx.PD_AUTO_HIDE | wx.PD_CAN_ABORT)
    else:
        dialog = wx.ProgressDialog(
            'Save Animated Image', 'Initializing...',
            style=wx.PD_APP_MODAL | wx.PD_AUTO_HIDE | wx.PD_CAN_ABORT)
    dialog.Show()

    wxlibdr.startWorker(
        self._RunEnd, self._RunBackground,
        wargs=[dialog, file_path, frame_delay, show_selections])
def open_browser(self, driver):
    # Launch the driver for the browser named in the config and log each step
    browser = Config().get('browser')
    if browser == "Firefox":
        driver = webdriver.Firefox(self.Firefox_path)
        time.sleep(2)
        logger.info("starting FireFox browser....")
    elif browser == "Chrome":
        driver = webdriver.Chrome(self.chrome_path)
        logger.info("starting Chrome browser....")
    elif browser == "IE":
        driver = webdriver.Ie(self.IE_path)
        logger.info("starting IE browser....")
    # Open the URL defined in the config file
    driver.get(self.URL)
    time.sleep(3)
    logger.info("open url : %s" % self.URL)
    # Maximize the browser window
    driver.maximize_window()
    logger.info("maximize the current windows...")
    # Log in
    driver.find_element_by_name("mobile_number").send_keys(self.user_name)
    driver.find_element_by_name("password").send_keys(self.password)
    driver.find_element_by_xpath(
        "//button[contains(text(), '登录')]").click()
    logger.info("logging in..........")
    time.sleep(2)
    # Set the implicit wait time
    driver.implicitly_wait(10)
    logger.info("Set implicit wait 10 seconds.")
    return driver
def __load_config(self, category: str) -> Config:
    """Load a translation definition file.

    :param str category: type of translation definition file
    :return Config: config holding the translation data
    :raise NotFoundError: if the translation definition file does not exist
    """
    return Config(self.__trans_path(self._lang, category))
def __init__(self, parent):
    ''' Set up the dialog box interface '''
    self.parent = parent
    QDialog.__init__(self)
    self.setupUi(self)
    self.parent = parent
    self.config = Config.dbinfo().copy()
    # get food energy requirement details by sex and age
    self.getFoodEnergyRequirements()
class TestLogin(unittest.TestCase):
    user_name = Config().get('login_name2')
    password = Config().get('login_password2')

    @classmethod
    def setUpClass(cls):
        """
        setUp() code for the test fixture; mainly the preparation work needed before the tests run
        :return:
        """
        browse = BrowserEngine(cls)
        cls.driver = browse.open_browser(cls)

    def test_A01_login_out(self):
        login = LoginPage(self.driver)
        login.sleep(3)
        login.click_logout()
        login.type_search_element(self.user_name, self.password)
        login.click_button_login()
def __init__(self, parent):
    ''' Set up the dialog box interface '''
    QDialog.__init__(self)
    self.setupUi(self)
    self.parent = parent
    self.config = Config.dbinfo().copy()
    # get projects
    self.getHouseholds()
def train_model(model, args):
    num_batches = model.params.num_batches
    batch_size = model.params.batch_size
    LOGGER.info("Training model for %d batches (batch_size=%d)...",
                num_batches, batch_size)

    losses = []
    epoch_losses = []
    costs = []
    seq_lengths = []
    start_ms = get_ms()
    conf = Config()

    data = []
    for batch_num, x, y in model.dataloader:
        data.append((batch_num, x, y))

    for epoch in range(conf.epoch):
        print("epoch {}".format(epoch))
        for batch_num, x, y in data:
            # @TODO: create a deep copy of memory, mem_batch
            # @TODO: before train, set model's memory to mem_batch
            loss, cost = train_batch(model.net, model.criterion,
                                     model.optimizer, x, y)
            losses += [loss]
            costs += [cost]
            seq_lengths += [y.size(0)]

            # Update the progress bar
            progress_bar(batch_num, args.report_interval, loss)

            # Report
            if batch_num % args.report_interval == 0:
                mean_loss = np.array(losses[-args.report_interval:]).mean()
                mean_cost = np.array(costs[-args.report_interval:]).mean()
                mean_time = int(((get_ms() - start_ms) / args.report_interval) / batch_size)
                progress_clean()
                LOGGER.info("Batch %d Loss: %.6f Cost: %.2f Time: %d ms/sequence",
                            batch_num, mean_loss, mean_cost, mean_time)
                start_ms = get_ms()

            # Checkpoint
            '''
            if (args.checkpoint_interval != 0) and (batch_num % args.checkpoint_interval == 0):
                save_checkpoint(model.net, model.params.name, args,
                                batch_num, losses, costs, seq_lengths)
            '''

        epoch_losses += [np.array(losses[-model.params.num_batches:]).mean()]

    with open('train_loss.txt', "wb") as f:
        pk.dump((losses, epoch_losses), f)
    torch.save(model.net.state_dict(), './model/testmodel')
    LOGGER.info("Done training.")
def __init__(self, parent):
    ''' Set up the dialog box interface '''
    self.parent = parent
    QDialog.__init__(self)
    self.setupUi(self)
    self.parent = parent
    self.config = Config.dbinfo().copy()
    self.lblProjectName.setText(self.parent.projectname)
    # get current project details
    self.getHouseholds()
def __init__(self, parent):
    """ Set up the dialog box interface """
    self.parent = parent
    QDialog.__init__(self)
    self.setupUi(self)
    self.parent = parent
    self.currencyid = 0
    # connect to database
    self.config = Config.dbinfo().copy()
    self.listCurrencies()
def executeUpdateQuery(self, query):
    """Execute a query that needs to be committed to the database.
    For example, an INSERT or UPDATE query.
    """
    config = Config.dbinfo().copy()
    db = connector.Connect(**config)
    cursor = db.cursor()
    cursor.execute(query)
    db.commit()
    cursor.close()
    db.close()
    return
def executeResultsQuery(self, query):
    """Execute a query for which the database will return results.
    For example, a SELECT query.
    """
    config = Config.dbinfo().copy()
    db = connector.Connect(**config)
    cursor = db.cursor()
    cursor.execute(query)
    results = cursor.fetchall()
    cursor.close()
    db.close()
    return results
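A minimal usage sketch for the two query helpers above; the HouseholdQueries class, its DataAccessBase parent, and the households table and columns are all hypothetical names used only for illustration:

class HouseholdQueries(DataAccessBase):  # DataAccessBase is an assumed host of the helpers

    def rename_household(self, hhid, new_name):
        # committed immediately by executeUpdateQuery
        query = "UPDATE households SET hhname='%s' WHERE hhid=%s" % (new_name, hhid)
        self.executeUpdateQuery(query)

    def list_households(self):
        # executeResultsQuery returns cursor.fetchall(): a list of row tuples
        return self.executeResultsQuery("SELECT hhid, hhname FROM households")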
def __init__(self, parent, hhid="", hhname=""):
    ''' Set up the dialog box interface '''
    QDialog.__init__(self)
    self.setupUi(self)
    self.parent = parent
    self.config = Config.dbinfo().copy()
    self.lblProjectName.setText(self.parent.projectname)
    self.txtHouseholdNo.setText(hhid)
    self.txtHouseholdName.setText(hhname)
    self.getHouseholds()
def __init__(self, parent):
    ''' Set up the dialog box interface '''
    self.parent = parent
    QDialog.__init__(self)
    self.setupUi(self)
    self.parent = parent
    self.currentitem = ""
    # connect to database
    self.config = Config.dbinfo().copy()
    self.listItems()
def executeMultipleUpdateQueries(self, queries):
    """This method is identical to self.executeUpdateQuery except that
    it takes a list of query strings and executes each in turn.
    """
    config = Config.dbinfo().copy()
    db = connector.Connect(**config)
    cursor = db.cursor()
    for query in queries:
        cursor.execute(query)
    db.commit()
    cursor.close()
    db.close()
    return
def executeUpdateQuery(self, query, params=None):
    """Execute a query that needs to be committed to the database.
    For example, an INSERT or UPDATE query.
    """
    config = Config.dbinfo().copy()
    db = connector.Connect(**config)
    cursor = db.cursor()
    converted_qt = ParamsUtility.make_parameters_safe(params)
    cursor.execute(query, converted_qt)
    db.commit()
    cursor.close()
    db.close()
    return
def main(matrix, config_base):
    # Read scoreboard options from config.json if it exists
    config = Config(config_base, matrix.width, matrix.height)
    logger = logging.getLogger("mlbled")
    if config.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.WARNING)

    # Print some basic info on startup
    debug.info("%s - v%s (%sx%s)", SCRIPT_NAME, SCRIPT_VERSION,
               matrix.width, matrix.height)

    if emulated:
        debug.log("rgbmatrix not installed, falling back to emulator!")
        debug.log("Using RGBMatrixEmulator version %s", version.__version__)
    else:
        debug.log("Using rgbmatrix version %s", __version__)

    # Draw startup screen
    logo = "assets/mlb-w" + str(matrix.width) + "h" + str(matrix.height) + ".png"

    # MLB image disabled when using the renderer, for now.
    # see: https://github.com/ty-porter/RGBMatrixEmulator/issues/9#issuecomment-922869679
    if os.path.exists(logo) and not emulated:
        logo = Image.open(logo)
        matrix.SetImage(logo.convert("RGB"))
        logo.close()

    # Create a new data object to manage the MLB data
    # This will fetch initial data from MLB
    data = Data(config)

    # create render thread
    render = threading.Thread(target=__render_main, args=[matrix, data],
                              name="render_thread", daemon=True)
    time.sleep(1)
    render.start()

    screen = data.get_screen_type()
    if screen == "news":
        __refresh_offday(render, data)
    elif screen == "standings":
        __refresh_standings(render, data)
    else:
        __refresh_games(render, data)
def __di_register(self, config: Config, request: Request):
    """Register entries in the DI container.

    :param Config config: config
    :param Request request: request
    :raise DataFormatError: if a DI definition in the config is invalid
    """
    self.__register_config(config)
    self.__register_request(request)
    deffinitions = config.get('di')
    if deffinitions:
        for key, deffinition in deffinitions.items():
            self.__register_by_deffinition(key, deffinition)
def __init__(self, directory=None, watch=None, config=None):
    self.directory = directory
    # track file last update
    self.last_update = None
    self.last_status_update = None
    # last line in file
    self.last_line = None
    # last filename
    self.last_filename = None
    self.__events = []
    self.__status = None
    self.__last_status = None
    self.is_include_pass_event = False
    self.has_new_status = False

    if watch is None:
        self.watch = []
    else:
        self.watch = watch

    if config is None:
        self.config = Config()
    else:
        self.config = config
def test_run(self):
    config = Config('config/app-dev.yml')
    App().run(
        config, {
            'url': '/api/GetDevice',
            'method': 'GET',
            'headers': {
                'Authorization': 'auth_key',
            },
            'queries': {
                'hoge': 'fuga',
            },
            'body': {
                'fizz': 'buzz',
            },
        })
def executeMultipleResultsQueries(self, queries):
    """This method is identical to self.executeResultsQuery except that
    it takes a list of query strings, executes each in turn and returns
    a corresponding list of results.
    """
    results = []
    config = Config.dbinfo().copy()
    db = connector.Connect(**config)
    cursor = db.cursor()
    for query in queries:
        cursor.execute(query)
        result = cursor.fetchall()
        results.append(result)
    cursor.close()
    db.close()
    return results
def run(self, config: Config, event: dict) -> dict:
    """Call the handler that matches the event data and return the response.

    :param Config config: config
    :param dict event: event data
    :return dict: response as a dict
    :raise Error: when an Error-family exception occurs, re-raise with a formatted message
    """
    try:
        request = self.__build_request(event)
        Bootstrap(config, request)
        router = Router(config.get('routes.path'))
        receiver = router.dispatch(request.url)
        handler = receiver.instantiate(config, request)
        return handler().to_dict()
    except Error as e:
        raise Exception(f'[{e.code}] {e.__class__.__name__}: {e.message}')
def test_run(self):
    config = Config('tests/unit/app/fixtures/lambdaapp/config.yml')
    response = App().run(
        config, {
            'url': '/test/App',
            'method': 'GET',
            'headers': {
                'Authorization': 'auth_key',
            },
            'queries': {
                'hoge': 'fuga',
            },
            'body': {
                'fizz': 'buzz',
            },
        })
    self.assertEqual({'success': True}, response)
def __init__(self, parent, hhid, hhname, expid=0):
    ''' Set up the dialog box interface '''
    QDialog.__init__(self)
    self.setupUi(self)
    self.parent = parent
    self.pid = parent.parent.projectid
    self.hhid = hhid
    self.expid = expid
    self.config = Config.dbinfo().copy()
    self.getExpenditureTypes()

    if expid != 0:
        self.displayExpenditureDetails()
        self.setWindowTitle("Edit Household Expenditure")

    # display household name
    self.lblHouseholdName.setText(hhname)
class BrowserEngine(object):
    Firefox_path = Config().get('Firefox_path')
    IE_path = Config().get('IE_path')
    chrome_path = Config().get('chrome_path')
    URL = Config().get('url', index=0)
    user_name = Config().get('login_name')
    password = Config().get('login_password')
    yaml = DATA_PATH + '/config.yaml'

    def __init__(self, driver):
        self.driver = driver

    def open_browser(self, driver):
        # Launch the driver for the browser named in the config and log each step
        browser = Config().get('browser')
        if browser == "Firefox":
            driver = webdriver.Firefox(self.Firefox_path)
            time.sleep(2)
            logger.info("starting FireFox browser....")
        elif browser == "Chrome":
            driver = webdriver.Chrome(self.chrome_path)
            logger.info("starting Chrome browser....")
        elif browser == "IE":
            driver = webdriver.Ie(self.IE_path)
            logger.info("starting IE browser....")
        # Open the URL defined in the config file
        driver.get(self.URL)
        time.sleep(3)
        logger.info("open url : %s" % self.URL)
        # Maximize the browser window
        driver.maximize_window()
        logger.info("maximize the current windows...")
        # Log in
        driver.find_element_by_name("mobile_number").send_keys(self.user_name)
        driver.find_element_by_name("password").send_keys(self.password)
        driver.find_element_by_xpath(
            "//button[contains(text(), '登录')]").click()
        logger.info("logging in..........")
        time.sleep(2)
        # Set the implicit wait time
        driver.implicitly_wait(10)
        logger.info("Set implicit wait 10 seconds.")
        return driver

    def quit_browser(self):
        logger.info("Now, close and quit the browser.")
        self.driver.quit()
def _InitUI( self ):
    """
    """
    dmgr = self.state.dataModelMgr
    image_list = wx.ImageList( 16, 16, initialCount = 4 )
    for n in ( 'unselected', 'selected', 'unselected', 'selected' ):
        name = 'item_{0}_16x16.png'.format( n )
        im = wx.Image( os.path.join( Config.GetResDir(), name ) )
        image_list.Add( im.ConvertToBitmap() )
    self.AssignStateImageList( image_list )

    root_item = self.AddRoot( 'Datasets' )
    #r root_item = self.AddRoot( 'Files' )
    self.SetItemState( root_item, wx.TREE_ITEMSTATE_NONE )

    if self.showSelectedDataSet:
        item = self.AppendItem( root_item, LABEL_selectedDataSet, 0, 1 )
        self.SetItemPyData( item, NAME_selectedDataSet )
        self.selectedDataSetItem = item

    # -- Each model
    # --
    if dmgr.GetDataModelCount() == 1:
        dmodel = dmgr.GetFirstDataModel()
        model_item = self.AppendItem( root_item, 'Dataset Types' )
        self.SetItemPyData( model_item, dmodel )
        #r self.SetItemText( root_item, 'Types' )
        self._CreateModelItems( model_item, dmodel )
        self.Expand( model_item )

    elif dmgr.GetDataModelCount() > 1:
        for name in dmgr.GetDataModelNames():
            dmodel = dmgr.GetDataModel( name )
            # model_item = self.AppendItem( root_item, name, 0, 1 )
            model_item = self.AppendItem( root_item, name )
            self.SetItemPyData( model_item, dmodel )
            self._CreateModelItems( model_item, dmodel )

    #self.Bind( wx.EVT_TREE_SEL_CHANGED, self._OnSelectionChanged )
    self.Bind( wx.EVT_TREE_KEY_DOWN, self._OnKeyDown )
    self.Bind( wx.EVT_TREE_STATE_IMAGE_CLICK, self._OnItemClick )
def __init__(self, parent, hhid, hhname, incomeid=0):
    ''' Set up the dialog box interface '''
    QDialog.__init__(self)
    self.setupUi(self)
    self.parent = parent
    self.hhid = hhid
    self.pid = parent.parent.projectid
    self.incomeid = incomeid
    self.config = Config.dbinfo().copy()
    self.getGiftsTypes()
    self.getCropTypes()

    if incomeid != 0:
        self.displayIncomeDetails()
        self.setWindowTitle("Edit Income Item")

    # display household name
    self.lblHouseholdName.setText(hhname)
def execute():
    conf = Config()
    filename = filedialog.askopenfilename(
        initialdir=os.path.join(conf.dir, conf.RACES_DIR),
        title="Select file",
        filetypes=(("Race JSON", "*.json"),))
    if filename:
        if is_overlay.get() == 1:
            open_companion('race', '--arg1', filename,
                           options={"is_overlay": is_overlay}, cmd=cmd)()
        else:
            open_companion('race', '--size', '800x900', '--arg1', filename,
                           options={"is_overlay": is_overlay}, cmd=cmd)()
def __init__(self, parent, hhid, hhname, memberid):
    """ Set up the dialog box interface """
    QDialog.__init__(self)
    self.setupUi(self)
    self.parent = parent
    self.hhid = hhid
    self.currentid = memberid

    # configure the connection to the database
    self.config = Config.dbinfo().copy()

    # add years to the year of birth combo box: current year to 150 years ago
    thisyear = date.today().year
    for year in range(thisyear, thisyear - 151, -1):
        self.cmbYearOfBirth.addItem("%i" % year)

    # display household name
    self.lblHouseholdName.setText(hhname)

    # get and display member details
    self.getMemberDetails()
def __init__(self, num_inputs, num_outputs, num_layers):
    super(LSTMController, self).__init__()
    self._config = Config()
    self.num_inputs = num_inputs
    self.num_outputs = num_outputs
    self.num_layers = num_layers

    self.lstm = nn.LSTM(input_size=num_inputs,
                        hidden_size=num_outputs,
                        num_layers=num_layers)

    # The hidden state is a learned parameter
    self.lstm_h_bias = Parameter(
        torch.randn(self.num_layers, 1, self.num_outputs) * self._config.h_lr)
    self.lstm_c_bias = Parameter(
        torch.randn(self.num_layers, 1, self.num_outputs) * self._config.c_lr)

    self.reset_parameters()
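A hedged sketch of how the learned biases above are typically expanded into an initial LSTM state; create_new_state is a hypothetical companion method, not taken from the original snippet:

def create_new_state(self, batch_size):
    # Hypothetical helper: repeat the learned bias parameters across the
    # batch dimension to form the (h, c) tuple expected by nn.LSTM.
    lstm_h = self.lstm_h_bias.clone().repeat(1, batch_size, 1)
    lstm_c = self.lstm_c_bias.clone().repeat(1, batch_size, 1)
    return lstm_h, lstm_c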
def __init__(self, parent, hhid, hhname, incomeid=0):
    ''' Set up the dialog box interface '''
    QDialog.__init__(self)
    self.setupUi(self)
    self.parent = parent
    self.hhid = hhid
    self.pid = parent.parent.projectid
    self.incomeid = incomeid
    self.config = Config.dbinfo().copy()
    self.getCropTypes()

    if incomeid != 0:
        self.displayIncomeDetails()
        self.setWindowTitle("Edit Income Item")

    # display household name
    self.lblHouseholdName.setText(hhname)

    # lock editing of income source and unit of measure
    self.cboIncomeType.setEditable(False)
    self.txtUnitOfMeasure.setReadOnly(True)
for e in sys.argv[1:]:
    if e == '--profile' or e == '-pr':
        i = sys.argv.index(e) + 1
        if i < len(sys.argv) and not sys.argv[i].startswith('-'):
            interim_prf = sys.argv[i]
            del sys.argv[i]
            del sys.argv[i - 1]

args = CArgs()
parser = argparse.ArgumentParser(
    #description='Specify the kanji you want to test.',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    add_help=False)

conf = None
try:
    conf = Config('config.json',
                  os.path.dirname(os.path.abspath(__file__)) + os.sep,
                  profile=interim_prf)
    add_parser_args(parser, conf)
    parser.parse_args(namespace=args)
    conf.language = args.lang
    conf.keymap = args.keymap
except Configuration_Exception as e:
    print('[config] Error: ' + str(e))
    sys.exit(1)

if args.permutation:
    args.exp = 0
if args.quiet:
    args.verbosity = -1
from discord.ext.commands import Bot

from data.Extensions import Extensions
from data.config import Config


class Client(Bot):
    """ doc """

    def __init__(self, _extensions: Extensions, command_prefix, **options):
        super().__init__(command_prefix, **options)
        self.load_extensions(_extensions)

    def load_extensions(self, _extensions):
        for extension in _extensions.get_all_extensions():
            self.load_extension(extension)


config = Config()
extensions = Extensions()
client = Client(extensions, config.get_prefix())
client.run(config.get_token())
import pickle as pk
import random

from data.config import Config

conf = Config()
low_bound = conf.get_low_bound()
up_bound = conf.get_up_bound()
eos = conf.get_eos()


# DP solution
def mss(arr):
    dp = []
    dp.append(arr[0])
    result = arr[0]
    for i in range(1, len(arr)):
        curr = max(dp[i - 1] + arr[i], arr[i])
        dp.append(curr)
        result = max(result, curr)
    return [result]


def input_generator(length):
    arr = []
    elements = random.randint(1, length)
    for i in range(elements):
        arr.append(random.randint(low_bound, up_bound))
    return arr
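A quick worked check of the DP above (Kadane's recurrence: dp[i] = max(dp[i-1] + arr[i], arr[i]), and the answer is the running maximum of dp); the sample array is made up for illustration:

# For arr = [2, -3, 4, -1, 2]:
#   dp = [2, -1, 4, 3, 5]  ->  best contiguous sum is 5 (the run [4, -1, 2])
assert mss([2, -3, 4, -1, 2]) == [5]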
def __init__(self, logger_name):
    self.logger = logging.getLogger(logger_name)
    logging.root.setLevel(logging.NOTSET)
    c = Config().get('log')
    # log file name
    self.log_file_name = c.get('fileName') if c and c.get('fileName') else 'test.log'
    # number of rotated log files to keep
    self.backup_count = c.get('backupCount') if c and c.get('backupCount') else 5
    # log output levels
    self.console_output_level = c.get('consoleLevel') if c and c.get('consoleLevel') else 'WARNING'
    self.file_output_lever = c.get('fileLevel') if c and c.get('fileLevel') else 'DEBUG'
    # log output format
    pattern = c.get('pattern') if c and c.get('pattern') else \
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    self.formatter = logging.Formatter(pattern)
def __init__(self, projectid):
    self.database = Database()
    self.pcharstable = 'p' + str(projectid) + 'PersonalCharacteristics'
    self.hcharstable = 'p' + str(projectid) + 'HouseholdCharacteristics'
    self.pid = projectid
    self.config = Config.dbinfo().copy()
def __init__(self, query):
    # connect to the mysql database
    self.config = Config.dbinfo().copy()
    self.db = connector.Connect(**self.config)
    self.cursor = self.db.cursor()
    self.sqlquery = query
def CreateBitmap(self, value_range, bg_color=None, cmap=None, font_size=10,
                 mapper=None, ntick_values=10, scale_type='linear',
                 title=None):
    """Generate the wx.Bitmap.

    Args:
        value_range (tuple): range_min, range_max, data_min, data_max
..      scalar_map (matplotlib.cm.ScalarMappable): instance used to map
..        values to colors and determine the range and scale type
        bg_color (wx.Colour): optional color for Linux/GTK support
        cmap (matplotlib.colors.Colormap): optional colormap to use if
            mapper is None, defaulting to 'rainbow'
        font_size (int): font point size
        mapper (matplotlib.cm.ScalarMappable): optional pre-built mapper;
            if None one is created based on value_range and scale_type.
            Note the ``norm`` property determines scale_type, overriding
            the ``scale_type`` parameter if provided
        ntick_values (int): number of values to show as ticks
        scale_type (str): 'linear' or 'log', used if mapper is None
        title (str): optional title under legend

    Returns:
        wx.Bitmap: new bitmap
    """
    ntick_values = max(2, ntick_values)
    data_max = value_range[3] if len(value_range) > 3 else value_range[1]
    data_min = value_range[2] if len(value_range) > 2 else value_range[0]

    if isinstance(mapper, cm.ScalarMappable):
        norm = mapper.norm
        is_log_scale = isinstance(norm, colors.LogNorm)
    else:
        is_log_scale = scale_type == 'log'
        if is_log_scale:
            norm = colors.LogNorm(
                #vmin = value_range[ 0 ], vmax = value_range[ 1 ], clip = True
                vmin=max(data_min, 1.0e-16),
                vmax=max(data_max, 1.0e-16),
                clip=True)
        else:
            norm = colors.Normalize(
                #vmin = value_range[ 0 ], vmax = value_range[ 1 ], clip = True
                vmin=data_min, vmax=data_max, clip=True)
        if cmap is None:
            cmap = cm.get_cmap(Config.defaultCmapName_)  #'rainbow'
        mapper = cm.ScalarMappable(norm=norm, cmap=cmap)

    #cmap = cm.get_cmap( 'rainbow' )
    #norm = scalar_map.norm
    #is_log_scale = isinstance( norm, colors.LogNorm )

    # -- Pre-step, format values
    # --
    scaler = RangeScaler()
    steps = scaler.Calc(norm.vmin, norm.vmax,
                        scale_type='log' if is_log_scale else 'linear',
                        nticks=ntick_values,
                        cull_outside_range=True)
    labels_mode = 'log' if is_log_scale else 'linear'
    labels = scaler.CreateLabels(steps, labels_mode)
    widest_label = None
    for l in labels:
        if widest_label is None or len(l) > len(widest_label):
            widest_label = l

    # -- Calc sizes
    # --
    border = 2
    text_gap = 8  # line drawn from text to color block
    pen_color = (0, 0, 0, 255)

    dc = wx.MemoryDC()
    dc.SelectObject(wx.EmptyBitmapRGBA(64, 64))
    # if Config.GetOSName() == 'windows':
    #     font_size = int( font_size * 0.8 )
    font_params = \
        {
        'pointSize': font_size,
        'family': wx.FONTFAMILY_SWISS,
        'style': wx.FONTSTYLE_NORMAL,
        'weight': wx.FONTWEIGHT_NORMAL
        }
    if Config.GetOSName() == 'windows':
        font_params['faceName'] = 'Lucida Sans'  # 'Arial'
        font_params['weight'] = wx.FONTWEIGHT_BOLD
    cur_font = wx.Font(**font_params)
    dc.SetFont(cur_font)
    text_size = dc.GetTextExtent('99' + widest_label)
    if title:
        tsize = dc.GetTextExtent(title)
        if tsize[0] > text_size[0]:
            text_size = (tsize[0], tsize[1])

    if Config.GetOSName() == 'darwin':
        block_size = text_size[1] << 1
    else:
        block_size = int(text_size[1] * 1.5)
    im_wd = border + text_size[0] + text_gap + block_size + border
    color_band_ht = block_size * ntick_values

    im_ht = \
        border + (text_size[ 1 ] << 1) + color_band_ht + \
        (text_size[ 1 ] >> 1) + 2 + \
        text_size[ 1 ] + (text_size[ 1 ] >> 1)
        # border + block_size + color_band_ht +
        # text_size[ 1 ] + border
    if title:
        im_ht += block_size + text_size[1]

    # this is drawn empty in Windows
    if bg_color is not None:
        bmap = wx.EmptyBitmapRGBA(im_wd, im_ht,
                                  bg_color.red, bg_color.green,
                                  bg_color.blue, bg_color.alpha)
    else:
        bmap = wx.EmptyBitmapRGBA(im_wd, im_ht)
    dc.SelectObject(bmap)

    if bg_color is None:
        if Config.GetOSName() == 'windows':
            dc.SetBackground(
                wx.TheBrushList.FindOrCreateBrush(
                    wx.WHITE, wx.BRUSHSTYLE_SOLID
                    #wx.Colour( 225, 225, 225, 255 ), wx.BRUSHSTYLE_SOLID
                    ))
            dc.Clear()
        else:
            dc.SetBackground(
                wx.TheBrushList.FindOrCreateBrush(wx.WHITE, wx.TRANSPARENT))
    #end if bg_color is None

    gc = wx.GraphicsContext.Create(dc)

    trans_brush = gc.CreateBrush(
        wx.TheBrushList.FindOrCreateBrush(wx.WHITE, wx.TRANSPARENT))
    black_pen = gc.CreatePen(wx.Pen(wx.BLACK, 1))
    gfont = gc.CreateFont(cur_font, wx.BLACK)

    gc.SetAntialiasMode(wx.ANTIALIAS_DEFAULT)  # wx.ANTIALIAS_NONE
    gc.SetBrush(trans_brush)
    gc.SetFont(gfont)
    gc.SetInterpolationQuality(wx.INTERPOLATION_BEST)
    gc.SetPen(black_pen)

    # x,y is UL position of the contour color block
    x = im_wd - border - block_size
    # do this if not drawing max value
    #y = border + (text_size[ 1 ] >> 1)
    y = border

    # -- Write max value
    # --
    data_max_log = int(math.log10(data_max)) if data_max > 0.0 else 0
    steps_max_log = int(math.log10(steps[-1])) if steps[-1] > 0.0 else 0
    #cur_label = scaler.Format( data_max, 3 )
    cur_label = \
        scaler.Format( data_max, 3 ) if data_max_log == steps_max_log else \
        '{0:.3g}'.format( data_max )
    label_size = gc.GetFullTextExtent(cur_label)
    gc.DrawText(cur_label, x + block_size - label_size[0], y + label_size[2])
    #x y += text_size[ 1 ] + (text_size[ 1 ] >> 1)
    y += text_size[1] << 1

    # -- Draw color band
    # --
    max_value = max(norm.vmax, steps[-1])
    min_value = min(norm.vmin, steps[0])
    if is_log_scale:
        # log_delta = \
        #     0 if norm.vmin == 0.0 else math.log10( max_value / min_value )
        log_delta = \
            0 if norm.vmin == 0.0 else \
            0 if max_value <= 0.0 or min_value <= 0.0 else \
            math.log10( max_value / min_value )
        log_b = log_delta / color_band_ht
        log_factor = math.pow(10.0, log_b)
        #log_a = value_range[ 1 ] / math.pow( 10.0, log_factor * color_band_ht )
        # this is 1
    else:
        #value_delta = norm.vmax - norm.vmin
        value_delta = max_value - min_value
        value_incr = value_delta / (color_band_ht - 1)

    #cur_value = value_range[ 1 ]
    cur_value = max_value
    for j in xrange(color_band_ht):
        # if is_log_scale:
        #     cur_value = value_range[ 1 ] / math.pow( 10.0, log_b * j )
        #color = cmap( norm( cur_value ), bytes = True )
        color = mapper.to_rgba(cur_value, bytes=True)
        gc.SetBrush(
            gc.CreateBrush(
                wx.TheBrushList.FindOrCreateBrush(wx.Colour(*color),
                                                  wx.BRUSHSTYLE_SOLID)))
        gc.SetPen(
            gc.CreatePen(
                wx.ThePenList.FindOrCreatePen(wx.Colour(*color), 1,
                                              wx.PENSTYLE_SOLID)))
        gc.DrawRectangle(x, y + j, block_size, 1)

        if is_log_scale:
            cur_value /= log_factor
        else:
            cur_value -= value_incr
    #end for

    gc.SetBrush(trans_brush)
    gc.SetPen(
        gc.CreatePen(
            wx.ThePenList.FindOrCreatePen(wx.Colour(*pen_color), 1,
                                          wx.PENSTYLE_SOLID)))
    gc.DrawRectangle(x, y, block_size, color_band_ht)

    # -- Draw contour values and lines
    # --
    for j in xrange(len(steps)):
        cur_value = steps[j]
        cur_label = labels[j]
        if is_log_scale:
            tick_delta = math.log10(max_value / cur_value) / log_b
        else:
            tick_delta = (max_value - cur_value) / value_delta * color_band_ht
        tick_y = y + tick_delta

        gc.DrawLines(((x - text_gap, tick_y), (x, tick_y)))

        label_size = gc.GetFullTextExtent(cur_label)
        gc.DrawText(cur_label, x - text_gap - label_size[0] - 1,
                    tick_y - (label_size[1] / 2.0))
    #end for j

    y += color_band_ht

    # -- Write min value
    # --
    if data_min == 0.0 or steps[0] == 0.0:
        data_min_log = steps_min_log = 0
    else:
        data_min_log = int(math.log10(abs(data_min)))
        steps_min_log = int(math.log10(abs(steps[0])))
    #cur_label = scaler.Format( data_min, 3 )
    cur_label = \
        scaler.Format( data_min, 3 ) if data_min_log == steps_min_log else \
        '{0:.3g}'.format( data_min )
    label_size = gc.GetFullTextExtent(cur_label)
    #x gc.DrawText( cur_label, x + block_size - label_size[ 0 ], y + 2 )
    y += (text_size[1] >> 1) + 2
    gc.DrawText(cur_label, x + block_size - label_size[0], y)

    if title:
        #x y += block_size
        y += text_size[1] + (text_size[1] >> 1)
        title_size = gc.GetFullTextExtent(title)
        tx = im_wd - title_size[0]
        gc.DrawText(title, tx, y)
    #end if title

    dc.SelectObject(wx.NullBitmap)
    return bmap
from data.generator import mss_generator
from data.config import Config

conf = Config()

if __name__ == '__main__':
    train_file = "data/train.txt"
    test_file = "data/test.txt"
    test_file_var = "data/test_var.txt"
    mss_generator(conf.get_train_data_size(), train_file, conf.get_input_size())
    mss_generator(conf.get_test_data_size(), test_file, conf.get_input_size())
    mss_generator(conf.get_test_data_size(), test_file_var, conf.var_input_size)
def lambda_handler(event, context):
    config = Config(f'config/app-{os.environ["ENV"]}.yml')
    return App().run(config, event)
def __init__(self, parent):
    ''' Set up the dialog box interface '''
    QDialog.__init__(self)
    self.setupUi(self)
    self.parent = parent
    self.config = Config.dbinfo().copy()
def __init__(self):
    self.database = Database()
    self.config = Config.dbinfo().copy()
def test_get(self):
    config = Config('tests/unit/data/fixtures/config/config.yml')
    self.assertEqual(123, config.get('hoge.fuga.piyo'))
    self.assertEqual('value', config.get('arr.2.key'))
class JournalWatcher:
    is_modified = False

    def __init__(self, directory=None, watch=None, config=None):
        self.directory = directory
        # track file last update
        self.last_update = None
        self.last_status_update = None
        # last line in file
        self.last_line = None
        # last filename
        self.last_filename = None
        self.__events = []
        self.__status = None
        self.__last_status = None
        self.is_include_pass_event = False
        self.has_new_status = False

        if watch is None:
            self.watch = []
        else:
            self.watch = watch

        if config is None:
            self.config = Config()
        else:
            self.config = config

    def include_pass_event(self):
        self.is_include_pass_event = True

    def __get_journal_files(self):
        """
        get all parts of the latest file
        :return: sorted from old to new
        """
        # get all journal files
        pattern_journals = re.compile(r'Journal\.(\d+)\.\d+\.log')
        file_list = os.listdir(self.directory)
        journal_files = list(filter(lambda s: pattern_journals.match(s), file_list))

        # get latest journal
        timestamps = map(lambda s: int(pattern_journals.match(s)[1]), journal_files)
        max_timestamp = max(timestamps)

        # get all parts of the latest file
        pattern_latest = re.compile(r'Journal\.{}\.\d+\.log'.format(max_timestamp))
        return sorted(filter(lambda s: pattern_latest.match(s), journal_files))

    def __journal_event_generator(self, files):
        for filename in files:
            if self.last_filename != filename:
                self.last_filename = filename
                self.last_line = 0
            full_path = os.path.join(self.directory, filename)
            file_stat = os.stat(os.path.join(self.directory, full_path))
            # if self.last_update is not None and file_stat.st_mtime < self.last_update:
            #     continue
            with open(full_path, 'r') as fp:
                current_file_line = 0
                while True:
                    line = fp.readline()
                    current_file_line += 1
                    if self.last_line > current_file_line:
                        continue
                    if not line:
                        break
                    try:
                        decoded = json.loads(line)
                        yield decoded
                    except json.decoder.JSONDecodeError:
                        pass
                self.last_line = current_file_line

    def __parse_timestamp(self, timestamp_string):
        return datetime.strptime(timestamp_string, '%Y-%m-%dT%H:%M:%SZ')

    def __encode_timestamp(self, now):
        return now.strftime('%Y-%m-%dT%H:%M:%SZ')

    def __extract(self, file):
        for e in self.__journal_event_generator([file]):
            event_name = e['event']
            if 'timestamp' in e:
                e['timestamp'] = self.__parse_timestamp(e['timestamp'])

            # record events
            if event_name in self.watch:
                self.__events.append(e)

    def refresh(self):
        self.is_modified = False
        self.__events = []

        files = self.__get_journal_files()
        if len(files) > 0:
            for file in files:
                last_file_stat = os.stat(os.path.join(self.directory, file))
                if self.last_update is None or last_file_stat.st_mtime > self.last_update:
                    self.__extract(file)
                    self.last_update = last_file_stat.st_mtime
                    self.is_modified = True

        if self.is_include_pass_event:
            self.get_status()
            if len(self.__events) > 0:
                timestamp = self.__events[-1]['timestamp']
            else:
                timestamp = datetime.utcnow()
            if self.has_new_status:
                self.__events.append({
                    "timestamp": timestamp,
                    "event": "Pass",
                })
                self.is_modified = True

        return self.is_modified

    def get_route(self):
        path = os.path.join(self.directory, 'NavRoute.json')
        if not os.path.isfile(path):
            return None

        route = None
        with open(path, 'r') as file:
            route = json.load(file)

        if 'Route' not in route:
            return []

        return route['Route']

    def get_status(self):
        file = "status.json"
        file_path = os.path.join(self.directory, file)
        try:
            last_file_stat = os.stat(file_path)
        except FileNotFoundError:
            return None

        self.has_new_status = False
        if self.last_status_update is None or last_file_stat.st_mtime > self.last_status_update:
            for _ in range(10):
                try:
                    with open(file_path, 'r') as fp:
                        tmp = self.__status
                        self.__status = json.load(fp)
                        self.__last_status = tmp
                    self.last_status_update = last_file_stat.st_mtime
                    self.has_new_status = True
                    break
                except json.decoder.JSONDecodeError:
                    time.sleep(0.1)

        return self.__status

    def get_race_details(self):
        return self.config.get_race_details()

    @property
    def events(self):
        return self.__events

    def now(self):
        return datetime.now()
def main(*args):
    default_dir = get_config_dir()
    parser = argparse.ArgumentParser(
        description='Show overlay for streaming purposes')
    parser.add_argument('activity', type=str,
                        choices=['exploration', 'race', 'create-race'],
                        default='exploration')
    parser.add_argument('--background', '-b', type=str, default='black',
                        help='background color name (ex. black)')
    parser.add_argument('--size', '-s', type=str, default='720p',
                        help="[width]x[height] or 720p or 1080p")
    parser.add_argument('--dir', '-d', type=str, default=default_dir,
                        help="path to journal directory")
    parser.add_argument('--overlay', '-o', default=False, action='store_true',
                        help="Overlay mode (windows only)")
    parser.add_argument('--config', '-c', type=str, default='',
                        help="config path")
    parser.add_argument('--simulator', type=str,
                        choices=['race', 'exploration'], default=None)
    parser.add_argument('--arg1', type=str, default=None)

    if len(args) == 0:
        args = None
    args = parser.parse_args(args)

    if args.activity == 'race' and args.arg1 is None:
        parser.error('--arg1 must be the race filename')

    journal_path = args.dir
    print('Journal directory: {}'.format(journal_path))

    if args.size == '720p':
        size = (1280, 720)
    elif args.size == '1080p':
        size = (1920, 1080)
    else:
        size_pattern = re.compile("^[0-9]+x[0-9]+$")
        if args.size != '' and size_pattern.match(args.size):
            size = tuple(map(lambda i: int(i), args.size.split("x")))
        else:
            size = None

    win = Window(size=size, is_overlay=args.overlay,
                 color=pygame.Color(args.background))

    watch_list = []
    card_list = []

    if args.config:
        config = Config(config_dir=args.config)
    else:
        config = Config()

    journal = JournalWatcher(watch=[], directory=journal_path, config=config)

    if args.activity == 'race':
        config.select_race(args.arg1)

    def append_card(card_class, **kwargs):
        c = card_class(win.screen, journal, **kwargs)
        for w in c.watched():
            watch_list.append(w)
        card_list.append(c)

    if args.activity == 'exploration':
        # exploration card
        append_card(cards.ExplorationCard, position=(0, 1), card_size=(1, 2))
        # current system card
        append_card(cards.CurrentSystemCard, position=(2, 1),
                    text_align='right', card_size=(1, 2))
        # route card
        append_card(cards.RouteCard, position=(0, 0), text_align='left',
                    card_size=(3, 1))
    elif args.activity == 'race':
        if args.overlay:
            append_card(cards.RaceCard, position=(2, 0), card_size=(1, 1))
        else:
            append_card(cards.RaceCard, position=(0, 0), card_size=(3, 3))
    elif args.activity == 'create-race':
        if args.overlay:
            append_card(cards.CreateRaceCard, position=(2, 0), card_size=(1, 1))
        else:
            append_card(cards.CreateRaceCard, position=(0, 0), card_size=(3, 3))
    else:
        raise NotImplementedError("{} not implemented".format(args.activity))

    journal.watch = list(set(watch_list))

    win.screen.fill(win.mask_color)

    sim = None
    if args.simulator:
        sim = SimRunner(Simulator(args.simulator).get_generator())

    while win.loop():
        if sim:
            sim.run()
        journal.refresh()
        win.screen.fill(win.mask_color)
        for card in card_list:
            card.render()
        pygame.display.update()
def test_dbinfo(self):
    Config.set_config(dummy_config())
    self.assertEqual(Config.dbinfo(), {'a': 'b'})