def set_config(self, line):
    """Apply one config-assignment line, then react to keys with side effects."""
    util.set_config(line, self.config, C, log)
    cmd, tail = util.parse_cmdline2(line, self.config, log)
    if cmd == 'log-level':
        # Propagate the new verbosity to the logger immediately.
        log.set_log_level(self.config['log-level'].value)
    elif cmd == 'wav-prefix':
        # Transport needs to know where to write wav captures.
        self.transport.set_wav_prefix(tail)
def mcts_fn(config, q_mcts_to_train, q_mcts_to_eval, p_eval_to_mcts, process_id):
    # Worker-process entry point: runs MCTS self-play forever, sending board
    # states to the evaluator process via q_mcts_to_eval / p_eval_to_mcts and
    # pushing finished game data onto q_mcts_to_train.
    set_config(config)
    print('Starting MCTS process %s' % process_id)
    sys.stdout.flush()
    np.random.seed(process_id)  # distinct, reproducible RNG stream per worker
    p_em_recv, p_em_send = p_eval_to_mcts
    p_em_send.close()  # this process only receives on the pipe; close the send end

    def eval_state(state):
        # randomly rotate and flip before evaluating
        k = np.random.randint(4)
        flip = np.random.randint(2)
        if k != 0:
            state = np.rot90(state, k=k, axes=(-2, -1))
        if flip:
            state = np.flip(state, axis=-1)
        q_mcts_to_eval.put((process_id, state.tolist()))
        v, p = p_em_recv.recv()
        p = np.array(p, dtype=np.float32)
        # Undo the augmentation on the returned policy, in reverse order
        # (flip first, then rotate back by -k).
        if flip:
            p = np.flip(p, axis=-1)
        if k != 0:
            p = np.rot90(p, k=-k, axes=(-2, -1))
        return v, p

    # (curr_player, opponent, last_opponent_move, is_curr_player_first)
    start_state = get_start_state()
    mcts = MCTS(start_state, eval_state)
    while True:
        # Each mcts.run() yields arrays for one game; serialize for the queue.
        q_mcts_to_train.put(tuple(x.tolist() for x in mcts.run()))
def set(self, line):
    """Validate a set-command line and store it in the config.

    Unknown names are logged as errors; the read-only 'receive' key is
    refused with an info message.
    """
    cmd, tail = util.parse_cmdline2(line, self.config, log)
    if not cmd:
        log.e('Name not found: {}'.format(line))
        return
    if cmd == 'receive':
        log.i('Cannot set receive')
        return
    util.set_config(line, self.config, kprotocol.C, log)
def play_game(model_first, model_second):
    """Play one evaluation game between two models, alternating MCTS trees.

    Each side keeps its own MCTS tree; after every move both trees are
    stepped to the chosen move and the roles swap. Returns a dict of
    per-move numpy arrays (states, policies, values, visit counts, moves,
    per-move wall time), with the terminal state appended and the final
    curr_v / next_v entries set to -1.
    """
    config_first, config_second = model_first.config, model_second.config

    def get_eval_fn(model):
        # Wrap a model into a single-state (value, policy) evaluator.
        def eval_fn(state):
            with torch.no_grad():
                v, p = model.fit_batch((np.array([state]), ), train=False)
            return v, p[0]
        return eval_fn

    eval_first, eval_second = map(get_eval_fn, [model_first, model_second])
    set_config(config_first)
    start_state = get_start_state()
    curr = MCTSNode(start_state, evaluator=eval_first)
    # Renamed from `next` — the original shadowed the builtin next().
    other = MCTSNode(start_state, evaluator=eval_second)
    config, other_config = config_first, config_second
    info = []
    for _ in RangeProgress(0, config_first.board_dim**2, desc='Moves'):
        set_config(config)
        start = time()
        if config.eval_mcts_iterations == 0:
            # No search configured: pick the move straight from the prior.
            score = curr.p
        else:
            for _ in RangeProgress(0, config.eval_mcts_iterations, desc='MCTS'):
                curr.select()
            score = curr.N  # move by visit count after search
        move = np.unravel_index(score.argmax(), score.shape)
        info.append(dict(
            state=curr.state, curr_p=curr.p, curr_v=curr.value,
            curr_W=curr.W, curr_N=curr.N,
            next_p=other.p, next_v=other.value,
            move=move, time=time() - start))
        # Advance both trees past the move and swap whose turn it is.
        other, curr = curr.step(move), other.step(move)
        config, other_config = other_config, config
        if curr.terminal:
            break
    # Transpose list-of-dicts into dict-of-lists, then append terminal info.
    merged_info = {k: [info_i[k] for info_i in info] for k in info[0].keys()}
    merged_info['state'].append(curr.state)
    merged_info['curr_v'].append(-1)
    merged_info['next_v'].append(-1)
    return {k: np.array(v) for k, v in merged_info.items()}
def configure_next_source(chat_id, text_func, so_far):
    """Ask the user about the next unconfigured rate source; finish when done.

    Walks `meta` for the first source key not yet in `so_far`, sends a
    yes/no keyboard for it, and returns. Once every source is configured,
    sends a summary, persists the choices, and subscribes the chat.
    Removed a leftover debug print of `so_far`.
    """
    for key, m in meta.items():
        if key in so_far:
            continue
        options = [
            ("Yes", {"question": "show", "answer": "yes",
                     "so_far": so_far, "subject": key}),
            ("No", {"question": "show", "answer": "no",
                    "so_far": so_far, "subject": key}),
        ]
        markup = create_keyboard(options)
        text_func(
            "%s: %s\n\nWould you like to see rates from %s? Default: %s"
            % (m['name'], m['desc'], m['name'], str(m['default_enabled'])),
            reply_markup=markup)
        return  # wait for the answer before asking about the next source
    # All sources answered: summarize, persist, and subscribe.
    lines = []
    for k, config in so_far.items():
        line = "%s: show: %s" % (meta[k]['name'], str(config['sub']))
        if config['sub']:
            line += " notify: %s" % str(config['notify'])
        lines.append(line)
    text_func("Done! Summary:\n\n%s" % "\n".join(lines))
    set_config(chat_id, so_far)
    _subscribe(chat_id)
# Theme browser: preview PySimpleGUI themes from a list and persist the
# final choice to settings.conf on exit.
layout = [
    [sg.Text('Theme Browser')],
    [sg.Text('Click a Theme color to see demo window')],
    [sg.Listbox(values=sg.theme_list(), size=(20, 12), key='-LIST-',
                enable_events=True)],
    [sg.Text('Selected theme: %s' % selected_theme, key='selected_theme',
             auto_size_text=True)],
    [sg.Button('Exit')],
]
window = sg.Window('Theme Browser', layout)
while True:  # Event Loop
    event, values = window.read()
    if event in (None, 'Exit'):
        # write new theme to settings.conf
        config["gui"]["theme"] = selected_theme
        util.set_config(config)
        break
    selected_theme = values['-LIST-'][0]
    sg.theme(selected_theme)
    sg.popup_get_text(f'This is {selected_theme}')
    window["selected_theme"].update(f"Selected theme: {selected_theme}")
window.close()
""" Send the `config` dict from a test class definition to the emulator. Best run with `watchdog` or similar to monitor file changes. """ import argparse import os import sys import test_screenshots from util import set_config PORT = os.environ.get('MOCK_SERVER_PORT') parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('test_class', help='name of test class (e.g. TestLayoutD)') parser.add_argument('--platform', default='basalt', help='which emulator') args = parser.parse_args() config = getattr(test_screenshots, args.test_class)().config config['__CLEAR_CACHE__'] = True if PORT: config['nightscout_url'] = 'http://localhost:{}'.format(PORT) set_config(config, [args.platform])
from tqdm import tqdm
import torch
import numpy as np

from model import Model
from mcts import MCTSNode
from util import Config, set_config, get_start_state, step_state

# Make the piskvork remote-protocol package importable.
sys.path.append('../piskvork_remote')  # NOTE(review): `sys` appears to be imported above this chunk — confirm
import remote_brain
from remote_brain import Brain, main

# Load the trained 12x12 model onto the configured CUDA device.
config = Config('results_12x12').load().var(device='cuda:2')
set_config(config)
# config.eval_mcts_iterations = 0
model = Model(config).set_state(config.load_max_model_state(min_epoch=-1))

def evaluator(state):
    # Evaluate one board state with the loaded model; returns (value, policy).
    with torch.no_grad():
        v, p = model.fit_batch((np.array([state]), ), train=False)
    return v, p[0]

class AlphaZero(Brain):
    # Piskvork brain backed by the AlphaZero-style model above.
    def info_init(self):
        super().info_init()
        self.info_text = 'name="pbrain-alphazero", author="Zhongxia Yan", version="0.0", country="USA", www="https://github.com/ZhongxiaYan/gomoku_ai"'
def set_config(self, line):
    """Forward a config-assignment line to util.set_config."""
    util.set_config(line, self.config, C, log)
import argparse  # was missing: argparse.ArgumentParser is used below
import json
import os
import sys

import test_screenshots
from util import BASE_CONFIG
from util import set_config

PORT = os.environ.get('MOCK_SERVER_PORT')

parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--test-class', help='name of test class (e.g. TestLayoutD)')
parser.add_argument('--config-json', help='JSON config string to set')
parser.add_argument('--platform', default='basalt', help='which emulator')
args = parser.parse_args()

if args.test_class:
    # Merge the test class's config (if any) over the base config.
    test_instance = getattr(test_screenshots, args.test_class)()
    config = dict(BASE_CONFIG, **getattr(test_instance, 'config', {}))
elif args.config_json:
    config = json.loads(args.config_json)
else:
    # Fixed: original used a Python 2 print statement (SyntaxError on Python 3).
    print("Must specify either --test-class or --config-json")
    sys.exit(1)

config['__CLEAR_CACHE__'] = True
if PORT:
    # Point the watchface at the local mock server instead of real Nightscout.
    config['nightscout_url'] = 'http://localhost:{}'.format(PORT)
set_config(config, [args.platform])
from util import set_config, get_config
from misc import print_exc_info

# ---------------------------------------------------------------------------- #
# App Config.
# ---------------------------------------------------------------------------- #
from config import USE_ORM

# Select the data-access layer: SQLAlchemy ORM or the raw engine.
ORM = USE_ORM
ENGINE = not ORM

app = Flask(__name__)  # NOTE(review): Flask/Moment/Migrate imports appear above this chunk — confirm
moment = Moment(app)
app.config.from_object('config')
set_config(app.config)  # mirror the Flask config into util's shared config
LOCALE = get_config("DEFAULT_LOCALE")

if ORM:
    # ORM path: SQLAlchemy models plus ORM-flavored list helpers.
    from models import SQLAlchemyDB as db
    from misc import latest_lists_orm as latest_lists
    db.init_app(app)
    migrate = Migrate(app, db)
else:
    # ENGINE
    from misc.engine import setup
    from misc import latest_lists_engine as latest_lists
    setup()