def load_model(self):
    """
    Load the newest next-generation model; fall back to the best known model.

    With ``--new`` set and no saved best model, a fresh model is built and
    saved as the new best model.

    :return ChessModel: the loaded (or newly built) model
    :raises RuntimeError: if no next-generation model exists, ``--new`` was
        not given, and the best model weights cannot be loaded
    """
    model = ChessModel(self.config)
    rc = self.config.resource
    dirs = get_next_generation_model_dirs(rc)
    if not dirs:
        logger.debug("loading best model")
        if self.config.opts.new and not load_best_model_weight(model):
            # no best model on disk yet: build one from scratch and persist it
            model.build()
            save_as_best_model(model)
        elif not load_best_model_weight(model):
            raise RuntimeError("Best model can not be loaded!")
    else:
        # the directory list is ordered, so the last entry is the newest generation
        latest_dir = dirs[-1]
        logger.debug("loading latest model")
        config_path = os.path.join(latest_dir, rc.next_generation_model_config_filename)
        weight_path = os.path.join(latest_dir, rc.next_generation_model_weight_filename)
        model.load(config_path, weight_path)
    return model
def load_current_model(self):
    """
    Build a ChessModel and populate it with the best saved weights.

    :return ChessModel: the model
    """
    current = ChessModel(self.config)
    load_best_model_weight(current)
    return current
def load_model(self):
    """Return the best saved model, building and saving a fresh one when
    ``--new`` is set or no best weights exist yet."""
    from chess_zero.agent.model_chess import ChessModel

    model = ChessModel(self.config)
    needs_fresh_model = self.config.opts.new or not load_best_model_weight(model)
    if needs_fresh_model:
        model.build()
        save_as_best_model(model)
    return model
def get_player(config):
    """Create a ChessPlayer backed by the best saved model.

    :raises RuntimeError: if the best model weights cannot be loaded
    """
    from chess_zero.agent.model_chess import ChessModel
    from chess_zero.lib.model_helper import load_best_model_weight

    best_model = ChessModel(config)
    loaded = load_best_model_weight(best_model)
    if not loaded:
        raise RuntimeError("Best model not found!")
    pipes = best_model.get_pipes(config.play.search_threads)
    return ChessPlayer(config, pipes)
def start(config: Config):
    """
    Run a minimal Flask API that answers move requests using the best model.

    ``/play`` (GET/POST) expects JSON with a "position" and "moves" payload,
    advances the shared environment, and returns the engine's chosen move.

    :param config: application configuration
    :raises RuntimeError: if the best model weights cannot be loaded
    """
    PlayWithHumanConfig().update_play_config(config.play)
    env = ChessEnv().reset()
    app = Flask(__name__)

    model = ChessModel(config)
    if not load_best_model_weight(model):
        raise RuntimeError("Best model not found!")
    player = ChessPlayer(config, model.get_pipes(config.play.search_threads))

    @app.route('/play', methods=["GET", "POST"])
    def play():
        data = request.get_json()
        print(data["position"])
        env.update(data["position"])
        env.step(data["moves"], False)
        bestmove = player.action(env, False)
        return jsonify(bestmove)

    # NOTE(review): binds on all interfaces — restrict the host if this server
    # is not meant to be reachable from the network.
    # BUGFIX: port must be an int, not the string "8080" (Werkzeug/socket
    # require an integer port).
    app.run(host="0.0.0.0", port=8080)
def training(self):
    """
    Main optimization loop.

    Each iteration: make sure enough self-play data exists for at least one
    training batch (generating more via self-play when needed), train for a
    number of epochs, checkpoint the model, then clear the backend session
    and reload the best weights before repeating. Runs indefinitely.
    """
    tc = self.config.trainer
    total_steps = tc.start_total_steps

    meta_dir = 'data/model'
    meta_file = os.path.join(meta_dir, 'metadata.json')
    file_dir = 'data/model/next_generation'
    h5_file = os.path.join(file_dir, 'weights.{epoch:02d}.h5')
    self.meta_writer = OptimizeWorker(meta_file)
    self.early_stopping = EarlyStopping(monitor='val_loss')
    self.check_point = ModelCheckpoint(filepath=h5_file, monitor='val_loss', verbose=1)

    def generate_play_data():
        # run one round of self-play and pull the fresh games into the dataset
        self_play = SelfPlayWorker(self.config, env=ChessEnv(), model=self.model)
        self_play.start()
        self.load_play_data()

    while True:
        self.load_play_data()
        # training portion left after the validation split is carved off
        if (self.dataset_size * (1 - self.validation)) < tc.batch_size:
            # not enough data for even one batch: self-play until there is
            while (self.dataset_size * (1 - self.validation)) < tc.batch_size:
                generate_play_data()
        else:
            # enough data already, but still generate one fresh round
            generate_play_data()

        self.compile_model()
        self.update_learning_rate(total_steps)
        steps = self.train_epoch(tc.epoch_to_checkpoint)
        total_steps += steps
        self.save_current_model()
        # free the keras/TF graph before reloading weights to avoid memory growth
        k.clear_session()
        load_best_model_weight(self.model)
def load_model(self):
    """
    Load the current best model.

    A brand-new model is built and saved as the best model when ``--new``
    was requested or no best weights could be loaded.

    :return ChessModel: current best model
    """
    model = ChessModel(self.config)
    fresh_start = self.config.opts.new
    if fresh_start or not load_best_model_weight(model):
        model.build()
        save_as_best_model(model)
    return model
def get_player_from_model(config):
    """Build a ChessPlayer from the best saved model, or return None on any
    failure (the error is printed and logged)."""
    try:
        model = ChessModel(config)
        loaded = load_best_model_weight(model)
        if not loaded:
            raise RuntimeError("Best model not found!")
        pipes = model.get_pipes(config.play.search_threads)
        return ChessPlayer(config, pipes)
    except Exception as exc:
        traceback.print_exc()
        log.error(exc)
        return None
def get_player_from_model(config):
    """Build a ChessPlayer from the best saved model, or return None on any
    failure (the error is printed and logged)."""
    try:
        from chess_zero.agent.player_chess import ChessPlayer
        from chess_zero.agent.model_chess import ChessModel
        from chess_zero.lib.model_helper import load_best_model_weight

        best_model = ChessModel(config)
        if load_best_model_weight(best_model):
            pipes = best_model.get_pipes(config.play.search_threads)
            return ChessPlayer(config, pipes)
        raise RuntimeError("Best model not found!")
    except Exception as exc:
        traceback.print_exc()
        log.error(exc)
        return None
def load_model(self):
    """
    Load the newest next-generation model, falling back to the best model.

    :return ChessModel: the loaded model
    :raises RuntimeError: if there is no next-generation model and the best
        model weights cannot be loaded
    """
    from chess_zero.agent.model_chess import ChessModel

    model = ChessModel(self.config)
    rc = self.config.resource
    dirs = get_next_generation_model_dirs(rc)
    if not dirs:
        # plain strings: the originals were f-strings with no placeholders (F541)
        logger.debug("loading best model")
        if not load_best_model_weight(model):
            raise RuntimeError("Best model can not be loaded!")
    else:
        # the directory list is ordered, so the last entry is the newest generation
        latest_dir = dirs[-1]
        logger.debug("loading latest model")
        config_path = os.path.join(latest_dir, rc.next_generation_model_config_filename)
        weight_path = os.path.join(latest_dir, rc.next_generation_model_weight_filename)
        model.load(config_path, weight_path)
    return model
def load_model(self):
    """
    Loads the next generation model from the appropriate directory. If not
    found, loads the best known model.

    :return ChessModel: the loaded model
    :raises RuntimeError: if no next-generation model exists and the best
        model weights cannot be loaded
    """
    model = ChessModel(self.config)
    rc = self.config.resource
    dirs = get_next_generation_model_dirs(rc)
    if not dirs:
        logger.debug("loading best model")
        if not load_best_model_weight(model):
            raise RuntimeError("Best model can not be loaded!")
    else:
        # the directory list is ordered, so the last entry is the newest generation
        latest_dir = dirs[-1]
        logger.debug("loading latest model")
        config_path = os.path.join(latest_dir, rc.next_generation_model_config_filename)
        weight_path = os.path.join(latest_dir, rc.next_generation_model_weight_filename)
        model.load(config_path, weight_path)
    return model
def load_current_model(self):
    """Return a ChessModel initialized with the best saved weights."""
    best = ChessModel(self.config)
    load_best_model_weight(best)
    return best
def load_model(self):
    """Return the best saved model; build and save a new one when ``--new``
    is set or no best weights can be loaded."""
    model = ChessModel(self.config)
    must_build = self.config.opts.new or not load_best_model_weight(model)
    if must_build:
        model.build()
        save_as_best_model(model)
    return model
def _load_model(self):
    """Load the best saved model.

    :raises RuntimeError: when no best model weights are available
    """
    from chess_zero.agent.model_chess import ChessModel

    loaded_model = ChessModel(self.config)
    if load_best_model_weight(loaded_model):
        return loaded_model
    raise RuntimeError("Best model not found!")
def start(self):
    """
    Main self-play producer loop.

    Coordinates with worker-process callbacks (``recall_fn``, not visible
    here) via two module-level locks: ``job_done`` is acquired here and
    presumably released by the callback when a game finishes; ``thr_free``
    is released here to let the next callback proceed — TODO confirm against
    the callback's implementation. Finished game data arrives through the
    module globals ``env`` and ``data``, which the callback is assumed to
    assign (see the commented-out futures-based alternative below).
    """
    # these globals are shared with the done-callbacks running in this process
    global job_done
    global thr_free
    global env
    global data
    global futures
    self.buffer = []
    need_to_renew_model = True
    # first acquire: hold the lock until a callback releases it (blocking)
    job_done.acquire(True)
    futures = []
    with ProcessPoolExecutor(max_workers=self.config.play.max_processes) as executor:
        game_idx = 0
        while True:
            game_idx += 1
            start_time = time()
            # (re)load weights and prime one self-play job per process, but
            # only once all in-flight futures have drained
            if need_to_renew_model and len(futures) == 0:
                load_best_model_weight(self.current_model)
                for i in range(self.config.play.max_processes):
                    ff = executor.submit(self_play_buffer, self.config, cur=self.cur_pipes)
                    ff.add_done_callback(recall_fn)
                    futures.append(ff)
                need_to_renew_model = False
            # block until a callback signals that a game has completed;
            # the callback is assumed to have set the `env`/`data` globals
            job_done.acquire(True)
            #env, data = futures.popleft().result()
            if env.resigned:
                resigned = 'by resign '
            else:
                resigned = '          '  # NOTE(review): padding width is ambiguous in the original — confirm
            print("game %3d time=%5.1fs "
                  "%3d %s "
                  "%s" % (game_idx, time() - start_time, env.num_halfmoves, env.winner, resigned))
            print('game %3d time=%5.1fs ' % (game_idx, time() - start_time))
            self.buffer += data
            if (game_idx % self.config.play_data.nb_game_in_file) == 0:
                self.flush_buffer()
                if need_to_reload_best_model_weight(self.current_model):
                    need_to_renew_model = True
                self.remove_play_data(all=False)  # remove old data
            if not need_to_renew_model:  # avoid congestion
                ff = executor.submit(self_play_buffer, self.config, cur=self.cur_pipes)
                ff.add_done_callback(recall_fn)
                futures.append(ff)  # Keep it going
            # let the next waiting callback run
            thr_free.release()
    # flush whatever remains of the last (unreached in the infinite loop) batch
    if len(data) > 0:
        self.flush_buffer()