def main(args):
    """Main training driver.

    Builds the input pipeline from the newest self-play chunks, then loops
    forever: run training steps, hot-swap the data source when new chunks
    appear, and run a validation match when requested — promoting the
    latest net to best when it wins, or rolling the trainer's weights back
    to the best net so far when it loses.

    Args:
        args: remaining command-line arguments; if non-empty, the first
            entry is consumed as a weights file to restore before training.
    """
    parser, next_batch = chunks2batches(latest_chunks())
    tfprocess = TFProcess(next_batch)

    if args:
        print("Restoring weights ....")
        tfprocess.restore(args.pop(0))

    print("Training starts ....")
    while True:
        change_data, run_val = tfprocess.process()

        if change_data:
            # New self-play games landed; point the parser at them.
            parser.chunk_switch(latest_chunks())

        if not run_val:
            continue

        best_net = leela_conf.SAVE_DIR + "/best.txt"
        last_net = leela_conf.SAVE_DIR + "/latest.txt"
        cmd = leela_conf.VALIDATION_COMMAND % \
            (last_net, best_net)
        print(cmd)
        subprocess.call(cmd.split(" "))

        # The match runner appends one line per validation; the first
        # tab-separated field of the last line is the better-flag.
        with open(leela_conf.VALIDATION_LOG, "r") as f:
            better = int(f.readlines()[-1].split("\t")[0])

        if better:
            print("---------------- Better Network Found! --------------")
            copy2(last_net, best_net)
        else:
            print("------------- Checkout best net so far. -------------")
            tfprocess.replace_weights(get_weights(best_net))
name='x') probs = tf.placeholder(tf.float32, [None, BOARD_SIZE * BOARD_SIZE + 1]) winner = tf.placeholder(tf.float32, [None, 1]) else: planes = tf.placeholder(tf.float32, [None, FEATURES, BOARD_SIZE, BOARD_SIZE], name='x') probs = tf.placeholder(tf.float32, [None, BOARD_SIZE * BOARD_SIZE + 1]) winner = tf.placeholder(tf.float32, [None, 1]) tfprocess = TFProcess() tfprocess.TFCOREML = True tfprocess.DATA_FORMAT = data_format tfprocess.BOARD_SIZE = BOARD_SIZE tfprocess.INPUT_DIM = 2 tfprocess.FEATURES = FEATURES tfprocess.RESIDUAL_FILTERS = filters tfprocess.RESIDUAL_BLOCKS = blocks if BOARD_SIZE == 9: tfprocess.VALUE_FULLY_CONNECTED = 64 tfprocess.training = False # batch normalizationをコンバートするため tfprocess.init_net(planes, probs, winner) tfprocess.replace_weights(weights) tf.train.write_graph(tf.get_default_graph(), os.path.dirname(sys.argv[3]), os.path.basename(sys.argv[3]), as_text=True) with tf.get_default_graph().as_default(): saver = tf.train.Saver() print(saver.save(tfprocess.session, "./tmp/model.ckpt"))
weights = [] for e, line in enumerate(f): if e == 0: #Version print("Version", line.strip()) if line != '1\n': raise ValueError("Unknown version {}".format(line.strip())) else: weights.append(list(map(float, line.split(' ')))) if e == 2: channels = len(line.split(' ')) print("Channels", channels) blocks = e - (4 + 14) if blocks % 8 != 0: raise ValueError("Inconsistent number of weights in the file") blocks //= 8 print("Blocks", blocks) tfprocess = TFProcess() tfprocess.init(batch_size=1) if tfprocess.RESIDUAL_BLOCKS != blocks: raise ValueError("Number of blocks in tensorflow model doesn't match "\ "number of blocks in input network") if tfprocess.RESIDUAL_FILTERS != channels: raise ValueError("Number of filters in tensorflow model doesn't match "\ "number of filters in input network") tfprocess.replace_weights(weights) path = os.path.join(os.getcwd(), "leelaz-model") save_path = tfprocess.saver.save(tfprocess.session, path, global_step=0)
print("Version", line.strip()) if line != '1\n': raise ValueError("Unknown version {}".format(line.strip())) else: weights.append(list(map(float, line.split(' ')))) if e == 2: channels = len(line.split(' ')) print("Channels", channels) blocks = e - (4 + 14) if blocks % 8 != 0: raise ValueError("Inconsistent number of weights in the file") blocks /= 8 print("Blocks", blocks) return weights if __name__ == '__main__': gpu_num = 2 x = [[ tf.placeholder(tf.float32, [None, 18, 19 * 19]), tf.placeholder(tf.float32, [None, 362]), tf.placeholder(tf.float32, [None, 1]) ] for _ in range(gpu_num)] tfprocess = TFProcess(x) tfprocess.save_leelaz_weights("./save/random.txt") path = os.path.join(leela_conf.SAVE_DIR, "leelaz-model") tfprocess.replace_weights(get_weights(sys.argv[1])) print("saved to: ", path) save_path = tfprocess.save(0, path)