def __init__(self, cfg, dirname):
    self.parser = ChunkParser(cfg, dirname)
    self.cfg = cfg
    self.xsize = cfg.xsize
    self.ysize = cfg.ysize
    self.input_channels = cfg.input_channels
    self.input_features = cfg.input_features
    self.policy_map = cfg.policy_map
def main():
    if len(sys.argv) != 2:
        print("Usage: {} config.yaml".format(sys.argv[0]))
        return 1

    cfg = yaml.safe_load(open(sys.argv[1], 'r').read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks)
    num_train = int(num_chunks * cfg['dataset']['train_ratio'])
    shuffle_size = cfg['training']['shuffle_size']
    ChunkParser.BATCH_SIZE = cfg['training']['batch_size']

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    #bench_parser = ChunkParser(FileDataSrc(chunks[:1000]), shuffle_size=1<<14, sample=SKIP, batch_size=ChunkParser.BATCH_SIZE)
    #benchmark(bench_parser)

    train_parser = ChunkParser(FileDataSrc(chunks[:num_train]),
                               shuffle_size=shuffle_size,
                               sample=SKIP,
                               batch_size=ChunkParser.BATCH_SIZE)
    #benchmark(train_parser)
    dataset = tf.data.Dataset.from_generator(
        train_parser.parse, output_types=(tf.string, tf.string, tf.string))
    dataset = dataset.map(ChunkParser.parse_function)
    dataset = dataset.prefetch(4)
    train_iterator = dataset.make_one_shot_iterator()

    test_parser = ChunkParser(FileDataSrc(chunks[num_train:]),
                              batch_size=ChunkParser.BATCH_SIZE)
    dataset = tf.data.Dataset.from_generator(
        test_parser.parse, output_types=(tf.string, tf.string, tf.string))
    dataset = dataset.map(ChunkParser.parse_function)
    dataset = dataset.prefetch(4)
    test_iterator = dataset.make_one_shot_iterator()

    tfprocess = TFProcess(cfg)
    tfprocess.init(dataset, train_iterator, test_iterator)

    if os.path.exists(os.path.join(root_dir, 'checkpoint')):
        cp = get_checkpoint(root_dir)
        tfprocess.restore(cp)

    # Sweeps through all test chunks statistically
    num_evals = int(round(((num_chunks - num_train) * (200 / SKIP)) / ChunkParser.BATCH_SIZE))
    print("Using {} evaluation batches".format(num_evals))

    # while True:
    for _ in range(cfg['training']['total_steps']):
        tfprocess.process(ChunkParser.BATCH_SIZE, num_evals)
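# A minimal sketch (not from the source) of the config structure this main()
# expects. The key names are exactly the cfg[...] lookups above; the values
# are illustrative placeholders only.
import yaml

example_cfg = {
    'name': 'example-net',                    # used for the run directory name
    'dataset': {
        'num_chunks': 100000,                 # how many of the latest chunks to load
        'input': '/path/to/training/chunks/',
        'train_ratio': 0.90,                  # train/test split of the chunks
    },
    'training': {
        'shuffle_size': 1 << 19,
        'batch_size': 1024,
        'path': '/path/to/networks/',         # root for checkpoints
        'total_steps': 1000,                  # training steps per invocation
    },
}
print(yaml.dump(example_cfg, default_flow_style=False))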
def extract_data(parser: ChunkParser, chunkdata):
    lst = []
    gen = parser.sample_record(chunkdata)
    for s in gen:
        (planes, probs, winner), (ver, probs2, planes, us_ooo, us_oo,
                                  them_ooo, them_oo, stm, rule50_plane,
                                  move_count, winner, planes1) = \
            parser.convert_v3_to_tuple(s, return_planes=True)
        shape = {'planes': planes1, 'probs': probs, 'winner': winner}
        lst.append(shape)
    return lst
def main(args):
    train_data_prefix = args.pop(0)

    chunks = get_chunks(train_data_prefix)
    print("Found {0} chunks".format(len(chunks)))

    if not chunks:
        return

    # The following assumes positions from one game are not
    # spread through chunks.
    random.shuffle(chunks)
    training, test = split_chunks(chunks, 0.1)
    print("Training with {0} chunks, validating on {1} chunks".format(
        len(training), len(test)))

    train_parser = ChunkParser(FileDataSrc(training),
                               shuffle_size=1 << 19,
                               sample=DOWN_SAMPLE,
                               batch_size=BATCH_SIZE)
    #benchmark(train_parser)

    dataset = tf.data.Dataset.from_generator(
        train_parser.parse, output_types=(tf.string, tf.string, tf.string))
    dataset = dataset.map(_parse_function)
    dataset = dataset.prefetch(4)
    train_iterator = dataset.make_one_shot_iterator()

    test_parser = ChunkParser(FileDataSrc(test),
                              shuffle_size=1 << 19,
                              sample=DOWN_SAMPLE,
                              batch_size=BATCH_SIZE)
    dataset = tf.data.Dataset.from_generator(
        test_parser.parse, output_types=(tf.string, tf.string, tf.string))
    dataset = dataset.map(_parse_function)
    dataset = dataset.prefetch(4)
    test_iterator = dataset.make_one_shot_iterator()

    tfprocess = TFProcess()
    tfprocess.init(dataset, train_iterator, test_iterator)

    #benchmark1(tfprocess)

    if args:
        restore_file = args.pop(0)
        tfprocess.restore(restore_file)
    while True:
        tfprocess.process(BATCH_SIZE)
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks)

    train_ratio = cfg['dataset']['train_ratio']
    num_train = int(num_chunks * train_ratio)
    shuffle_size = cfg['training']['shuffle_size']
    ChunkParser.BATCH_SIZE = cfg['training']['batch_size']

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    train_parser = ChunkParser(FileDataSrc(chunks[:num_train]),
                               shuffle_size=shuffle_size,
                               sample=SKIP,
                               batch_size=ChunkParser.BATCH_SIZE)
    dataset = tf.data.Dataset.from_generator(
        train_parser.parse, output_types=(tf.string, tf.string, tf.string))
    dataset = dataset.map(ChunkParser.parse_function)
    dataset = dataset.prefetch(4)
    train_iterator = dataset.make_one_shot_iterator()

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    test_parser = ChunkParser(FileDataSrc(chunks[num_train:]),
                              shuffle_size=shuffle_size,
                              sample=SKIP,
                              batch_size=ChunkParser.BATCH_SIZE)
    dataset = tf.data.Dataset.from_generator(
        test_parser.parse, output_types=(tf.string, tf.string, tf.string))
    dataset = dataset.map(ChunkParser.parse_function)
    dataset = dataset.prefetch(4)
    test_iterator = dataset.make_one_shot_iterator()

    tfprocess = TFProcess(cfg)
    tfprocess.init(dataset, train_iterator, test_iterator)

    if os.path.exists(os.path.join(root_dir, 'checkpoint')):
        cp = get_checkpoint(root_dir)
        tfprocess.restore(cp)

    # Sweeps through all test chunks statistically
    num_evals = (num_chunks - num_train) * 10 // ChunkParser.BATCH_SIZE
    print("Using {} evaluation batches".format(num_evals))

    for _ in range(cfg['training']['total_steps']):
        tfprocess.process(ChunkParser.BATCH_SIZE, num_evals)

    tfprocess.save_leelaz_weights(cmd.output)
    tfprocess.session.close()
    train_parser.shutdown()
    test_parser.shutdown()
def chunk_parser(q_in, q_out, shuffle_size, chunk_size):
    """
    Parse input chunks from 'q_in', shuffle, and put
    chunks of moves in v2 format into 'q_out'.

    Each output chunk contains 'chunk_size' moves.
    Moves are shuffled in a buffer of 'shuffle_size' moves.
    (A 2^20 items shuffle buffer is ~ 2.2GB of RAM).
    """
    workers = max(1, mp.cpu_count() - 2)
    parse = ChunkParser(QueueChunkSrc(q_in),
                        shuffle_size=shuffle_size,
                        workers=workers)
    gen = parse.v2_gen()
    while True:
        s = list(itertools.islice(gen, chunk_size))
        if not len(s):
            break
        s = b''.join(s)
        q_out.put(s)
    q_out.put('STOP')
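# A minimal sketch (assumptions, not from the source) of driving chunk_parser()
# from a parent process: raw chunk blobs go into q_in, shuffled v2-format move
# chunks come out of q_out until the 'STOP' sentinel. The feed_chunks() helper,
# the chunk_files list, and the q_in end-of-input sentinel are hypothetical;
# QueueChunkSrc is assumed to consume raw chunk bytes from q_in.
import gzip
import multiprocessing as mp

def feed_chunks(q_in, chunk_files):
    for name in chunk_files:
        with gzip.open(name, 'rb') as f:
            q_in.put(f.read())
    q_in.put('STOP')                 # assumed sentinel for QueueChunkSrc

if __name__ == '__main__':
    q_in, q_out = mp.Queue(maxsize=16), mp.Queue(maxsize=16)
    chunk_files = []                 # e.g. a list of *.gz training chunks
    mp.Process(target=feed_chunks, args=(q_in, chunk_files)).start()
    mp.Process(target=chunk_parser, args=(q_in, q_out, 1 << 20, 4096)).start()
    while True:
        item = q_out.get()
        if item == 'STOP':
            break
        # 'item' is a byte string holding chunk_size shuffled v2 records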
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks)

    train_ratio = cfg['dataset']['train_ratio']
    num_train = int(num_chunks * train_ratio)
    shuffle_size = cfg['training']['shuffle_size']
    ChunkParser.BATCH_SIZE = cfg['training']['batch_size']

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    train_parser = ChunkParser(FileDataSrc(chunks[:num_train]),
                               shuffle_size=shuffle_size,
                               sample=SKIP,
                               batch_size=ChunkParser.BATCH_SIZE)
    dataset = tf.data.Dataset.from_generator(
        train_parser.parse, output_types=(tf.string, tf.string, tf.string))
    dataset = dataset.map(ChunkParser.parse_function)
    dataset = dataset.prefetch(4)
    train_iterator = dataset.make_one_shot_iterator()

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    test_parser = ChunkParser(FileDataSrc(chunks[num_train:]),
                              shuffle_size=shuffle_size,
                              sample=SKIP,
                              batch_size=ChunkParser.BATCH_SIZE)
    dataset = tf.data.Dataset.from_generator(
        test_parser.parse, output_types=(tf.string, tf.string, tf.string))
    dataset = dataset.map(ChunkParser.parse_function)
    dataset = dataset.prefetch(4)
    test_iterator = dataset.make_one_shot_iterator()

    tfprocess = TFProcess(cfg)
    tfprocess.init(dataset, train_iterator, test_iterator)

    if os.path.exists(os.path.join(root_dir, 'checkpoint')):
        cp = get_checkpoint(root_dir)
        tfprocess.restore(cp)

    # Sweeps through all test chunks statistically
    num_evals = (num_chunks - num_train) * 10 // ChunkParser.BATCH_SIZE
    print("Using {} evaluation batches".format(num_evals))

    for _ in range(cfg['training']['total_steps']):
        tfprocess.process(ChunkParser.BATCH_SIZE, num_evals)

    tfprocess.save_leelaz_weights(cmd.output)
    tfprocess.session.close()
    train_parser.shutdown()
    test_parser.shutdown()
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    train_ratio = cfg['dataset']['train_ratio']
    num_train = int(num_chunks * train_ratio)
    num_test = num_chunks - num_train
    if 'input_test' in cfg['dataset']:
        train_chunks = get_latest_chunks(cfg['dataset']['input_train'], num_train)
        test_chunks = get_latest_chunks(cfg['dataset']['input_test'], num_test)
    else:
        chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks)
        train_chunks = chunks[:num_train]
        test_chunks = chunks[num_train:]

    shuffle_size = cfg['training']['shuffle_size']
    total_batch_size = cfg['training']['batch_size']
    batch_splits = cfg['training'].get('num_batch_splits', 1)
    if total_batch_size % batch_splits != 0:
        raise ValueError('num_batch_splits must divide batch_size evenly')
    split_batch_size = total_batch_size // batch_splits
    # Load data with split batch size, which will be combined to the total batch size in tfprocess.
    ChunkParser.BATCH_SIZE = split_batch_size

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    train_parser = ChunkParser(FileDataSrc(train_chunks),
                               shuffle_size=shuffle_size,
                               sample=SKIP,
                               batch_size=ChunkParser.BATCH_SIZE)
    dataset = tf.data.Dataset.from_generator(
        train_parser.parse,
        output_types=(tf.string, tf.string, tf.string, tf.string))
    dataset = dataset.map(ChunkParser.parse_function)
    dataset = dataset.prefetch(4)
    train_iterator = dataset.make_one_shot_iterator()

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    test_parser = ChunkParser(FileDataSrc(test_chunks),
                              shuffle_size=shuffle_size,
                              sample=SKIP,
                              batch_size=ChunkParser.BATCH_SIZE)
    dataset = tf.data.Dataset.from_generator(
        test_parser.parse,
        output_types=(tf.string, tf.string, tf.string, tf.string))
    dataset = dataset.map(ChunkParser.parse_function)
    dataset = dataset.prefetch(4)
    test_iterator = dataset.make_one_shot_iterator()

    tfprocess = TFProcess(cfg)
    tfprocess.init(dataset, train_iterator, test_iterator)

    if os.path.exists(os.path.join(root_dir, 'checkpoint')):
        cp = tf.train.latest_checkpoint(root_dir)
        tfprocess.restore(cp)

    # If number of test positions is not given
    # sweeps through all test chunks statistically
    # Assumes average of 10 samples per test game.
    # For simplicity, testing can use the split batch size instead of total batch size.
    # This does not affect results, because test results are simple averages that are independent of batch size.
    num_evals = cfg['training'].get('num_test_positions', num_test * 10)
    num_evals = max(1, num_evals // ChunkParser.BATCH_SIZE)
    print("Using {} evaluation batches".format(num_evals))

    tfprocess.process_loop(total_batch_size, num_evals, batch_splits=batch_splits)

    if cmd.output is not None:
        tfprocess.save_leelaz_weights(cmd.output)

    tfprocess.session.close()
    train_parser.shutdown()
    test_parser.shutdown()
def main():
    parser = argparse.ArgumentParser(
        description='Train network from game data.')
    parser.add_argument("blockspref",
                        help="Number of blocks", nargs='?', type=int)
    parser.add_argument("filterspref",
                        help="Number of filters", nargs='?', type=int)
    parser.add_argument("trainpref",
                        help='Training file prefix', nargs='?', type=str)
    parser.add_argument("restorepref",
                        help='Training snapshot prefix', nargs='?', type=str)
    parser.add_argument("--blocks", '-b',
                        help="Number of blocks", type=int)
    parser.add_argument("--filters", '-f',
                        help="Number of filters", type=int)
    parser.add_argument("--train", '-t',
                        help="Training file prefix", type=str)
    parser.add_argument("--test", help="Test file prefix", type=str)
    parser.add_argument("--restore", type=str,
                        help="Prefix of tensorflow snapshot to restore from")
    parser.add_argument(
        "--logbase", default='leelalogs', type=str,
        help="Log file prefix (for tensorboard) (default: %(default)s)")
    parser.add_argument(
        "--sample", default=DOWN_SAMPLE, type=int,
        help="Rate of data down-sampling to use (default: %(default)d)")
    args = parser.parse_args()

    blocks = args.blocks or args.blockspref
    filters = args.filters or args.filterspref
    train_data_prefix = args.train or args.trainpref
    restore_prefix = args.restore or args.restorepref

    if not blocks or not filters:
        print("Must supply number of blocks and filters")
        return

    training = get_chunks(train_data_prefix)
    if not args.test:
        # Generate test by taking 10% of the training chunks.
        random.shuffle(training)
        print("here1")
        training, test = split_chunks(training, 0.1)
    else:
        test = get_chunks(args.test)

    if not training:
        print("No data to train on!")
        return

    print("Training with {0} chunks, validating on {1} chunks".format(
        len(training), len(test)))

    train_parser = ChunkParser(
        FileDataSrc(training),
        shuffle_size=1 << 20,  # 2.2GB of RAM.
        sample=args.sample,
        batch_size=RAM_BATCH_SIZE).parse()

    test_parser = ChunkParser(FileDataSrc(test),
                              shuffle_size=1 << 19,
                              sample=args.sample,
                              batch_size=RAM_BATCH_SIZE).parse()

    tfprocess = TFProcess(blocks, filters)
    tfprocess.init(RAM_BATCH_SIZE,
                   logbase=args.logbase,
                   macrobatch=BATCH_SIZE // RAM_BATCH_SIZE)

    #benchmark1(tfprocess)

    if restore_prefix:
        tfprocess.restore(restore_prefix)
    tfprocess.process(train_parser, test_parser)
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    allow_less = cfg['dataset'].get('allow_less_chunks', False)
    train_ratio = cfg['dataset']['train_ratio']
    experimental_parser = cfg['dataset'].get('experimental_v5_only_dataset', False)
    # num_train = int(num_chunks * train_ratio)
    # we just need to use one data loader, just put everything into train
    num_train = int(num_chunks)
    num_test = num_chunks - num_train
    sort_type = cfg['dataset'].get('sort_type', 'mtime')
    if sort_type == 'mtime':
        sort_key_fn = os.path.getmtime
    elif sort_type == 'number':
        sort_key_fn = game_number_for_name
    elif sort_type == 'name':
        sort_key_fn = identity_function
    else:
        raise ValueError('Unknown dataset sort_type: {}'.format(sort_type))
    if 'input_test' in cfg['dataset']:
        train_chunks = get_latest_chunks(cfg['dataset']['input_train'],
                                         num_train, allow_less, sort_key_fn)
        test_chunks = get_latest_chunks(cfg['dataset']['input_test'], num_test,
                                        allow_less, sort_key_fn)
    else:
        chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks,
                                   allow_less, sort_key_fn)
        if allow_less:
            num_train = int(len(chunks) * train_ratio)
            num_test = len(chunks) - num_train
        train_chunks = chunks[:num_train]
        test_chunks = chunks[num_train:]

    # shuffle_size = cfg['training']['shuffle_size']
    shuffle_size = 1
    total_batch_size = cfg['training']['batch_size']
    batch_splits = cfg['training'].get('num_batch_splits', 1)
    train_workers = cfg['dataset'].get('train_workers', None)
    test_workers = cfg['dataset'].get('test_workers', None)
    if total_batch_size % batch_splits != 0:
        raise ValueError('num_batch_splits must divide batch_size evenly')
    split_batch_size = total_batch_size // batch_splits
    # Load data with split batch size, which will be combined to the total batch size in tfprocess.
    ChunkParser.BATCH_SIZE = split_batch_size

    value_focus_min = cfg['training'].get('value_focus_min', 1)
    value_focus_slope = cfg['training'].get('value_focus_slope', 0)

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    tfprocess = TFProcess(cfg)
    experimental_reads = max(2, mp.cpu_count() - 2) // 2
    extractor = select_extractor(tfprocess.INPUT_MODE)

    if experimental_parser and (value_focus_min != 1
                                or value_focus_slope != 0):
        raise ValueError(
            'Experimental parser does not support non-default value \
            focus parameters.')

    def read(x):
        return tf.data.FixedLengthRecordDataset(
            x,
            8308,
            compression_type='GZIP',
            num_parallel_reads=experimental_reads)

    if experimental_parser:
        # train_dataset = tf.data.Dataset.from_tensor_slices(train_chunks).shuffle(len(train_chunks)).repeat().batch(256)\
        train_dataset = tf.data.Dataset.from_tensor_slices(train_chunks).repeat().batch(256)\
            .interleave(read, num_parallel_calls=1)\
            .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
            .batch(split_batch_size).map(extractor)
        #     .shuffle(shuffle_size)\
        #     .batch(split_batch_size).map(extractor)
    else:
        train_parser = ChunkParser(train_chunks,
                                   tfprocess.INPUT_MODE,
                                   shuffle_size=shuffle_size,
                                   sample=SKIP,
                                   batch_size=ChunkParser.BATCH_SIZE,
                                   value_focus_min=value_focus_min,
                                   value_focus_slope=value_focus_slope,
                                   workers=train_workers)
        train_dataset = tf.data.Dataset.from_generator(
            train_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        train_dataset = train_dataset.map(ChunkParser.parse_function)

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    if experimental_parser:
        # test_dataset = tf.data.Dataset.from_tensor_slices(test_chunks).shuffle(len(test_chunks)).repeat().batch(256)\
        test_dataset = tf.data.Dataset.from_tensor_slices(test_chunks).repeat().batch(256)\
            .interleave(read, num_parallel_calls=2)\
            .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
            .batch(split_batch_size).map(extractor)
        #     .shuffle(shuffle_size)\
        #     .batch(split_batch_size).map(extractor)
    else:
        # no value focus for test_parser
        test_parser = ChunkParser(test_chunks,
                                  tfprocess.INPUT_MODE,
                                  shuffle_size=shuffle_size,
                                  sample=SKIP,
                                  batch_size=ChunkParser.BATCH_SIZE,
                                  workers=test_workers)
        test_dataset = tf.data.Dataset.from_generator(
            test_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        test_dataset = test_dataset.map(ChunkParser.parse_function)

    validation_dataset = None
    if 'input_validation' in cfg['dataset']:
        valid_chunks = get_all_chunks(cfg['dataset']['input_validation'])
        validation_dataset = tf.data.FixedLengthRecordDataset(
            valid_chunks,
            8308,
            compression_type='GZIP',
            num_parallel_reads=experimental_reads)\
            .batch(split_batch_size, drop_remainder=True).map(extractor)

    if tfprocess.strategy is None:
        # Mirrored strategy appends prefetch itself with a value depending on number of replicas
        train_dataset = train_dataset.prefetch(4)
        test_dataset = test_dataset.prefetch(4)
        if validation_dataset is not None:
            validation_dataset = validation_dataset.prefetch(4)
    else:
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        train_dataset = train_dataset.with_options(options)
        test_dataset = test_dataset.with_options(options)
        if validation_dataset is not None:
            validation_dataset = validation_dataset.with_options(options)

    ##########################
    #    Custom Additions    #
    ##########################

    tfprocess.init_v2(train_dataset, test_dataset, validation_dataset)

    # load net from weights file given in yaml config
    tfprocess.replace_weights_v2(proto_filename=cmd.net, ignore_errors=False)
    tfprocess.model.summary()

    for layer_name, path in zip(cmd.layer, cmd.path):
        # sort data files
        train_chunks = sorted(train_chunks)

        # create predictor that gives access to specific intermediate layer
        layer = tfprocess.model.get_layer(layer_name)
        earlyPredictor = tf.keras.models.Model(
            tfprocess.model.inputs,
            [tfprocess.model.inputs, tfprocess.model.outputs, layer.output])

        # create custom iterator which doesn't shuffle the data etc
        custom_parse_gen = train_parser.custom_parse(train_chunks)
        turn_counter = 0
        custom_iter = iter(custom_parse_gen)

        # prepare dataframe
        df = pd.DataFrame()

        # iterate entire dataset generator / iterator
        for data in custom_iter:  # i in range(30):
            # data = next(custom_iter)
            planes, probs, winner, best_q = train_parser.custom_get_batch(data)
            x = planes
            print('predicting...')
            _, _, layer_results = earlyPredictor.predict(x)
            # append to dataframe
            # df = df.append(pd.DataFrame(activation_31.reshape(-1,128*8*8)))
            shape_tuple = (-1, np.prod(layer.output_shape[1:]))
            df = df.append(pd.DataFrame(layer_results.reshape(shape_tuple)))
            turn_counter += len(x)

        df.info()
        df.to_csv(path)
        print('done')

    train_parser.shutdown()
    test_parser.shutdown()
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks)

    train_ratio = cfg['dataset']['train_ratio']
    num_train = int(num_chunks * train_ratio)
    shuffle_size = cfg['training']['shuffle_size']
    ChunkParser.BATCH_SIZE = cfg['training']['batch_size']

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    train_parser = ChunkParser(FileDataSrc(chunks[:num_train]),
                               shuffle_size=shuffle_size,
                               sample=SKIP,
                               batch_size=ChunkParser.BATCH_SIZE)
    dataset = tf.data.Dataset.from_generator(
        train_parser.parse, output_types=(tf.string, tf.string, tf.string))
    dataset = dataset.map(ChunkParser.parse_function)
    dataset = dataset.prefetch(4)
    train_iterator = dataset.make_one_shot_iterator()

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    test_parser = ChunkParser(FileDataSrc(chunks[num_train:]),
                              shuffle_size=shuffle_size,
                              sample=SKIP,
                              batch_size=ChunkParser.BATCH_SIZE)
    dataset = tf.data.Dataset.from_generator(
        test_parser.parse, output_types=(tf.string, tf.string, tf.string))
    dataset = dataset.map(ChunkParser.parse_function)
    dataset = dataset.prefetch(4)
    test_iterator = dataset.make_one_shot_iterator()

    tfprocess = TFProcess(cfg)
    tfprocess.init(dataset, train_iterator, test_iterator)

    if os.path.exists(os.path.join(root_dir, 'checkpoint')):
        cp = get_checkpoint(root_dir)
        tfprocess.restore(cp)

    # Sweeps through all test chunks statistically
    num_evals = (num_chunks - num_train) * 10 // ChunkParser.BATCH_SIZE
    print("Using {} evaluation batches".format(num_evals))

    for _ in range(cfg['training']['total_steps']):
        tfprocess.process(ChunkParser.BATCH_SIZE, num_evals)

    tfprocess.save_leelaz_weights('/tmp/weights.txt')

    with open('/tmp/weights.txt', 'rb') as f:
        m = hashlib.sha256()
        w = f.read()
        m.update(w)
        digest = m.hexdigest()

    filename = '/tmp/{}.gz'.format(digest)
    with gzip.open(filename, 'wb') as f:
        f.write(w)

    if cmd.upload:
        metadata = {'training_id': '1',
                    'layers': cfg['model']['residual_blocks'],
                    'filters': cfg['model']['filters']}
        print("\nUploading `{}'...".format(digest[:8]), end='')
        upload(cmd.upload, metadata, filename)
        print("[done]\n")
    else:
        print("\nStored `{}'\n".format(filename))
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    allow_less = cfg['dataset'].get('allow_less_chunks', False)
    train_ratio = cfg['dataset']['train_ratio']
    num_train = int(num_chunks * train_ratio)
    num_test = num_chunks - num_train
    sort_type = cfg['dataset'].get('sort_type', 'mtime')
    if sort_type == 'mtime':
        sort_key_fn = os.path.getmtime
    elif sort_type == 'number':
        sort_key_fn = game_number_for_name
    elif sort_type == 'name':
        sort_key_fn = identity_function
    else:
        raise ValueError('Unknown dataset sort_type: {}'.format(sort_type))
    if 'input_test' in cfg['dataset']:
        train_chunks = get_latest_chunks(cfg['dataset']['input_train'],
                                         num_train, allow_less, sort_key_fn)
        test_chunks = get_latest_chunks(cfg['dataset']['input_test'], num_test,
                                        allow_less, sort_key_fn)
    else:
        chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks,
                                   allow_less, sort_key_fn)
        if allow_less:
            num_train = int(len(chunks) * train_ratio)
            num_test = len(chunks) - num_train
        train_chunks = chunks[:num_train]
        test_chunks = chunks[num_train:]

    shuffle_size = cfg['training']['shuffle_size']
    total_batch_size = cfg['training']['batch_size']
    batch_splits = cfg['training'].get('num_batch_splits', 1)
    train_workers = cfg['dataset'].get('train_workers', None)
    test_workers = cfg['dataset'].get('test_workers', None)
    if total_batch_size % batch_splits != 0:
        raise ValueError('num_batch_splits must divide batch_size evenly')
    split_batch_size = total_batch_size // batch_splits

    diff_focus_min = cfg['training'].get('diff_focus_min', 1)
    diff_focus_slope = cfg['training'].get('diff_focus_slope', 0)
    diff_focus_q_weight = cfg['training'].get('diff_focus_q_weight', 6.0)
    diff_focus_pol_scale = cfg['training'].get('diff_focus_pol_scale', 3.5)

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    train_parser = ChunkParser(train_chunks,
                               get_input_mode(cfg),
                               shuffle_size=shuffle_size,
                               sample=SKIP,
                               batch_size=split_batch_size,
                               diff_focus_min=diff_focus_min,
                               diff_focus_slope=diff_focus_slope,
                               diff_focus_q_weight=diff_focus_q_weight,
                               diff_focus_pol_scale=diff_focus_pol_scale,
                               workers=train_workers)

    test_shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    # no diff focus for test_parser
    test_parser = ChunkParser(test_chunks,
                              get_input_mode(cfg),
                              shuffle_size=test_shuffle_size,
                              sample=SKIP,
                              batch_size=split_batch_size,
                              workers=test_workers)
    if 'input_validation' in cfg['dataset']:
        valid_chunks = get_all_chunks(cfg['dataset']['input_validation'])
        validation_parser = ChunkParser(valid_chunks,
                                        get_input_mode(cfg),
                                        sample=1,
                                        batch_size=split_batch_size,
                                        workers=0)

    import tensorflow as tf
    from chunkparsefunc import parse_function
    from tfprocess import TFProcess
    tfprocess = TFProcess(cfg)
    train_dataset = tf.data.Dataset.from_generator(
        train_parser.parse,
        output_types=(tf.string, tf.string, tf.string, tf.string, tf.string))
    train_dataset = train_dataset.map(parse_function)
    test_dataset = tf.data.Dataset.from_generator(
        test_parser.parse,
        output_types=(tf.string, tf.string, tf.string, tf.string, tf.string))
    test_dataset = test_dataset.map(parse_function)

    validation_dataset = None
    if 'input_validation' in cfg['dataset']:
        validation_dataset = tf.data.Dataset.from_generator(
            validation_parser.sequential,
            output_types=(tf.string, tf.string, tf.string, tf.string, tf.string))
        validation_dataset = validation_dataset.map(parse_function)

    if tfprocess.strategy is None:
        # Mirrored strategy appends prefetch itself with a value depending on number of replicas
        train_dataset = train_dataset.prefetch(4)
        test_dataset = test_dataset.prefetch(4)
        if validation_dataset is not None:
            validation_dataset = validation_dataset.prefetch(4)
    else:
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        train_dataset = train_dataset.with_options(options)
        test_dataset = test_dataset.with_options(options)
        if validation_dataset is not None:
            validation_dataset = validation_dataset.with_options(options)

    tfprocess.init(train_dataset, test_dataset, validation_dataset)

    tfprocess.restore()

    # If number of test positions is not given
    # sweeps through all test chunks statistically
    # Assumes average of 10 samples per test game.
    # For simplicity, testing can use the split batch size instead of total batch size.
    # This does not affect results, because test results are simple averages that are independent of batch size.
    num_evals = cfg['training'].get('num_test_positions',
                                    len(test_chunks) * 10)
    num_evals = max(1, num_evals // split_batch_size)
    print("Using {} evaluation batches".format(num_evals))
    tfprocess.total_batch_size = total_batch_size
    tfprocess.process_loop(total_batch_size, num_evals, batch_splits=batch_splits)

    if cmd.output is not None:
        if cfg['training'].get('swa_output', False):
            tfprocess.save_swa_weights(cmd.output)
        else:
            tfprocess.save_leelaz_weights(cmd.output)

    train_parser.shutdown()
    test_parser.shutdown()
class DataSet():
    def __init__(self, cfg, dirname):
        self.parser = ChunkParser(cfg, dirname)
        self.cfg = cfg
        self.xsize = cfg.xsize
        self.ysize = cfg.ysize
        self.input_channels = cfg.input_channels
        self.input_features = cfg.input_features
        self.policy_map = cfg.policy_map

    def get_x(self, idx):
        return idx % self.xsize

    def get_y(self, idx):
        return idx // self.xsize

    def __getitem__(self, idx):
        b, s = self.parser[idx]
        data = self.parser.unpack_v1(b, s)

        input_planes = np.zeros((self.input_channels, self.ysize, self.xsize))
        input_features = np.zeros(self.input_features)
        pol = np.zeros(self.policy_map * self.ysize * self.xsize)
        wdl = np.zeros(3)
        stm = np.zeros(1)

        symmetry = bool(np.random.choice(2, 1)[0])

        # input planes
        for i in range(7):
            start = data.ACCUMULATE[i]
            num = data.PIECES_NUMBER[i]
            for n in range(num):
                cp_idx = data.current_pieces[start + n]
                if symmetry:
                    cp_idx = symmetry_index[cp_idx]
                if cp_idx != -1:
                    x = self.get_x(cp_idx)
                    y = self.get_y(cp_idx)
                    input_planes[i][y][x] = 1

                op_idx = data.other_pieces[start + n]
                if symmetry:
                    op_idx = symmetry_index[op_idx]
                if op_idx != -1:
                    x = self.get_x(op_idx)
                    y = self.get_y(op_idx)
                    input_planes[i + 7][y][x] = 1

        if data.tomove == 1:
            input_planes[14][:] = 1
        else:
            input_planes[15][:] = 1

        # input features
        input_features[0] = data.plies / 30
        input_features[1] = data.rule50_remaining / 30
        if data.repetitions >= 1:
            input_features[2] = 1
        if data.repetitions >= 2:
            input_features[3] = 1

        # probabilities
        for idx, p in zip(data.policyindex, data.probabilities):
            prob_idx = idx
            if symmetry:
                prob_idx = symmetry_maps[prob_idx]
            assert prob_idx != -1, "Invalid probabilities"
            pol[prob_idx] = p

        # winrate
        stm = data.result
        wdl[1 - data.result] = 1

        return (torch.tensor(input_planes).float(),
                torch.tensor(input_features).float(),
                torch.tensor(pol).float(),
                torch.tensor(wdl).float(),
                torch.tensor(stm).float())

    def __len__(self):
        return len(self.parser)
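# A minimal sketch (assumption, not from the source) of feeding DataSet to a
# standard PyTorch DataLoader; 'cfg' and 'dirname' stand for whatever config
# object and chunk directory the surrounding project passes to DataSet.
from torch.utils.data import DataLoader

def make_loader(cfg, dirname, batch_size=256):
    # __getitem__/__len__ above make DataSet a map-style dataset, so the
    # default sampler and collate function batch the returned tensors as-is.
    dataset = DataSet(cfg, dirname)
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=True,
                      num_workers=4)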
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    allow_less = cfg['dataset'].get('allow_less_chunks', False)
    train_ratio = cfg['dataset']['train_ratio']
    experimental_parser = cfg['dataset'].get('experimental_v5_only_dataset', False)
    # num_train = int(num_chunks * train_ratio)
    # we just need to use one data loader, just put everything into train
    num_train = int(num_chunks)
    num_test = num_chunks - num_train
    sort_type = cfg['dataset'].get('sort_type', 'mtime')
    if sort_type == 'mtime':
        sort_key_fn = os.path.getmtime
    elif sort_type == 'number':
        sort_key_fn = game_number_for_name
    elif sort_type == 'name':
        sort_key_fn = identity_function
    else:
        raise ValueError('Unknown dataset sort_type: {}'.format(sort_type))
    if 'input_test' in cfg['dataset']:
        train_chunks = get_latest_chunks(cfg['dataset']['input_train'],
                                         num_train, allow_less, sort_key_fn)
        test_chunks = get_latest_chunks(cfg['dataset']['input_test'], num_test,
                                        allow_less, sort_key_fn)
    else:
        chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks,
                                   allow_less, sort_key_fn)
        if allow_less:
            num_train = int(len(chunks) * train_ratio)
            num_test = len(chunks) - num_train
        train_chunks = chunks[:num_train]
        test_chunks = chunks[num_train:]

    # shuffle_size = cfg['training']['shuffle_size']
    shuffle_size = 1
    total_batch_size = cfg['training']['batch_size']
    batch_splits = cfg['training'].get('num_batch_splits', 1)
    train_workers = cfg['dataset'].get('train_workers', None)
    test_workers = cfg['dataset'].get('test_workers', None)
    if total_batch_size % batch_splits != 0:
        raise ValueError('num_batch_splits must divide batch_size evenly')
    split_batch_size = total_batch_size // batch_splits
    # Load data with split batch size, which will be combined to the total batch size in tfprocess.
    ChunkParser.BATCH_SIZE = split_batch_size

    value_focus_min = cfg['training'].get('value_focus_min', 1)
    value_focus_slope = cfg['training'].get('value_focus_slope', 0)

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    tfprocess = TFProcess(cfg)
    experimental_reads = max(2, mp.cpu_count() - 2) // 2
    extractor = select_extractor(tfprocess.INPUT_MODE)

    if experimental_parser and (value_focus_min != 1
                                or value_focus_slope != 0):
        raise ValueError(
            'Experimental parser does not support non-default value \
            focus parameters.')

    def read(x):
        return tf.data.FixedLengthRecordDataset(
            x,
            8308,
            compression_type='GZIP',
            num_parallel_reads=experimental_reads)

    if experimental_parser:
        # train_dataset = tf.data.Dataset.from_tensor_slices(train_chunks).shuffle(len(train_chunks)).repeat().batch(256)\
        train_dataset = tf.data.Dataset.from_tensor_slices(train_chunks).repeat().batch(256)\
            .interleave(read, num_parallel_calls=1)\
            .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
            .batch(split_batch_size).map(extractor)
        #     .shuffle(shuffle_size)\
        #     .batch(split_batch_size).map(extractor)
    else:
        train_parser = ChunkParser(train_chunks,
                                   tfprocess.INPUT_MODE,
                                   shuffle_size=shuffle_size,
                                   sample=SKIP,
                                   batch_size=ChunkParser.BATCH_SIZE,
                                   value_focus_min=value_focus_min,
                                   value_focus_slope=value_focus_slope,
                                   workers=train_workers)
        train_dataset = tf.data.Dataset.from_generator(
            train_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        train_dataset = train_dataset.map(ChunkParser.parse_function)

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    if experimental_parser:
        # test_dataset = tf.data.Dataset.from_tensor_slices(test_chunks).shuffle(len(test_chunks)).repeat().batch(256)\
        test_dataset = tf.data.Dataset.from_tensor_slices(test_chunks).repeat().batch(256)\
            .interleave(read, num_parallel_calls=2)\
            .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
            .batch(split_batch_size).map(extractor)
        #     .shuffle(shuffle_size)\
        #     .batch(split_batch_size).map(extractor)
    else:
        # no value focus for test_parser
        test_parser = ChunkParser(test_chunks,
                                  tfprocess.INPUT_MODE,
                                  shuffle_size=shuffle_size,
                                  sample=SKIP,
                                  batch_size=ChunkParser.BATCH_SIZE,
                                  workers=test_workers)
        test_dataset = tf.data.Dataset.from_generator(
            test_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        test_dataset = test_dataset.map(ChunkParser.parse_function)

    validation_dataset = None
    if 'input_validation' in cfg['dataset']:
        valid_chunks = get_all_chunks(cfg['dataset']['input_validation'])
        validation_dataset = tf.data.FixedLengthRecordDataset(
            valid_chunks,
            8308,
            compression_type='GZIP',
            num_parallel_reads=experimental_reads)\
            .batch(split_batch_size, drop_remainder=True).map(extractor)

    if tfprocess.strategy is None:
        # Mirrored strategy appends prefetch itself with a value depending on number of replicas
        train_dataset = train_dataset.prefetch(4)
        test_dataset = test_dataset.prefetch(4)
        if validation_dataset is not None:
            validation_dataset = validation_dataset.prefetch(4)
    else:
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        train_dataset = train_dataset.with_options(options)
        test_dataset = test_dataset.with_options(options)
        if validation_dataset is not None:
            validation_dataset = validation_dataset.with_options(options)

    tfprocess.init_v2(train_dataset, test_dataset, validation_dataset)

    # load net from weights file given in yaml config
    tfprocess.replace_weights_v2(proto_filename=cmd.net, ignore_errors=False)

    train_chunks = sorted(train_chunks)
    custom_parse_gen = train_parser.custom_parse(train_chunks)
    print(train_chunks)
    counter = 0
    custom_iter = iter(custom_parse_gen)
    for data in custom_iter:  # i in range(30):
        # data = next(custom_iter)
        planes, probs, winner, best_q = train_parser.custom_get_batch(data)
        print(planes.shape)
        x = planes
        # TODO make sure no shuffling happens. the following output should
        # clearly show the first few moves of the game w.r.t. pawn placement
        for i in range(len(x)):
            counter += 1
            print('move no.:', counter)
            print(x[i, 0].reshape(8, 8))
            print()
            print()

    # TODO
    # print(tfprocess.model.summary())
    # print(tfprocess.train_dataset)
    # data_iter = iter(tfprocess.train_dataset)
    # nxt = next(data_iter)
    # x, _, _, _, _ = nxt
    # x2_0_r = tf.reshape(x[0], [1, 112, 64])
    # pred = tfprocess.model.predict(x)

    earlyPredictor = tf.keras.models.Model(
        tfprocess.model.inputs,
        [tfprocess.model.inputs, tfprocess.model.outputs,
         tfprocess.model.get_layer('activation_31').output])

    early_pred_single = earlyPredictor.predict(x)
    # print(np.array(early_pred_single[0]).shape)
    input = np.array(early_pred_single[0])
    print(input.shape)
    # print(early_pred_single[0])  # input
    # print(early_pred_single[1])  # output
    # print(early_pred_single[2])  # intermediate layer

    # print(train_parser.sample_record())
    # print(next(tfprocess.train_iter))

    # tfprocess.restore_v2()

    # If number of test positions is not given
    # sweeps through all test chunks statistically
    # Assumes average of 10 samples per test game.
    # For simplicity, testing can use the split batch size instead of total batch size.
    # This does not affect results, because test results are simple averages that are independent of batch size.
    # num_evals = cfg['training'].get('num_test_positions',
    #                                 len(test_chunks) * 10)
    # num_evals = max(1, num_evals // ChunkParser.BATCH_SIZE)
    # print("Using {} evaluation batches".format(num_evals))
    # tfprocess.total_batch_size = total_batch_size
    # tfprocess.process_loop_v2(total_batch_size,
    #                           num_evals,
    #                           batch_splits=batch_splits)

    # if cmd.output is not None:
    #     if cfg['training'].get('swa_output', False):
    #         tfprocess.save_swa_weights_v2(cmd.output)
    #     else:
    #         tfprocess.save_leelaz_weights_v2(cmd.output)

    train_parser.shutdown()
    test_parser.shutdown()
def read_data_sets(filenames,
                   cfg=None,
                   fake_data=False,
                   one_hot=False,
                   dtype=tf.float32,
                   reshape=True,
                   validation_size=5000,
                   seed=None):
    if cfg is None:
        cfg = yaml.safe_load(FLAGS.cfg.read())
    tf.logging.info(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    train_ratio = cfg['dataset']['train_ratio']
    num_train = int(num_chunks * train_ratio)
    num_test = num_chunks - num_train
    if 'input_test' in cfg['dataset']:
        train_chunks = get_latest_chunks(cfg['dataset']['input_train'], num_train)
        test_chunks = get_latest_chunks(cfg['dataset']['input_test'], num_test)
    else:
        chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks)
        train_chunks = chunks[:num_train]
        test_chunks = chunks[num_train:]

    shuffle_size = cfg['training']['shuffle_size']
    total_batch_size = cfg['training']['batch_size']
    batch_splits = cfg['training'].get('num_batch_splits', 1)
    if total_batch_size % batch_splits != 0:
        raise ValueError('num_batch_splits must divide batch_size evenly')
    split_batch_size = total_batch_size // batch_splits
    # Load data with split batch size, which will be combined to the total batch size in tfprocess.
    ChunkParser.BATCH_SIZE = split_batch_size

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    t_chunks = FileDataSrc(train_chunks)
    train_parser = ChunkParser(t_chunks,
                               shuffle_size=shuffle_size,
                               sample=SKIP,
                               batch_size=ChunkParser.BATCH_SIZE,
                               auto_start_workers=False)

    final_train_data = []
    tf.logging.info('Loading training dataset')
    for chunkdata in t_chunks:
        if len(final_train_data) > FLAGS.record_count:
            break
        lst = extract_data(train_parser, chunkdata)
        for i in lst:
            tf.logging.debug('{}: {:4}'.format('train', len(final_train_data)))
            final_train_data.append(i)

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    tt_chunks = FileDataSrc(test_chunks)
    test_parser = ChunkParser(tt_chunks,
                              shuffle_size=shuffle_size,
                              sample=SKIP,
                              batch_size=ChunkParser.BATCH_SIZE,
                              auto_start_workers=False)

    final_test_data = []
    tf.logging.info('Loading testing dataset')
    for chunkdata in tt_chunks:
        if len(final_test_data) > FLAGS.record_count:
            break
        lst = extract_data(test_parser, chunkdata)
        for i in lst:
            tf.logging.debug('{}: {:4}'.format('test', len(final_test_data)))
            final_test_data.append(i)

    train_parser.shutdown()
    test_parser.shutdown()

    datasets = Dataset(train_data=final_train_data, test_data=final_test_data)
    return datasets
            8308,
            compression_type='GZIP',
            num_parallel_reads=experimental_reads)

    if experimental_parser:
        train_dataset = tf.data.Dataset.from_tensor_slices(train_chunks).shuffle(len(train_chunks)).repeat().batch(256)\
            .interleave(read, num_parallel_calls=2)\
            .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
            .shuffle(shuffle_size)\
            .batch(split_batch_size).map(extractor)
    else:
        train_parser = ChunkParser(train_chunks,
                                   tfprocess.INPUT_MODE,
                                   shuffle_size=shuffle_size,
                                   sample=SKIP,
                                   batch_size=ChunkParser.BATCH_SIZE,
                                   value_focus_min=value_focus_min,
                                   value_focus_slope=value_focus_slope,
                                   workers=train_workers)
        train_dataset = tf.data.Dataset.from_generator(
            train_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        train_dataset = train_dataset.map(ChunkParser.parse_function)

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    if experimental_parser:
        test_dataset = tf.data.Dataset.from_tensor_slices(test_chunks).shuffle(len(test_chunks)).repeat().batch(256)\
            .interleave(read, num_parallel_calls=2)\
            .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
            .shuffle(shuffle_size)\
            .batch(split_batch_size).map(extractor)
def main():
    parser = argparse.ArgumentParser(
        description='Train network from game data.')
    parser.add_argument("blockspref",
                        help="Number of blocks", nargs='?', type=int)
    parser.add_argument("filterspref",
                        help="Number of filters", nargs='?', type=int)
    parser.add_argument("trainpref",
                        help='Training file prefix', nargs='?', type=str)
    parser.add_argument("restorepref",
                        help='Training snapshot prefix', nargs='?', type=str)
    parser.add_argument("--blocks", '-b',
                        help="Number of blocks", type=int)
    parser.add_argument("--filters", '-f',
                        help="Number of filters", type=int)
    parser.add_argument("--train", '-t',
                        help="Training file prefix", type=str)
    parser.add_argument("--test", help="Test file prefix", type=str)
    parser.add_argument("--restore", type=str,
                        help="Prefix of tensorflow snapshot to restore from")
    parser.add_argument("--logbase", default='leelalogs', type=str,
                        help="Log file prefix (for tensorboard) (default: %(default)s)")
    parser.add_argument("--sample", default=DOWN_SAMPLE, type=int,
                        help="Rate of data down-sampling to use (default: %(default)d)")
    parser.add_argument("--bufferbits", default=TRAIN_SHUFFLE_BITS, type=int,
                        help="Train shuffle-buffer size in bits (default: %(default)d)")
    parser.add_argument("--rate", default=LEARN_RATE, type=float,
                        help="Learning rate (default: %(default)f)")
    parser.add_argument("--steps", default=TRAINING_STEPS, type=int,
                        help="Training step before writing a network (default: %(default)d)")
    parser.add_argument("--maxsteps", default=MAX_TRAINING_STEPS, type=int,
                        help="Terminates after this many steps (default: %(default)d)")
    parser.add_argument("--maxkeep", default=MAX_SAVER_TO_KEEP, type=int,
                        help="Keeps meta files for at most this many networks (default: %(default)d)")
    parser.add_argument("--policyloss", default=POLICY_LOSS_WT, type=float,
                        help="Coefficient for policy term in loss function (default: %(default)f)")
    parser.add_argument("--mseloss", default=MSE_LOSS_WT, type=float,
                        help="Coefficient for mse term in loss function (default: %(default)f)")
    parser.add_argument("--regloss", default=REG_LOSS_WT, type=float,
                        help="Coefficient for regularizing term in loss function (default: %(default)f)")
    args = parser.parse_args()

    blocks = args.blocks or args.blockspref
    filters = args.filters or args.filterspref
    train_data_prefix = args.train or args.trainpref
    restore_prefix = args.restore or args.restorepref

    if not blocks or not filters:
        print("Must supply number of blocks and filters")
        return

    training = get_chunks(train_data_prefix)
    if not args.test:
        # Generate test by taking 10% of the training chunks.
        random.shuffle(training)
        training, test = split_chunks(training, 0.1)
    else:
        test = get_chunks(args.test)

    if not training:
        print("No data to train on!")
        return

    print("Training with {0} chunks, validating on {1} chunks".format(
        len(training), len(test)))

    train_parser = ChunkParser(FileDataSrc(training),
                               shuffle_size=1 << args.bufferbits,  # was 20 -- 2.2GB of RAM.
                               sample=args.sample,
                               batch_size=RAM_BATCH_SIZE).parse()

    test_parser = ChunkParser(FileDataSrc(test),
                              shuffle_size=1 << (args.bufferbits - 3),  # was 19
                              sample=args.sample,
                              batch_size=RAM_BATCH_SIZE).parse()

    tfprocess = TFProcess(blocks, filters,
                          args.rate, args.steps, args.maxsteps, args.maxkeep,
                          args.policyloss, args.mseloss, args.regloss)
    tfprocess.init(RAM_BATCH_SIZE,
                   logbase=args.logbase,
                   macrobatch=BATCH_SIZE // RAM_BATCH_SIZE)

    #benchmark1(tfprocess)

    if restore_prefix:
        tfprocess.restore(restore_prefix)
    tfprocess.process(train_parser, test_parser)
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    allow_less = cfg['dataset'].get('allow_less_chunks', False)
    train_ratio = cfg['dataset']['train_ratio']
    experimental_parser = cfg['dataset'].get('experimental_v5_only_dataset', False)
    num_train = int(num_chunks * train_ratio)
    num_test = num_chunks - num_train
    if 'input_test' in cfg['dataset']:
        train_chunks = get_latest_chunks(cfg['dataset']['input_train'],
                                         num_train, allow_less)
        test_chunks = get_latest_chunks(cfg['dataset']['input_test'], num_test,
                                        allow_less)
    else:
        chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks,
                                   allow_less)
        if allow_less:
            num_train = int(len(chunks) * train_ratio)
            num_test = len(chunks) - num_train
        train_chunks = chunks[:num_train]
        test_chunks = chunks[num_train:]

    shuffle_size = cfg['training']['shuffle_size']
    total_batch_size = cfg['training']['batch_size']
    batch_splits = cfg['training'].get('num_batch_splits', 1)
    train_workers = cfg['dataset'].get('train_workers', None)
    test_workers = cfg['dataset'].get('test_workers', None)
    if total_batch_size % batch_splits != 0:
        raise ValueError('num_batch_splits must divide batch_size evenly')
    split_batch_size = total_batch_size // batch_splits
    # Load data with split batch size, which will be combined to the total batch size in tfprocess.
    ChunkParser.BATCH_SIZE = split_batch_size

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    tfprocess = TFProcess(cfg)
    experimental_reads = max(2, mp.cpu_count() - 2) // 2
    extractor = select_extractor(tfprocess.INPUT_MODE)

    def read(x):
        return tf.data.FixedLengthRecordDataset(
            x,
            8308,
            compression_type='GZIP',
            num_parallel_reads=experimental_reads)

    if experimental_parser:
        train_dataset = tf.data.Dataset.from_tensor_slices(train_chunks).shuffle(len(train_chunks)).repeat().batch(256)\
            .interleave(read, num_parallel_calls=2)\
            .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
            .shuffle(shuffle_size)\
            .batch(split_batch_size).map(extractor).prefetch(4)
    else:
        train_parser = ChunkParser(train_chunks,
                                   tfprocess.INPUT_MODE,
                                   shuffle_size=shuffle_size,
                                   sample=SKIP,
                                   batch_size=ChunkParser.BATCH_SIZE,
                                   workers=train_workers)
        train_dataset = tf.data.Dataset.from_generator(
            train_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        train_dataset = train_dataset.map(ChunkParser.parse_function)
        train_dataset = train_dataset.prefetch(4)

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    if experimental_parser:
        test_dataset = tf.data.Dataset.from_tensor_slices(test_chunks).shuffle(len(test_chunks)).repeat().batch(256)\
            .interleave(read, num_parallel_calls=2)\
            .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
            .shuffle(shuffle_size)\
            .batch(split_batch_size).map(extractor).prefetch(4)
    else:
        test_parser = ChunkParser(test_chunks,
                                  tfprocess.INPUT_MODE,
                                  shuffle_size=shuffle_size,
                                  sample=SKIP,
                                  batch_size=ChunkParser.BATCH_SIZE,
                                  workers=test_workers)
        test_dataset = tf.data.Dataset.from_generator(
            test_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        test_dataset = test_dataset.map(ChunkParser.parse_function)
        test_dataset = test_dataset.prefetch(4)

    validation_dataset = None
    if 'input_validation' in cfg['dataset']:
        valid_chunks = get_all_chunks(cfg['dataset']['input_validation'])
        validation_dataset = tf.data.FixedLengthRecordDataset(
            valid_chunks,
            8308,
            compression_type='GZIP',
            num_parallel_reads=experimental_reads)\
            .batch(split_batch_size, drop_remainder=True).map(extractor).prefetch(4)

    tfprocess.init_v2(train_dataset, test_dataset, validation_dataset)

    tfprocess.restore_v2()

    # If number of test positions is not given
    # sweeps through all test chunks statistically
    # Assumes average of 10 samples per test game.
    # For simplicity, testing can use the split batch size instead of total batch size.
    # This does not affect results, because test results are simple averages that are independent of batch size.
    num_evals = cfg['training'].get('num_test_positions',
                                    len(test_chunks) * 10)
    num_evals = max(1, num_evals // ChunkParser.BATCH_SIZE)
    print("Using {} evaluation batches".format(num_evals))
    tfprocess.process_loop_v2(total_batch_size,
                              num_evals,
                              batch_splits=batch_splits)

    if cmd.output is not None:
        if cfg['training'].get('swa_output', False):
            tfprocess.save_swa_weights_v2(cmd.output)
        else:
            tfprocess.save_leelaz_weights_v2(cmd.output)

    train_parser.shutdown()
    test_parser.shutdown()
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    allow_less = cfg['dataset'].get('allow_less_chunks', False)
    train_ratio = cfg['dataset']['train_ratio']
    experimental_parser = cfg['dataset'].get('experimental_v5_only_dataset', False)
    num_train = int(num_chunks * train_ratio)
    num_test = num_chunks - num_train
    if 'input_test' in cfg['dataset']:
        train_chunks = get_latest_chunks(cfg['dataset']['input_train'],
                                         num_train, allow_less)
        test_chunks = get_latest_chunks(cfg['dataset']['input_test'], num_test,
                                        allow_less)
    else:
        chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks,
                                   allow_less)
        if allow_less:
            num_train = int(len(chunks) * train_ratio)
            num_test = len(chunks) - num_train
        train_chunks = chunks[:num_train]
        test_chunks = chunks[num_train:]

    shuffle_size = cfg['training']['shuffle_size']
    total_batch_size = cfg['training']['batch_size']
    batch_splits = cfg['training'].get('num_batch_splits', 1)
    if total_batch_size % batch_splits != 0:
        raise ValueError('num_batch_splits must divide batch_size evenly')
    split_batch_size = total_batch_size // batch_splits
    # Load data with split batch size, which will be combined to the total batch size in tfprocess.
    ChunkParser.BATCH_SIZE = split_batch_size

    tfprocess = TFProcess(cfg)
    train_parser = ChunkParser(train_chunks,
                               tfprocess.INPUT_MODE,
                               shuffle_size=10000,
                               sample=SKIP,
                               batch_size=ChunkParser.BATCH_SIZE,
                               workers=4)
    batch_gen = train_parser.parse()

    device = th.device('cuda:0' if th.cuda.is_available() else 'cpu')
    model = th.nn.Sequential(th.nn.Conv2d(112, 128, 3, padding=1, bias=False),
                             th.nn.ReLU(),
                             ResidualBlock(),
                             ResidualBlock(),
                             ResidualBlock(),
                             ResidualBlock(),
                             ResidualBlock(),
                             ResidualBlock(),
                             th.nn.Conv2d(128, 32, 1, bias=False),
                             th.nn.ReLU(),
                             th.nn.Flatten(),
                             th.nn.Linear(2048, 1858, bias=False))
    model.train()
    model = model.to(device)
    optimizer = th.optim.SGD(model.parameters(), lr=0.01)

    train_batches = 100000
    for i in range(train_batches):
        # print(f'getting data {i} ...')
        x, y, z, q, m = next(batch_gen)
        x, y, z, q, m = ChunkParser.parse_function(x, y, z, q, m)
        x = x.numpy()
        x = th.Tensor(x)
        x = x.reshape((-1, 112, 8, 8))
        y = th.Tensor(y.numpy())
        x, y = x.to(device), y.to(device)

        optimizer.zero_grad()
        policy = model(x)
        loss = th.mean(
            th.sum(-th.log_softmax(policy, 1) * th.nn.functional.relu(y), 1))
        loss.backward()
        optimizer.step()
        if i % 10 == 0:
            print(f'step = {i}, loss = {loss.item()}')

    train_parser.shutdown()
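# ResidualBlock is referenced above but not defined in this snippet. A minimal
# sketch (an assumption, not the project's actual implementation) matching the
# 128-channel, bias-free 3x3 convolutions used in the Sequential model:
import torch as th

class ResidualBlock(th.nn.Module):
    def __init__(self, channels=128):
        super().__init__()
        self.conv1 = th.nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.conv2 = th.nn.Conv2d(channels, channels, 3, padding=1, bias=False)

    def forward(self, x):
        # two 3x3 convolutions with a skip connection
        out = th.relu(self.conv1(x))
        out = self.conv2(out)
        return th.relu(out + x)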