Example #1
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    allow_less = cfg['dataset'].get('allow_less_chunks', False)
    train_ratio = cfg['dataset']['train_ratio']
    experimental_parser = cfg['dataset'].get('experimental_v5_only_dataset',
                                             False)
    # num_train = int(num_chunks * train_ratio)
    # we only need one data loader here, so put everything into the train split
    num_train = int(num_chunks)
    num_test = num_chunks - num_train
    sort_type = cfg['dataset'].get('sort_type', 'mtime')
    if sort_type == 'mtime':
        sort_key_fn = os.path.getmtime
    elif sort_type == 'number':
        sort_key_fn = game_number_for_name
    elif sort_type == 'name':
        sort_key_fn = identity_function
    else:
        raise ValueError('Unknown dataset sort_type: {}'.format(sort_type))
    if 'input_test' in cfg['dataset']:
        train_chunks = get_latest_chunks(cfg['dataset']['input_train'],
                                         num_train, allow_less, sort_key_fn)
        test_chunks = get_latest_chunks(cfg['dataset']['input_test'], num_test,
                                        allow_less, sort_key_fn)
    else:
        chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks,
                                   allow_less, sort_key_fn)
        if allow_less:
            num_train = int(len(chunks) * train_ratio)
            num_test = len(chunks) - num_train
        train_chunks = chunks[:num_train]
        test_chunks = chunks[num_train:]

    # shuffle_size = cfg['training']['shuffle_size']
    shuffle_size = 1  # effectively disables shuffling; the analysis below needs stable ordering
    total_batch_size = cfg['training']['batch_size']
    batch_splits = cfg['training'].get('num_batch_splits', 1)
    train_workers = cfg['dataset'].get('train_workers', None)
    test_workers = cfg['dataset'].get('test_workers', None)
    if total_batch_size % batch_splits != 0:
        raise ValueError('num_batch_splits must divide batch_size evenly')
    split_batch_size = total_batch_size // batch_splits
    # Load data with split batch size, which will be combined to the total batch size in tfprocess.
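    # e.g. batch_size=1024 with num_batch_splits=4 feeds the graph sub-batches of 256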
    ChunkParser.BATCH_SIZE = split_batch_size

    value_focus_min = cfg['training'].get('value_focus_min', 1)
    value_focus_slope = cfg['training'].get('value_focus_slope', 0)

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    tfprocess = TFProcess(cfg)
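    # use about half of the spare CPU cores for parallel record reads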
    experimental_reads = max(2, mp.cpu_count() - 2) // 2
    extractor = select_extractor(tfprocess.INPUT_MODE)

    if experimental_parser and (value_focus_min != 1
                                or value_focus_slope != 0):
        raise ValueError(
            'Experimental parser does not support non-default value '
            'focus parameters.')

    def read(x):
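        # each v5 training record is a fixed 8308-byte struct, gzip-compressed on disk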
        return tf.data.FixedLengthRecordDataset(
            x,
            8308,
            compression_type='GZIP',
            num_parallel_reads=experimental_reads)

    if experimental_parser:
        # the stock pipeline shuffles the chunk list and the sample stream;
        # both shuffles are removed here so positions keep their on-disk order
        train_dataset = tf.data.Dataset.from_tensor_slices(train_chunks).repeat().batch(256)\
                         .interleave(read, num_parallel_calls=1)\
                         .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
                         .batch(split_batch_size).map(extractor)
    else:
        train_parser = ChunkParser(train_chunks,
                                   tfprocess.INPUT_MODE,
                                   shuffle_size=shuffle_size,
                                   sample=SKIP,
                                   batch_size=ChunkParser.BATCH_SIZE,
                                   value_focus_min=value_focus_min,
                                   value_focus_slope=value_focus_slope,
                                   workers=train_workers)
        train_dataset = tf.data.Dataset.from_generator(
            train_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        train_dataset = train_dataset.map(ChunkParser.parse_function)

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    if experimental_parser:
        # shuffles removed here as well (see train_dataset above)
        test_dataset = tf.data.Dataset.from_tensor_slices(test_chunks).repeat().batch(256)\
                         .interleave(read, num_parallel_calls=2)\
                         .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
                         .batch(split_batch_size).map(extractor)

    else:
        # no value focus for test_parser
        test_parser = ChunkParser(test_chunks,
                                  tfprocess.INPUT_MODE,
                                  shuffle_size=shuffle_size,
                                  sample=SKIP,
                                  batch_size=ChunkParser.BATCH_SIZE,
                                  workers=test_workers)
        test_dataset = tf.data.Dataset.from_generator(
            test_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        test_dataset = test_dataset.map(ChunkParser.parse_function)
    validation_dataset = None
    if 'input_validation' in cfg['dataset']:
        valid_chunks = get_all_chunks(cfg['dataset']['input_validation'])
        validation_dataset = tf.data.FixedLengthRecordDataset(valid_chunks, 8308, compression_type='GZIP', num_parallel_reads=experimental_reads)\
                               .batch(split_batch_size, drop_remainder=True).map(extractor)
    if tfprocess.strategy is None:  # MirroredStrategy adds its own prefetch sized by the replica count
        train_dataset = train_dataset.prefetch(4)
        test_dataset = test_dataset.prefetch(4)
        if validation_dataset is not None:
            validation_dataset = validation_dataset.prefetch(4)
    else:
        options = tf.data.Options()
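        # generator-backed datasets have no file shards, so disable auto-sharding across replicas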
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        train_dataset = train_dataset.with_options(options)
        test_dataset = test_dataset.with_options(options)
        if validation_dataset is not None:
            validation_dataset = validation_dataset.with_options(options)

    ####################
    # Custom Additions #
    ####################

    tfprocess.init_v2(train_dataset, test_dataset, validation_dataset)
    # load network weights from the proto file passed on the command line (cmd.net)
    tfprocess.replace_weights_v2(proto_filename=cmd.net, ignore_errors=False)
    tfprocess.model.summary()

    for layer_name, path in zip(cmd.layer, cmd.path):

        # sort data files
        train_chunks = sorted(train_chunks)

        # create predictor that gives access to specific intermediate layer
        layer = tfprocess.model.get_layer(layer_name)
        earlyPredictor = tf.keras.models.Model(
            tfprocess.model.inputs,
            [tfprocess.model.inputs, tfprocess.model.outputs, layer.output])
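        # a functional Model may list any intermediate tensor among its
        # outputs, so a single predict() returns input, heads, and activations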

        # create a custom generator that walks the chunks in order, without shuffling or sampling
        custom_parse_gen = train_parser.custom_parse(train_chunks)
        turn_counter = 0
        custom_iter = iter(custom_parse_gen)

        # prepare dataframe
        df = pd.DataFrame()

        # iterate over the entire dataset
        for data in custom_iter:
            planes, probs, winner, best_q = train_parser.custom_get_batch(data)
            x = planes
            print('predicting...')
            _, _, layer_results = earlyPredictor.predict(x)
            # append the flattened activations to the dataframe
            # (df.append was removed in pandas 2.0; use pd.concat instead)
            shape_tuple = (-1, np.prod(layer.output_shape[1:]))
            df = pd.concat(
                [df, pd.DataFrame(layer_results.reshape(shape_tuple))],
                ignore_index=True)

            turn_counter += len(x)

        df.info()
        df.to_csv(path)

        print('done')

    # the parsers only exist on the non-experimental path
    if not experimental_parser:
        train_parser.shutdown()
        test_parser.shutdown()
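
The `cmd` namespace above is only read for `cfg` (an open YAML file), `net`, and the parallel `layer`/`path` lists. A minimal argparse wiring consistent with those accesses could look like the following sketch (flag names and help texts are assumptions, not taken from the source):

import argparse

if __name__ == '__main__':
    argparser = argparse.ArgumentParser(
        description='Dump intermediate-layer activations to CSV.')
    argparser.add_argument('--cfg', type=argparse.FileType('r'),
                           help='yaml configuration with training parameters')
    argparser.add_argument('--net', type=str,
                           help='protobuf weights file to load into the model')
    argparser.add_argument('--layer', nargs='+',
                           help='layer names to extract (paired with --path)')
    argparser.add_argument('--path', nargs='+',
                           help='CSV output path for each extracted layer')
    main(argparser.parse_args())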
Example #2
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    allow_less = cfg['dataset'].get('allow_less_chunks', False)
    train_ratio = cfg['dataset']['train_ratio']
    experimental_parser = cfg['dataset'].get('experimental_v5_only_dataset',
                                             False)
    num_train = int(num_chunks * train_ratio)
    num_test = num_chunks - num_train
    if 'input_test' in cfg['dataset']:
        train_chunks = get_latest_chunks(cfg['dataset']['input_train'],
                                         num_train, allow_less)
        test_chunks = get_latest_chunks(cfg['dataset']['input_test'], num_test,
                                        allow_less)
    else:
        chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks,
                                   allow_less)
        if allow_less:
            num_train = int(len(chunks) * train_ratio)
            num_test = len(chunks) - num_train
        train_chunks = chunks[:num_train]
        test_chunks = chunks[num_train:]

    shuffle_size = cfg['training']['shuffle_size']
    total_batch_size = cfg['training']['batch_size']
    batch_splits = cfg['training'].get('num_batch_splits', 1)
    train_workers = cfg['dataset'].get('train_workers', None)
    test_workers = cfg['dataset'].get('test_workers', None)
    if total_batch_size % batch_splits != 0:
        raise ValueError('num_batch_splits must divide batch_size evenly')
    split_batch_size = total_batch_size // batch_splits
    # Load data with split batch size, which will be combined to the total batch size in tfprocess.
    ChunkParser.BATCH_SIZE = split_batch_size

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    tfprocess = TFProcess(cfg)
    experimental_reads = max(2, mp.cpu_count() - 2) // 2
    extractor = select_extractor(tfprocess.INPUT_MODE)

    def read(x):
        return tf.data.FixedLengthRecordDataset(
            x,
            8308,
            compression_type='GZIP',
            num_parallel_reads=experimental_reads)

    if experimental_parser:
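        # pipeline: shuffle the chunk-file list, group files in batches of 256,
        # interleave their records, downsample positions in windows of
        # SKIP_MULTIPLE*SKIP via semi_sample, then shuffle and re-batch samples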
        train_dataset = tf.data.Dataset.from_tensor_slices(train_chunks).shuffle(len(train_chunks)).repeat().batch(256)\
                         .interleave(read, num_parallel_calls=2)\
                         .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
                         .shuffle(shuffle_size)\
                         .batch(split_batch_size).map(extractor).prefetch(4)
    else:
        train_parser = ChunkParser(train_chunks,
                                   tfprocess.INPUT_MODE,
                                   shuffle_size=shuffle_size,
                                   sample=SKIP,
                                   batch_size=ChunkParser.BATCH_SIZE,
                                   workers=train_workers)
        train_dataset = tf.data.Dataset.from_generator(
            train_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        train_dataset = train_dataset.map(ChunkParser.parse_function)
        train_dataset = train_dataset.prefetch(4)

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    if experimental_parser:
        test_dataset = tf.data.Dataset.from_tensor_slices(test_chunks).shuffle(len(test_chunks)).repeat().batch(256)\
                         .interleave(read, num_parallel_calls=2)\
                         .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
                         .shuffle(shuffle_size)\
                         .batch(split_batch_size).map(extractor).prefetch(4)
    else:
        test_parser = ChunkParser(test_chunks,
                                  tfprocess.INPUT_MODE,
                                  shuffle_size=shuffle_size,
                                  sample=SKIP,
                                  batch_size=ChunkParser.BATCH_SIZE,
                                  workers=test_workers)
        test_dataset = tf.data.Dataset.from_generator(
            test_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        test_dataset = test_dataset.map(ChunkParser.parse_function)
        test_dataset = test_dataset.prefetch(4)

    validation_dataset = None
    if 'input_validation' in cfg['dataset']:
        valid_chunks = get_all_chunks(cfg['dataset']['input_validation'])
        validation_dataset = tf.data.FixedLengthRecordDataset(valid_chunks, 8308, compression_type='GZIP', num_parallel_reads=experimental_reads)\
                               .batch(split_batch_size, drop_remainder=True).map(extractor).prefetch(4)

    tfprocess.init_v2(train_dataset, test_dataset, validation_dataset)

    tfprocess.restore_v2()

    # If the number of test positions is not given, sweep through all test
    # chunks statistically, assuming an average of 10 samples per test game.
    # For simplicity, testing can use the split batch size instead of total batch size.
    # This does not affect results, because test results are simple averages that are independent of batch size.
    num_evals = cfg['training'].get('num_test_positions',
                                    len(test_chunks) * 10)
    num_evals = max(1, num_evals // ChunkParser.BATCH_SIZE)
    print("Using {} evaluation batches".format(num_evals))

    tfprocess.process_loop_v2(total_batch_size,
                              num_evals,
                              batch_splits=batch_splits)

    if cmd.output is not None:
        if cfg['training'].get('swa_output', False):
            tfprocess.save_swa_weights_v2(cmd.output)
        else:
            tfprocess.save_leelaz_weights_v2(cmd.output)

    # the parsers only exist on the non-experimental path
    if not experimental_parser:
        train_parser.shutdown()
        test_parser.shutdown()
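
Example #2 is the stock training entry point; every behaviour above is driven by the YAML file loaded into `cfg`. A minimal configuration covering just the keys this function reads might look like the sketch below (values are illustrative assumptions, not defaults from the source):

cfg = {
    'name': 'example-run',
    'dataset': {
        'num_chunks': 100000,
        'allow_less_chunks': True,
        'train_ratio': 0.9,
        'input': '/data/training-chunks/',  # or input_train / input_test pairs
        # 'input_validation': '/data/validation-chunks/',
        # 'train_workers': 4,
        # 'test_workers': 2,
        # 'experimental_v5_only_dataset': False,
    },
    'training': {
        'shuffle_size': 250000,
        'batch_size': 1024,
        'num_batch_splits': 4,  # 1024 % 4 == 0, so sub-batches of 256
        'path': '/data/networks',
        # 'num_test_positions': 100000,
        # 'swa_output': False,
    },
}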
Example #3
def main(cmd):
    cfg = yaml.safe_load(cmd.cfg.read())
    print(yaml.dump(cfg, default_flow_style=False))

    num_chunks = cfg['dataset']['num_chunks']
    allow_less = cfg['dataset'].get('allow_less_chunks', False)
    train_ratio = cfg['dataset']['train_ratio']
    experimental_parser = cfg['dataset'].get('experimental_v5_only_dataset',
                                             False)
    # num_train = int(num_chunks * train_ratio)
    # we only need one data loader here, so put everything into the train split
    num_train = int(num_chunks)
    num_test = num_chunks - num_train
    sort_type = cfg['dataset'].get('sort_type', 'mtime')
    if sort_type == 'mtime':
        sort_key_fn = os.path.getmtime
    elif sort_type == 'number':
        sort_key_fn = game_number_for_name
    elif sort_type == 'name':
        sort_key_fn = identity_function
    else:
        raise ValueError('Unknown dataset sort_type: {}'.format(sort_type))
    if 'input_test' in cfg['dataset']:
        train_chunks = get_latest_chunks(cfg['dataset']['input_train'],
                                         num_train, allow_less, sort_key_fn)
        test_chunks = get_latest_chunks(cfg['dataset']['input_test'], num_test,
                                        allow_less, sort_key_fn)
    else:
        chunks = get_latest_chunks(cfg['dataset']['input'], num_chunks,
                                   allow_less, sort_key_fn)
        if allow_less:
            num_train = int(len(chunks) * train_ratio)
            num_test = len(chunks) - num_train
        train_chunks = chunks[:num_train]
        test_chunks = chunks[num_train:]

    # shuffle_size = cfg['training']['shuffle_size']
    shuffle_size = 1  # effectively disables shuffling; the analysis below needs stable ordering
    total_batch_size = cfg['training']['batch_size']
    batch_splits = cfg['training'].get('num_batch_splits', 1)
    train_workers = cfg['dataset'].get('train_workers', None)
    test_workers = cfg['dataset'].get('test_workers', None)
    if total_batch_size % batch_splits != 0:
        raise ValueError('num_batch_splits must divide batch_size evenly')
    split_batch_size = total_batch_size // batch_splits
    # Load data with split batch size, which will be combined to the total batch size in tfprocess.
    ChunkParser.BATCH_SIZE = split_batch_size

    value_focus_min = cfg['training'].get('value_focus_min', 1)
    value_focus_slope = cfg['training'].get('value_focus_slope', 0)

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    tfprocess = TFProcess(cfg)
    experimental_reads = max(2, mp.cpu_count() - 2) // 2
    extractor = select_extractor(tfprocess.INPUT_MODE)

    if experimental_parser and (value_focus_min != 1
                                or value_focus_slope != 0):
        raise ValueError(
            'Experimental parser does not support non-default value '
            'focus parameters.')

    def read(x):
        return tf.data.FixedLengthRecordDataset(
            x,
            8308,
            compression_type='GZIP',
            num_parallel_reads=experimental_reads)

    if experimental_parser:
        # the stock pipeline shuffles the chunk list and the sample stream;
        # both shuffles are removed here so positions keep their on-disk order
        train_dataset = tf.data.Dataset.from_tensor_slices(train_chunks).repeat().batch(256)\
                         .interleave(read, num_parallel_calls=1)\
                         .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
                         .batch(split_batch_size).map(extractor)
    else:
        train_parser = ChunkParser(train_chunks,
                                   tfprocess.INPUT_MODE,
                                   shuffle_size=shuffle_size,
                                   sample=SKIP,
                                   batch_size=ChunkParser.BATCH_SIZE,
                                   value_focus_min=value_focus_min,
                                   value_focus_slope=value_focus_slope,
                                   workers=train_workers)
        train_dataset = tf.data.Dataset.from_generator(
            train_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        train_dataset = train_dataset.map(ChunkParser.parse_function)

    shuffle_size = int(shuffle_size * (1.0 - train_ratio))
    if experimental_parser:
        # shuffles removed here as well (see train_dataset above)
        test_dataset = tf.data.Dataset.from_tensor_slices(test_chunks).repeat().batch(256)\
                         .interleave(read, num_parallel_calls=2)\
                         .batch(SKIP_MULTIPLE*SKIP).map(semi_sample).unbatch()\
                         .batch(split_batch_size).map(extractor)
    else:
        # no value focus for test_parser
        test_parser = ChunkParser(test_chunks,
                                  tfprocess.INPUT_MODE,
                                  shuffle_size=shuffle_size,
                                  sample=SKIP,
                                  batch_size=ChunkParser.BATCH_SIZE,
                                  workers=test_workers)
        test_dataset = tf.data.Dataset.from_generator(
            test_parser.parse,
            output_types=(tf.string, tf.string, tf.string, tf.string,
                          tf.string))
        test_dataset = test_dataset.map(ChunkParser.parse_function)
    validation_dataset = None
    if 'input_validation' in cfg['dataset']:
        valid_chunks = get_all_chunks(cfg['dataset']['input_validation'])
        validation_dataset = tf.data.FixedLengthRecordDataset(valid_chunks, 8308, compression_type='GZIP', num_parallel_reads=experimental_reads)\
                               .batch(split_batch_size, drop_remainder=True).map(extractor)
    if tfprocess.strategy is None:  # MirroredStrategy adds its own prefetch sized by the replica count
        train_dataset = train_dataset.prefetch(4)
        test_dataset = test_dataset.prefetch(4)
        if validation_dataset is not None:
            validation_dataset = validation_dataset.prefetch(4)
    else:
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        train_dataset = train_dataset.with_options(options)
        test_dataset = test_dataset.with_options(options)
        if validation_dataset is not None:
            validation_dataset = validation_dataset.with_options(options)


    tfprocess.init_v2(train_dataset, test_dataset, validation_dataset)
    # load network weights from the proto file passed on the command line (cmd.net)
    tfprocess.replace_weights_v2(proto_filename=cmd.net, ignore_errors=False)

    train_chunks = sorted(train_chunks)

    custom_parse_gen = train_parser.custom_parse(train_chunks)
    print(train_chunks)
    counter = 0
    custom_iter = iter(custom_parse_gen)
    for data in custom_iter:
        planes, probs, winner, best_q = train_parser.custom_get_batch(data)
        print(planes.shape)
        x = planes

        # TODO: verify that no shuffling happens; the output below should clearly show the first few moves of the game w.r.t. pawn placement
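        # (plane 0 is the side-to-move pawns in Leela's 112-plane input
        # encoding, so consecutive positions should show pawns advancing)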
        for i in range(len(x)):
            counter += 1
            print('move no.:', counter)
            print(x[i, 0].reshape(8,8))
            print()
        print()

    # TODO
    # print(tfprocess.model.summary())
    # print(tfprocess.train_dataset)
    # data_iter = iter(tfprocess.train_dataset)
    # nxt = next(data_iter)
    # x, _, _, _, _ = nxt
    # x2_0_r = tf.reshape(x[0], [1, 112, 64])

    # pred = tfprocess.model.predict(x)

    earlyPredictor = tf.keras.models.Model(
        tfprocess.model.inputs,
        [tfprocess.model.inputs, tfprocess.model.outputs,
         tfprocess.model.get_layer('activation_31').output])
    early_pred_single = earlyPredictor.predict(x)
    # print(np.array(early_pred_single[0]).shape)
    input_planes = np.array(early_pred_single[0])  # avoid shadowing the input() builtin
    print(input_planes.shape)

    # print(early_pred_single[0]) # input
    # print(early_pred_single[1]) # output
    # print(early_pred_single[2]) # intermediate layer

    # print(train_parser.sample_record())
    # print(next(tfprocess.train_iter))

    # tfprocess.restore_v2()

    # If number of test positions is not given
    # sweeps through all test chunks statistically
    # Assumes average of 10 samples per test game.
    # For simplicity, testing can use the split batch size instead of total batch size.
    # This does not affect results, because test results are simple averages that are independent of batch size.
    # num_evals = cfg['training'].get('num_test_positions',
    #                                 len(test_chunks) * 10)
    # num_evals = max(1, num_evals // ChunkParser.BATCH_SIZE)
    # print("Using {} evaluation batches".format(num_evals))
    # tfprocess.total_batch_size = total_batch_size
    # tfprocess.process_loop_v2(total_batch_size,
    #                           num_evals,
    #                           batch_splits=batch_splits)

    # if cmd.output is not None:
    #     if cfg['training'].get('swa_output', False):
    #         tfprocess.save_swa_weights_v2(cmd.output)
    #     else:
    #         tfprocess.save_leelaz_weights_v2(cmd.output)

    # the parsers only exist on the non-experimental path
    if not experimental_parser:
        train_parser.shutdown()
        test_parser.shutdown()
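
Example #3 hardcodes the layer name 'activation_31', which only exists for one particular network architecture. A small sketch for discovering valid names before adapting the script (assumes tfprocess.init_v2(...) has already built tfprocess.model):

for layer in tfprocess.model.layers:
    print(layer.name, getattr(layer, 'output_shape', None))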