Example no. 1
def load_policy_agent(h5file):
    model = kerasutil.load_model_from_hdf5_group(h5file['model'])
    encoder_name = h5file['encoder'].attrs['name']
    board_width = h5file['encoder'].attrs['board_width']
    board_height = h5file['encoder'].attrs['board_height']
    encoder = encoders.get_encoder_by_name(encoder_name,
                                           (board_width, board_height))
    return PolicyAgent(model, encoder)
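For context, a minimal usage sketch of the function above (the file name and the earlier serialize() call are assumptions for illustration, not part of the example):

import h5py

# Hypothetical file previously written by the agent's own serialize() method.
with h5py.File('policy_agent.h5', 'r') as h5file:
    agent = load_policy_agent(h5file)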
Example no. 2
def load_policy_agent(h5file_path):
    h5file = h5py.File(h5file_path, 'r')  # Open the serialized agent for reading
    model = kerasutil.load_model_from_hdf5_group(h5file['model'])
    encoder_name = h5file['encoder'].attrs['name']
    board_width = h5file['encoder'].attrs['board_width']
    board_height = h5file['encoder'].attrs['board_height']
    encoder = get_encoder_by_name(encoder_name, (board_width, board_height))
    return SeegaAgent(model, encoder)
Example no. 3
@classmethod
def load(cls, filename):
    inp = h5py.File(filename, 'r')
    model = kerasutil.load_model_from_hdf5_group(inp['model'])
    training_run = cls(filename, model,
                       inp['metadata'].attrs['epochs_completed'],
                       inp['metadata'].attrs['chunks_completed'],
                       inp['metadata'].attrs['num_chunks'])
    inp.close()
    return training_run
Example no. 4
def load_prediction_agent(h5file):
    model = kerasutil.load_model_from_hdf5_group(h5file['model'])
    encoder_name = h5file['encoder'].attrs['name']
    if not isinstance(encoder_name, str):
        encoder_name = encoder_name.decode('ascii')
    board_width = h5file['encoder'].attrs['board_width']
    board_height = h5file['encoder'].attrs['board_height']
    encoder = encoders.get_encoder_by_name(encoder_name, (board_width, board_height))
    return DeepLearningAgent(model, encoder)
Example no. 5
def load_policy_agent(h5file):
    model = kerasutil.load_model_from_hdf5_group(
        h5file['model'],
        custom_objects={'policy_gradient_loss': policy_gradient_loss})
    encoder_name = h5file['encoder'].attrs['name']
    if not isinstance(encoder_name, str):
        encoder_name = encoder_name.decode('ascii')
    board_width = h5file['encoder'].attrs['board_width']
    board_height = h5file['encoder'].attrs['board_height']
    encoder = ElevenPlaneEncoder((board_width, board_height))
    return PolicyAgent(model, encoder)
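The example above loads correctly only because the model was compiled and saved with the same custom loss; a minimal sketch of that saving side, with names taken from the example rather than from any original source:

# Hypothetical counterpart: compiling the policy model with the custom loss
# before serialization, so Keras can later resolve 'policy_gradient_loss'
# via the custom_objects mapping passed at load time.
model.compile(loss=policy_gradient_loss, optimizer='sgd')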
Example no. 6
def load_policy_agent(h5file):
    model = kerasutil.load_model_from_hdf5_group(
        h5file['model']
    )  # Uses built-in Keras functions to load the model structure and weights
    encoder_name = h5file['encoder'].attrs[
        'name']  # Recovers the board encoder
    board_width = h5file['encoder'].attrs['board_width']
    board_height = h5file['encoder'].attrs['board_height']
    if isinstance(encoder_name, bytes):
        encoder_name = encoder_name.decode()
    encoder = encoders.get_encoder_by_name(encoder_name,
                                           (board_width, board_height))
    return PolicyAgent(model, encoder)  # Reconstructs the agent
Example no. 7
def load_zero_agent(h5file):
    model = kerasutil.load_model_from_hdf5_group(
        h5file['model']
    )  # Uses built-in Keras functions to load the model structure and weights
    encoder_name = h5file['encoder'].attrs[
        'name']  # Recovers the board encoder
    board_size = h5file['encoder'].attrs['board_size']
    num_rounds = h5file['meta'].attrs['num_rounds']
    c = h5file['meta'].attrs['c']

    # zero encoder isn't in dlgo.encoders ... should it be?
    encoder = ZeroEncoder(board_size)

    return ZeroAgent(model, encoder, num_rounds, c)
Example no. 8
def main():
    pth = '/home/nail/Code_Go/checkpoints/'
    pth_experience = '/home/nail/Experience/'
    experience = []
    os.chdir(pth_experience)
    lst_files = os.listdir(pth_experience)
    pattern = input('Pattern for selecting experience files for training: ')
    if len(pattern) == 0:
        pattern = "exp*.h5"

    for entry in lst_files:
        if fnmatch.fnmatch(entry, pattern):
            experience.append(entry)

    experience.sort()
    learning_agent = input('learning_agent:')
    learning_agent = pth + learning_agent + '.h5'
    print('learning_agent: ', learning_agent)
    agent_out = input('agent_out:')
    agent_out = pth + agent_out + '.h5'
    board_size = 19
    print('agent_out: ', agent_out)
    try:
        lr = float(input('lr = '))
    except ValueError:
        lr = 0.000001
    try:
        bs = int(input('bs = '))
    except ValueError:
        bs = 1024

    # ==================================================
    import tensorflow as tf
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.98
    config.gpu_options.allow_growth = True
    config.log_device_placement = True
    sess = tf.compat.v1.Session(config=config)
    tf.compat.v1.keras.backend.set_session(sess)
    # ==================================================
    encoder = SimpleEncoder((board_size, board_size))
    try:
        h5file = h5py.File(learning_agent, "r")
        learning_agent = rl.load_value_agent(h5file)
        model_v = kerasutil.load_model_from_hdf5_group(h5file['model'])
    except Exception:
        # No existing agent to resume from: start with a freshly built value model.
        model_v = create_v_model(lr=lr)
    i = 1
    num_files = len(experience)
    for exp_filename in experience:
        print(50 * '=')
        print('Training file: %s...' % exp_filename)
        print(50 * '=')
        exp_buffer = rl.load_experience(h5py.File(exp_filename, "r"))
        model_v = my_train_v(model_v,
                             encoder,
                             exp_buffer,
                             lr=lr,
                             batch_size=bs)

        print('Files processed: ', i, ' of ', num_files)
        i += 1

    learning_agent = rl.ValueAgent(model_v, encoder)
    with h5py.File(agent_out, 'w') as updated_agent_outf:
        learning_agent.serialize(updated_agent_outf)
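As excerpted, the script defines main() but never invokes it; a standard entry-point guard (an addition for completeness, assuming the required imports such as os, fnmatch, h5py, rl, kerasutil, SimpleEncoder, create_v_model and my_train_v are available at module level) makes it runnable directly:

if __name__ == '__main__':
    main()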