Example #1
0
def train():
    """Build the attention model, then either load saved weights or train
    from scratch and persist the result.

    Returns:
        The compiled Keras model: weights are loaded from
        './Model/model.h5' when that checkpoint exists, otherwise the
        model is trained on the module-level data and saved there.
    """
    # NOTE(review): relies on module-level globals (params_config,
    # AttentionModel, human_vocab_size, machine_vocab_size, Xoh_train,
    # Yoh_train) defined elsewhere in this file.
    model = AttentionModel(params_config, human_vocab_size,
                           machine_vocab_size).model

    op = Adam(lr=params_config['learning_rate'],
              decay=params_config['decay'],
              clipnorm=params_config['clipnorm'])

    # Compile once up front instead of duplicating the identical call in
    # both branches; compiling before load_weights is safe in Keras.
    model.compile(optimizer=op,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if os.path.exists('./Model/model.h5'):
        print('loading model...')
        model.load_weights('./Model/model.h5')
    else:
        # The model has one output per decoder time step, so split the
        # one-hot targets along the time axis into a list of arrays.
        outputs_train = list(Yoh_train.swapaxes(0, 1))

        model.fit(Xoh_train,
                  outputs_train,
                  epochs=params_config['epochs'],
                  batch_size=params_config['batch_size'],
                  validation_split=0.1)

        # makedirs with exist_ok replaces the exists()/mkdir() pair.
        os.makedirs('Model', exist_ok=True)
        model.save_weights('./Model/model.h5')
    return model
Example #2
0
def load_model(path, embed_dim=128, n_customer=20, n_encode_layers=3):
    """Restore an AttentionModel from a saved weights file.

    A subclassed Keras model has no variables until it has been called
    once, so a tiny forward pass is run first and only then are the
    weights loaded.
    https://stackoverflow.com/questions/51806852/cant-save-custom-subclassed-model
    """
    warmup_set = generate_data(n_samples=5, n_customer=n_customer)
    model = AttentionModel(embed_dim, n_encode_layers=n_encode_layers)

    # A single greedy decode over the 5-sample batch builds the variables.
    for batch in warmup_set.batch(5):
        _, _ = model(batch, decode_type='greedy')

    model.load_weights(path)
    return model
Example #3
0
def load_model(path, embed_dim=128, n_customer=20, n_encode_layers=3):
    """Restore an AttentionModel from a saved weights file.

    A subclassed Keras model has no variables until it has been called
    once, so the model is first run on a small random dummy batch and
    only then are the weights loaded.
    https://stackoverflow.com/questions/51806852/cant-save-custom-subclassed-model

    Raises:
        ValueError: if `n_customer` is not a problem size with a known
            vehicle capacity (was a bare KeyError before).
    """
    # Vehicle capacity per standard CVRP problem size.
    CAPACITIES = {10: 20., 20: 30., 50: 40., 100: 50.}
    if n_customer not in CAPACITIES:
        raise ValueError(
            'n_customer must be one of %s, got %r'
            % (sorted(CAPACITIES), n_customer))

    # Dummy batch of 2 samples: (depot xy, customer xy, demands
    # normalized by vehicle capacity).
    data_random = (
        tf.random.uniform((2, 2), minval=0, maxval=1),
        tf.random.uniform((2, n_customer, 2), minval=0, maxval=1),
        tf.cast(
            tf.random.uniform(
                (2, n_customer), minval=1, maxval=10, dtype=tf.int32),
            tf.float32) / tf.cast(CAPACITIES[n_customer], tf.float32))

    model_loaded = AttentionModel(embed_dim,
                                  n_encode_layers=n_encode_layers,
                                  decode_type='greedy')
    # Forward pass on the dummy batch creates the model's variables.
    _, _ = model_loaded(data_random)
    model_loaded.load_weights(path)
    return model_loaded