Example 1
def load_custom_model(root_path, object_image_features_filename,
                      model_filename):
    """Rebuild the NIC architecture and restore its trained weights."""
    from Attention import Attention
    from models import NIC
    from generator import Generator

    preprocessed_data_path = root_path + 'preprocessed_data/'
    generator = Generator(
        data_path=preprocessed_data_path,
        batch_size=configs['batch_size'],
        image_features_filename=object_image_features_filename)
    if configs['w2v_weights']:
        embedding_weights = generator.embedding_matrix
    else:
        embedding_weights = None
    model = NIC(max_token_length=generator.MAX_TOKEN_LENGTH,
                vocabulary_size=generator.VOCABULARY_SIZE,
                tweet_max_len=configs['tweet_max_len'],
                tweet_max_words=configs['tweet_max_words'],
                rnn='gru',
                num_image_features=generator.IMG_FEATS,
                hidden_size=256,
                embedding_size=128,
                embedding_weights=embedding_weights)
    model.load_weights(model_filename)
    # model.load_model(model_filename)
    return model
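
A hypothetical call to the helper above; every path below is a placeholder for illustration, not a value from the original project:

model = load_custom_model(
    root_path='../datasets/custom/',                          # hypothetical data root
    object_image_features_filename='image_features.h5',       # hypothetical features file
    model_filename='../trained_models/custom/weights.hdf5')   # hypothetical checkpoint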
Example 2
from models import NIC
from generator import Generator
from keras.utils import plot_model

preprocessed_data_path = root_path + 'preprocessed_data/'
generator = Generator(data_path=preprocessed_data_path,
                      batch_size=batch_size,
                      image_features_filename=object_image_features_filename)

num_training_samples = generator.training_dataset.shape[0]
num_validation_samples = generator.validation_dataset.shape[0]
print('Number of training samples:', num_training_samples)
print('Number of validation samples:', num_validation_samples)

print(generator.VOCABULARY_SIZE)
print(generator.IMG_FEATS)

model = NIC(max_token_length=generator.MAX_TOKEN_LENGTH,
            vocabulary_size=generator.VOCABULARY_SIZE,
            rnn='gru',
            num_image_features=generator.IMG_FEATS,
            hidden_size=180,
            embedding_size=150)
# print("test")
# model.load_weights("../trained_models/hashtag/hashtag_weights.240-4.3745.hdf5")
# model.compile(loss='categorical_crossentropy',
#               optimizer = 'adam',
#               metrics=['accuracy'])
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()
print('Number of parameters:', model.count_params())
plot_model(model, show_shapes=True, to_file='./nuswide_NIH.png')
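
Once compiled, a model like this is typically fitted straight from the same Generator. The sketch below is an assumption: it relies on the Generator.flow(mode=...) method hinted at by the commented-out call in Example 3 and on the standard Keras 2 fit_generator API; the real project's signatures and epoch count may differ.

steps = num_training_samples // batch_size
val_steps = num_validation_samples // batch_size
model.fit_generator(generator.flow(mode='train'),                   # assumed batch iterator
                    steps_per_epoch=steps,
                    epochs=50,                                      # illustrative value
                    validation_data=generator.flow(mode='validation'),  # assumed mode name
                    validation_steps=val_steps)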
Example 3
print('Number of validation samples:', num_validation_samples)

print(generator.VOCABULARY_SIZE)
print(generator.IMG_FEATS)
#generator.flow(mode='train')
#generator.format_to_one_hot("beach sea trip island japan")
if configs['w2v_weights']:
    embedding_weights = generator.embedding_matrix
else:
    embedding_weights = None

model = NIC(max_token_length=generator.MAX_TOKEN_LENGTH,
            vocabulary_size=generator.VOCABULARY_SIZE,
            tweet_max_len=configs['tweet_max_len'],
            tweet_max_words=configs['tweet_max_words'],
            rnn='gru',
            num_image_features=generator.IMG_FEATS,
            hidden_size=256,
            embedding_size=128,
            embedding_weights=embedding_weights)
# print("test")
# model.load_weights("../trained_models/hashtag/hashtag_weights.240-4.3745.hdf5")
# model.compile(loss='categorical_crossentropy',
#               optimizer = 'adam',
#               metrics=['accuracy'])

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()
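
Examples 1 and 3 optionally pass generator.embedding_matrix as pre-trained embedding weights. Below is a minimal sketch of how such a matrix is commonly assembled from word vectors; build_embedding_matrix, word_index, and w2v are hypothetical stand-ins, not names taken from the project:

import numpy as np

def build_embedding_matrix(word_index, w2v, embedding_size=128):
    # Row 0 is reserved for padding; words missing from w2v keep all-zero rows.
    matrix = np.zeros((len(word_index) + 1, embedding_size))
    for word, idx in word_index.items():
        if word in w2v:
            matrix[idx] = w2v[word]
    return matrix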
Example 4
print(generator.VOCABULARY_SIZE)
print(generator.IMG_FEATS)
#generator.flow(mode='train')
#generator.format_to_one_hot("beach sea trip island japan")

model = NIC(max_token_length=12,
            vocabulary_size=generator.VOCABULARY_SIZE,
            rnn='gru',
            num_image_features=generator.IMG_FEATS,
            hidden_size=128,
            embedding_size=128)
# print("test")
# model.load_weights("../trained_models/hashtag/hashtag_weights.240-4.3745.hdf5")
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()
print('Number of parameters:', model.count_params())
plot_model(model, show_shapes=True, to_file='../images/NIC.png')

training_history_filename = preprocessed_data_path + 'training_hashtag_history.log'
csv_logger = CSVLogger(training_history_filename, append=False)
model_names = ('../trained_models/custom/' +
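
The model_names expression above is truncated in the captured snippet and is left as-is. In Keras projects of this vintage it typically feeds a ModelCheckpoint callback; the sketch below is an assumed completion using a conventional epoch/val_loss filename template, not the project's actual pattern:

from keras.callbacks import ModelCheckpoint

# Assumed checkpoint template; the '{epoch:02d}-{val_loss:.4f}' pattern is a
# common Keras convention, not recovered from the truncated line above.
model_names = ('../trained_models/custom/' +
               'custom_weights.{epoch:02d}-{val_loss:.4f}.hdf5')
model_checkpoint = ModelCheckpoint(model_names,
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=False)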