# Example #1
# 0
# Build per-question attention features, then ensemble two models on test-dev.

# Gather the 4x4x2048 attention map for each question, keyed by image_id.
# NOTE(review): pickle.load is only safe on trusted local files — do not
# point this at untrusted input.
image_attentions = np.zeros((len(p._original_questions), 4, 4, 2048))
with open('data/attentions_test.pkl', 'rb') as f:
    attentions = pickle.load(f)
    for i, question in enumerate(p._original_questions):
        image_attentions[i] = attentions[question['image_id']]

# Flatten the 4x4 spatial grid into 16 region vectors per image.
# NOTE(review): the reshape sizes the first axis from image_features.shape[0]
# while the array was allocated from len(p._original_questions) — confirm
# these counts always match, otherwise this reshape raises.
image_attentions = image_attentions.reshape(
    (image_features.shape[0], 16, 2048))
print(image_attentions.shape)

# Normalise each of the 16 region feature vectors in place.
for i in range(16):
    image_attentions[:, i, :] = normalize(image_attentions[:, i, :],
                                          copy=False)

# Predict with the attention model and the baseline model, then average the
# two soft score matrices into a single ensemble prediction.
pred = neuralnet.predict(image_attentions, object_matrix, questions,
                         'weights/weights_att_soft.hdf5')
pred = pred + neuralnet.predict(image_features, object_matrix, questions,
                                'weights/weights_baseline_soft.hdf5')
pred = 0.5 * pred
print(pred.shape)

# Write the ensembled predictions in the VQA submission format.
model_evaluator = ProduceResult(p._int_to_answer,
                                p._answer_to_int,
                                dataSubType='test-dev2015',
                                modelName='ensemble')
model_evaluator.produce_results(pred, p._original_questions)
# Example #2
# 0
                          monitor='val_loss',
                          loss='binary_crossentropy')

# Train the network on the prepared training data.
neuralnet.fit(image_features, questions, answers)

# Rebuild the data pipeline for the validation split, reusing the training
# dictionary so question encodings stay consistent.
pt = PrepareData(
    path_images='data_vqa_feat',  # Path to image features
    subset='val2014',             # Desired subset: either train2014 or val2014
    taskType=taskType,            # 'OpenEnded', 'MultipleChoice', 'all'
    cut_data=data_amount,         # Fraction of data to use; 1 = all, >1 = 10-sample debug run
    output_path='data')
pt.loadDictionary('data/dictionary.pkl')  # Use same dictionary as in training
image_features, questions, _, annotations = pt.load_data()
print("Image features", image_features.shape)
print("Question features", questions.shape)
print("Dictionary size", pt.dic_size)

# Score the freshly trained weights on the validation split.
pred = neuralnet.predict_current_state(image_features, questions)

print("=== Results on validation set ===")
model_evaluator = ProduceResult(pt._int_to_answer,
                                pt._answer_to_int,
                                dataSubType='val2014',
                                answer_count=pt._answer_count)
answers = model_evaluator.produce_results(pred, pt._original_questions)
model_evaluator.evaluate(taskType=taskType)
object_matrix = np.concatenate([object_matrix1, object_matrix2, object_matrix3], axis=1)
np.save('data/object_matrix_test.npy', object_matrix)
print(object_matrix.shape)
"""
# Load the precomputed object matrix for the test split.
object_matrix = np.load('data/object_matrix_test.npy')

# Score the weights currently held by the network on the test split.
# pred = neuralnet.predict(image_features, object_matrix, questions, 'weights/weights-99.hdf5')
pred = neuralnet.predict_current_state(
    image_features, object_matrix, questions)
print(pred.shape)

# Package the current-state predictions for the VQA evaluation server.
model_evaluator = ProduceResult(pt._int_to_answer, pt._answer_to_int,
                                dataSubType='test-dev2015',
                                modelName='baseline200')
model_evaluator.produce_results(pred, pt._original_questions)

# Repeat with an explicit checkpoint (epoch 59).
pred = neuralnet.predict(
    image_features, object_matrix, questions, 'weights/weights-59.hdf5')
# pred = neuralnet.predict_current_state(image_features, object_matrix, questions)
print(pred.shape)

# Package the checkpointed predictions for the VQA evaluation server.
model_evaluator = ProduceResult(pt._int_to_answer, pt._answer_to_int,
                                dataSubType='test-dev2015',
                                modelName='baseline180')
model_evaluator.produce_results(pred, pt._original_questions)
# Example #4
# 0
# Score a specific checkpoint and evaluate it on the training split.
# pred = neuralnet.predict_current_state(image_features, object_matrix, questions)
pred = neuralnet.predict(image_features, object_matrix, questions,
                         'weights/weights-11-0.5826.hdf5')
# pred = np.round(pred)
print(pred)

# One hot encode (disabled)
"""
predO = np.zeros((pred.shape[0], 2))
for i in range(pred.shape[0]):
    predO[i, int(pred[i, 0])] = 1
print(predO)
"""
from EvaluateModel import ProduceResult
model_evaluator = ProduceResult(p._int_to_answer,
                                p._answer_to_int,
                                dataSubType='train2014')
answers = model_evaluator.produce_results(pred, p._original_questions)
model_evaluator.evaluate(taskType='OpenEnded')

# Drop the large training arrays before loading the validation split.
# BUG FIX: the original chained assignment
#     image_features = questions = answers = annotations = []
# bound all four names to the SAME list object, so a later in-place
# mutation of any one would silently appear in the others. Rebind each
# name to its own empty list instead.
image_features = []
questions = []
answers = []
annotations = []
question_type = 'yes/no'
# Load validation set and evaluate prediction on it
pt = PrepareData(
    path_images='data_vqa_feat',  # Path to image features 
    subset='val2014',  # Desired subset: either train2014 or val2014
    taskType=taskType,  # 'OpenEnded', 'MultipleChoice', 'all'
    cut_data=
    data_amount,  # Percentage of data to use, 1 = All values, above 1 = 10 samples for debugging
    output_path='data',  # Path where we want to output temporary data
    pad_length=32,  # Number of words in a question (zero padded)
# Example #5
# 0
# Report the shape of the loaded test-set image features.
print(image_features.shape)

# Normalise features
# L2-normalise each of the 16 region feature vectors in place.
for i in range(16):
    image_features[:, i, :] = normalize(image_features[:, i, :], copy=False)

# Test prediction on test set
# NOTE(review): the result of this predict() call is immediately overwritten
# by predict_current_state() below. Presumably predict() also loads
# 'weights/weights-299.hdf5' into the model so the second call runs with
# those weights — confirm; otherwise the first call is dead work.
pred = neuralnet.predict(image_features, object_matrix, questions,
                         'weights/weights-299.hdf5')
pred = neuralnet.predict_current_state(image_features, object_matrix,
                                       questions)
print(pred.shape)

# Evaluate model
# Package the predictions ('sandro_300') for the VQA evaluation server.
model_evaluator = ProduceResult(pt._int_to_answer,
                                pt._answer_to_int,
                                dataSubType='test-dev2015',
                                modelName='sandro_300')
model_evaluator.produce_results(pred, pt._original_questions)

# Score checkpoint 274 on the test split.
pred = neuralnet.predict(
    image_features, object_matrix, questions, 'weights/weights-274.hdf5')
# pred = neuralnet.predict_current_state(image_features, object_matrix, questions)
print(pred.shape)

# Package the predictions ('sandro275') for the VQA evaluation server.
model_evaluator = ProduceResult(pt._int_to_answer, pt._answer_to_int,
                                dataSubType='test-dev2015',
                                modelName='sandro275')
model_evaluator.produce_results(pred, pt._original_questions)