Example #1
import pickle
import numpy as np
from sklearn.preprocessing import normalize

# Load per-image attention features (a 4x4 grid of 2048-d vectors) keyed by image_id
image_attentions = np.zeros((len(p._original_questions), 4, 4, 2048))
with open('data/attentions_test.pkl', 'rb') as f:
    attentions = pickle.load(f)
    for i in range(len(p._original_questions)):
        image_attentions[i] = attentions[p._original_questions[i]['image_id']]
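# The pickle above is assumed (not shown in this excerpt) to hold a dict mapping
# image_id to a (4, 4, 2048) array; a hypothetical way such a file could have
# been produced is:
#     with open('data/attentions_test.pkl', 'wb') as f:
#         pickle.dump(computed_attentions, f)   # computed_attentions: {image_id: grid}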

# Reshape: flatten the 4x4 spatial grid into 16 region vectors per image
image_attentions = image_attentions.reshape(
    (image_features.shape[0], 16, 2048))
print(image_attentions.shape)

# L2-normalise each of the 16 region feature slices (see the sketch below)
for i in range(16):
    image_attentions[:, i, :] = normalize(image_attentions[:, i, :],
                                          copy=False)
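
# Minimal self-contained sketch of what sklearn's normalize() does with its
# default norm='l2': each row is rescaled to unit Euclidean length.
_example = np.array([[3.0, 4.0], [0.0, 5.0]])
assert np.allclose(normalize(_example),
                   _example / np.linalg.norm(_example, axis=1, keepdims=True))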

# Predict with the attention model and the baseline, then average their soft scores
pred = neuralnet.predict(image_attentions, object_matrix, questions,
                         'weights/weights_att_soft.hdf5')
pred = pred + neuralnet.predict(image_features, object_matrix, questions,
                                'weights/weights_baseline_soft.hdf5')
pred = 0.5 * pred
print(pred.shape)

# Produce results
model_evaluator = ProduceResult(p._int_to_answer,
                                p._answer_to_int,
                                dataSubType='test-dev2015',
                                modelName='ensemble')
model_evaluator.produce_results(pred, p._original_questions)
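
# Hypothetical sketch of the mapping produce_results is assumed to perform:
# take the argmax over the ensembled scores and translate each class index back
# to an answer string via the int-to-answer dictionary. The 'question_id' key
# is an assumption based on the standard VQA results format, not code from the
# original project.
_best = np.argmax(pred, axis=1)
_results = [{'question_id': q.get('question_id'), 'answer': p._int_to_answer[int(k)]}
            for q, k in zip(p._original_questions, _best)]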
Example #2
# (Continuation of the network construction call; the arguments preceding these
#  keyword arguments are not part of this excerpt.)
                          monitor='val_loss',
                          loss='binary_crossentropy')

# Train network
neuralnet.fit(image_features, questions, answers)

# Load validation set and evaluate prediction on it
pt = PrepareData(
    path_images='data_vqa_feat',  # Path to image features
    subset='val2014',             # Desired subset: either train2014 or val2014
    taskType=taskType,            # 'OpenEnded', 'MultipleChoice' or 'all'
    cut_data=data_amount,         # Fraction of data to use: 1 = all data, above 1 = only 10 samples (debugging)
    output_path='data')
pt.loadDictionary('data/dictionary.pkl')  # Use same dictionary as in training
image_features, questions, _, annotations = pt.load_data()
print("Image features", image_features.shape)
print("Question features", questions.shape)
print("Dictionary size", pt.dic_size)

# Test prediction on validation set
pred = neuralnet.predict_current_state(image_features, questions)

print("=== Results on validation set ===")
model_evaluator = ProduceResult(pt._int_to_answer,
                                pt._answer_to_int,
                                dataSubType='val2014',
                                answer_count=pt._answer_count)
answers = model_evaluator.produce_results(pred, pt._original_questions)
model_evaluator.evaluate(taskType=taskType)
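
# Sketch of the per-question accuracy rule that evaluate() is assumed to apply
# (the standard VQA metric in its common simplified form): a predicted answer
# scores min(#matching human annotations / 3, 1), averaged over all questions.
def _vqa_accuracy(predicted, human_answers):
    matches = sum(1 for a in human_answers if a == predicted)
    return min(matches / 3.0, 1.0)

print(_vqa_accuracy('yes', ['yes'] * 2 + ['no'] * 8))  # 2 matches -> 0.667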