Example #1
# Network is configured to monitor validation loss and train with binary cross-entropy
                          monitor='val_loss',
                          loss='binary_crossentropy')
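# For reference only: in plain Keras the same settings would look roughly like the
# sketch below (an assumption about what the wrapper class does; `model`, the
# optimizer and the callback are illustrative, not part of this project):
# model.compile(optimizer='adam', loss='binary_crossentropy')
# callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss')]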

# Train network
neuralnet.fit(image_features, questions, answers)

# Load validation set and evaluate prediction on it
pt = PrepareData(
    path_images='data_vqa_feat',  # Path to image features 
    subset='val2014',  # Desired subset: either train2014 or val2014
    taskType=taskType,  # 'OpenEnded', 'MultipleChoice', 'all'
    cut_data=data_amount,  # Fraction of the data to use: 1 = all samples; values above 1 = only 10 samples (debugging)
    output_path='data')
pt.loadDictionary('data/dictionary.pkl')  # Use same dictionary as in training
image_features, questions, _, annotations = pt.load_data()
print("Image features", image_features.shape)
print("Question features", questions.shape)
print("Dictionary size", pt.dic_size)

# Test prediction on validation set
pred = neuralnet.predict_current_state(image_features, questions)

print("=== Results on validation set ===")
model_evaluator = ProduceResult(pt._int_to_answer,
                                pt._answer_to_int,
                                dataSubType='val2014',
                                answer_count=pt._answer_count)
answers = model_evaluator.produce_results(pred, pt._original_questions)
model_evaluator.evaluate(taskType=taskType)
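# Optional: eyeball a few raw predictions next to their questions (a sketch;
# assumes `pt._original_questions` is aligned row-for-row with `pred` -- adjust
# to the actual structure if it differs):
# for q, row in zip(pt._original_questions[:5], pred[:5]):
#     print(q, '->', row.argmax())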
Example #2
# pred = np.round(pred)
print(pred)

# One hot encode
"""
predO = np.zeros((pred.shape[0], 2))
for i in range(pred.shape[0]):
    predO[i, int(pred[i, 0])] = 1
print(predO)
"""
from EvaluateModel import ProduceResult
model_evaluator = ProduceResult(p._int_to_answer,
                                p._answer_to_int,
                                dataSubType='train2014')
answers = model_evaluator.produce_results(pred, p._original_questions)
model_evaluator.evaluate(taskType='OpenEnded')

# Drop references to the training-set arrays before loading the validation set
image_features = questions = answers = annotations = []
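# If memory is tight, the arrays can also be reclaimed explicitly with the
# standard-library garbage collector (not used in the original example):
# import gc
# gc.collect()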
question_type = 'yes/no'
# Load validation set and evaluate prediction on it
pt = PrepareData(
    path_images='data_vqa_feat',  # Path to image features 
    subset='val2014',  # Desired subset: either train2014 or val2014
    taskType=taskType,  # 'OpenEnded', 'MultipleChoice', 'all'
    cut_data=data_amount,  # Fraction of the data to use: 1 = all samples; values above 1 = only 10 samples (debugging)
    output_path='data',  # Path where we want to output temporary data
    pad_length=32,  # Number of words in a question (zero padded)
    question_threshold=0,  # Threshold for filtering question words (0 = keep all)
    answer_threshold=10,  # Keep only the most common answers
    questions_sparse=True,