Example 1
data_amount = 1
soft_labels = True

if soft_labels:
    from PrepareDataSoftLabels import PrepareData
else:
    from PrepareOriginalData import PrepareData

# Load training set
# Note: this operation is needed only to generate a dictionary, but one could also load a saved one instead
p = PrepareData(
    path_images='data_vqa_feat',  # Path to image features 
    subset='trainval',  # Desired subset: either train2014, val2014 or trainval
    taskType='OpenEnded',  # 'OpenEnded', 'MultipleChoice', 'all'
    cut_data=data_amount,  # Fraction of the data to use: 1 = everything; values above 1 = number of samples (for debugging)
    output_path='data',  # Path where we want to output temporary data
    pad_length=32,  # Number of words in a question (zero padded)
    question_threshold=6,
    answer_threshold=3,  # Keep only the most common words
    questions_sparse=False,
    image_extractor='RawImages')
image_features, questions, answers, annotations = p.load_data()
print("Image features", image_features.shape)
print("Question features", questions.shape)
print("Answers", answers.shape)
print("Dictionary size", p.dic_size)
p.dumpDictionary()
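# As noted above, instead of rebuilding the dictionary on every run one could
# reload a previously dumped one. A minimal sketch, assuming dumpDictionary()
# pickles the word-to-index mapping under the output path (the filename below
# is hypothetical):
import os
import pickle

dict_file = os.path.join('data', 'dictionary.pkl')  # hypothetical location/name
if os.path.exists(dict_file):
    with open(dict_file, 'rb') as f:
        word_to_index = pickle.load(f)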

# Define neural network
if soft_labels:
    neuralnet = NeuralNetwork(image_features.shape[0],
Example 2
from NeuralNetworkSoftLabels import NeuralNetwork
from PrepareDataSoftLabels import PrepareData  # assumed module: the soft-label loader used in Example 1
import numpy as np
from EvaluateModel import ProduceResult

# Some constants
taskType = 'all'
data_amount = 1
epochs = 50

# Load training set
p = PrepareData(
    path_images='data_vqa_feat',  # Path to image features 
    subset='trainval',  # Desired subset: either train2014, val2014 or trainval
    taskType='OpenEnded',  # 'OpenEnded', 'MultipleChoice', 'all'
    cut_data=data_amount,  # Fraction of the data to use: 1 = everything; values above 1 = number of samples (for debugging)
    output_path='data',  # Path where we want to output temporary data
    pad_length=32,  # Number of words in a question (zero padded)
    question_threshold=0,
    answer_threshold=0,  # Keep only most common words
    questions_sparse=True)
image_features, questions, answers, annotations = p.load_data()
print("Image features", image_features.shape)
print("Question features", questions.shape)
print("Answers", answers.shape)
print("Dictionary size", p.dic_size)

# Save dictionary
p.dumpDictionary()

# Use this when using the sparse question representation
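# A minimal sketch of what that could look like, assuming questions_sparse=True
# returns padded integer word indices of shape (num_questions, pad_length) and
# that a model may need them expanded into dense one-hot vectors
# (to_one_hot is a hypothetical helper, not part of the project):
import numpy as np

def to_one_hot(sparse_questions, dic_size):
    """Expand padded integer word indices into one-hot vectors of size dic_size."""
    n, pad_length = sparse_questions.shape
    one_hot = np.zeros((n, pad_length, dic_size), dtype=np.float32)
    rows = np.arange(n)[:, None]                 # question index, broadcast over word positions
    cols = np.arange(pad_length)[None, :]        # word position, broadcast over questions
    one_hot[rows, cols, sparse_questions] = 1.0  # mark each word's dictionary index
    return one_hot

# Example usage:
# dense_questions = to_one_hot(questions, p.dic_size)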