Example #1
import os

import torch

# args is assumed to be the script's parsed argparse namespace;
# split_data_coco, img_encoder and text_gru_encoder come from the
# project's own modules and are not part of this snippet.

# use the gpu if requested and available, and set the tensor type to match
cuda = args.cuda and torch.cuda.is_available()
if cuda:
    print('using gpu')
    dtype = torch.cuda.FloatTensor
else:
    print('using cpu')
    dtype = torch.FloatTensor


# generator that yields all the data nodes in the h5 file.
def iterate_data(h5_file):
    for x in h5_file.root:
        for y in x:
            yield y
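# data_file is assumed (it is not shown in this snippet) to be an open
# pytables handle, since the nodes are reached through h5_file.root, e.g.:
# import tables
# data_file = tables.open_file(args.data_loc, mode='r')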


f_nodes = list(iterate_data(data_file))

# split the database into train, test and validation sets. the default
# settings use the json file with the karpathy split
train, val = split_data_coco(f_nodes)
# set aside the last 5000 images as the test set
test = train[-5000:]
train = train[:-5000]
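# quick sanity check on the split sizes (illustrative only; the exact
# counts depend on the karpathy json)
print(f'train: {len(train)}, val: {len(val)}, test: {len(test)}')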

#####################################################
# network modules
img_net = img_encoder(image_config)
cap_net = text_gru_encoder(char_config)
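# a small sketch, not part of the original snippet: move the encoders to
# the gpu when one is available, matching the cuda flag set above
if cuda:
    img_net.cuda()
    cap_net.cuda()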

# list all the saved caption and image model checkpoints
models = os.listdir(args.results_loc)
caption_models = [x for x in models if 'caption' in x]
img_models = [x for x in models if 'image' in x]

# create a trainer with just the evaluator for the purpose of testing a pretrained model
Example #2
import os

import torch
cuda = args.cuda and torch.cuda.is_available()
if cuda:
    print('using gpu')
else:
    print('using cpu')

# generator that yields all the data nodes in the h5 file.
def iterate_data(h5_file):
    for x in h5_file.root:
        for y in x:
            yield y
f_nodes = list(iterate_data(data_file))

# split the database into train, test and validation sets. the default
# settings use the json file with the karpathy split
train, test, val = split_data_coco(f_nodes, args.split_loc)
#####################################################
# network modules
img_net = img_encoder(image_config)
cap_net = text_gru_encoder(token_config)

# list all the saved caption and image model checkpoints
models = os.listdir(args.results_loc)
caption_models = [x for x in models if 'caption' in x]
img_models = [x for x in models if 'image' in x]

# run the image and caption retrieval
img_models.sort()
caption_models.sort()
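# presumably the sort lines the file names up so that each image checkpoint
# sits at the same index as its matching caption checkpoint, ready to be
# paired with zip(img_models, caption_models)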

# create a trainer with just the evaluator for the purpose of testing a pretrained model
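# a minimal sketch of what testing the pretrained pairs could look like;
# the checkpoint pairing and the final evaluation call are assumptions, only
# torch.load and load_state_dict are standard torch API:
for img_ckpt, cap_ckpt in zip(img_models, caption_models):
    img_net.load_state_dict(torch.load(os.path.join(args.results_loc, img_ckpt)))
    cap_net.load_state_dict(torch.load(os.path.join(args.results_loc, cap_ckpt)))
    # ... run caption-to-image and image-to-caption retrieval on the test set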