if not use_symbolic_input and shapes_dataset is not None:
    print('N image features: {}'.format(n_image_features))
if rsa_sampling >= 0:
    print('N samples for RSA: {}'.format(rsa_sampling))
print()
#################################################

print("build vocab")
if shapes_dataset is not None:
    # Create vocab if there is not one for the desired size already
    if not does_vocab_exist(vocab_size):
        build_vocab(vocab_size)

print("loading vocab")
# Load vocab
word_to_idx, idx_to_word, bound_idx = load_dictionaries(
    'shapes' if shapes_dataset is not None else 'mscoco', vocab_size)
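# Going by their names, the three loaded objects are a token-to-index
# mapping, its inverse, and the index of the boundary symbol used to
# delimit messages. Hypothetical illustration only (the actual vocabulary
# comes from build_vocab):
#   word_to_idx['red']  -> e.g. 7
#   idx_to_word[7]      -> 'red'
#   bound_idx           -> index of the message-boundary token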
print("loading pretrained cnn")
# Load pretrained CNN if necessary
if not should_train_visual and not use_symbolic_input and shapes_dataset is not None:
    cnn_model_id = cnn_model_file_name.split('/')[-1]

    features_folder_name = 'data/shapes/{}_{}'.format(shapes_dataset,
                                                      cnn_model_id)

    # Check if the features were already extracted with this CNN
    if not os.path.exists(features_folder_name):
        # Load the dumped checkpoint; map_location keeps the weights on
        # the CPU regardless of the device the model was trained on
        state = torch.load(cnn_model_file_name,
                           map_location=lambda storage, location: storage)
        # Keep only the CNN parameters and strip the four leading
        # characters (presumably the 'cnn.' prefix) from their keys
        cnn_state = {k[4:]: v for k, v in state.items() if 'cnn' in k}
        trained_cnn = CNN(n_image_features)
Example #2
print('Target dataset: {}'.format(target_shapes_dataset))
print('Distractors dataset: {}'.format(distractors_shapes_dataset))
print('Lambda: {}'.format(vl_loss_weight))
print('Alpha: {}'.format(bound_weight))
print('N image features: {}'.format(n_image_features))
if rsa_sampling >= 0:
    print('N samples for RSA: {}'.format(rsa_sampling))
print()
#################################################

# Create vocab if there is not one for the desired size already
if not does_vocab_exist(vocab_size):
    build_vocab(vocab_size)

# Load vocab
word_to_idx, idx_to_word, bound_idx = load_dictionaries('shapes', vocab_size)

# Load/generate features
cnn_dump_id = model_file_name.split('/')[-1]

target_features_folder_name = 'data/shapes/{}_{}'.format(
    target_shapes_dataset, cnn_dump_id)

# Check if the features were already extracted with this CNN
if not os.path.exists(target_features_folder_name):
    # Load CNN from dumped model
    state = torch.load(model_file_name,
                       map_location=lambda storage, location: storage)
    cnn_state = {k[4:]: v for k, v in state.items() if 'cnn' in k}
    trained_cnn = CNN(n_image_features)
    trained_cnn.load_state_dict(cnn_state)
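# ----------------------------------------------------------------
# Minimal sketch of the feature-extraction/caching step that presumably
# follows here. The function name, the 'features.pt' file name and the
# assumption that the CNN maps a batch of images straight to a feature
# tensor are illustrative only, not this project's actual implementation.
import os

import torch


def extract_and_cache_features(extractor, images, folder_name,
                               batch_size=128, device='cpu'):
    """Run `extractor` over `images` (an N x C x H x W tensor) and cache
    the resulting feature matrix as a single file in `folder_name`."""
    os.makedirs(folder_name, exist_ok=True)
    extractor = extractor.to(device).eval()

    chunks = []
    with torch.no_grad():  # no gradients needed when extracting features
        for start in range(0, images.size(0), batch_size):
            batch = images[start:start + batch_size].to(device)
            chunks.append(extractor(batch).cpu())

    features = torch.cat(chunks, dim=0)
    torch.save(features, os.path.join(folder_name, 'features.pt'))
    return features

# Usage would look roughly like:
#   extract_and_cache_features(trained_cnn, images, target_features_folder_name)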