Code example #1
0
def get_model(config, args, train_dataset, device):
    """Build the encoder-decoder model described by ``config``.

    Constructs an ``Encoder`` and ``Decoder`` over the dataset vocabulary,
    optionally initializes the encoder word embedding from pre-computed
    GloVe vectors, ties the decoder embedding to the encoder's, and moves
    the model to ``device`` (wrapping in ``nn.DataParallel`` for GPUs).

    Args:
        config: Dict with "model" and "dataset" sections (parsed YAML).
        args: Parsed CLI arguments; only ``args.gpu_ids`` is read here.
        train_dataset: Dataset exposing ``vocabulary`` used to size the
            embedding layers.
        device: Target ``torch.device``.

    Returns:
        The ``EncoderDecoderModel`` on ``device`` (possibly a
        ``nn.DataParallel`` wrapper when GPU ids are given).
    """
    # Pass vocabulary to construct Embedding layer.
    encoder = Encoder(config["model"], train_dataset.vocabulary)
    decoder = Decoder(config["model"], train_dataset.vocabulary)
    print("Encoder: {}".format(config["model"]["encoder"]))
    print("Decoder: {}".format(config["model"]["decoder"]))

    # Initialize word_embed from pre-computed GloVe vectors when configured.
    # NOTE(review): np.load may yield float64; embedding weights are normally
    # float32 -- confirm the .npy was saved as float32.
    if "glove_npy" in config["dataset"]:
        encoder.word_embed.weight.data = torch.from_numpy(
            np.load(config["dataset"]["glove_npy"]))
        print("Loaded glove vectors from {}".format(
            config["dataset"]["glove_npy"]))

    # Share word embedding between encoder and decoder. Explicit None checks:
    # the original bare truthiness test (`if encoder.word_embed and ...`)
    # relied on default nn.Module truthiness rather than testing presence.
    if encoder.word_embed is not None and decoder.word_embed is not None:
        decoder.word_embed = encoder.word_embed

    # Wrap encoder and decoder in a model.
    model = EncoderDecoderModel(encoder, decoder).to(device)
    if -1 not in args.gpu_ids:
        model = nn.DataParallel(model, args.gpu_ids)
    return model
Code example #2
0
    overfit=args.overfit,
    in_memory=args.in_memory,
    return_options=True,
    add_boundary_toks=False,
    sample_flag=False
)
# Validation dataloader.
# NOTE(review): shuffle=True on a *validation* loader is unusual -- confirm
# this is intentional (evaluation order normally does not matter, but
# shuffling makes per-batch logs non-reproducible).
val_dataloader = DataLoader(
    val_dataset,
    batch_size=config["solver"]["batch_size"],
    num_workers=args.cpu_workers,
    shuffle=True,
)

# Pass vocabulary to construct Embedding layer.
encoder = Encoder(config["model"], val_dataset.vocabulary)
decoder = Decoder(config["model"], val_dataset.vocabulary)
print("Encoder: {}".format(config["model"]["encoder"]))
print("Decoder: {}".format(config["model"]["decoder"]))

# When training from scratch (no checkpoint path given): tie the decoder
# embedding to the encoder's, then overwrite the shared weight with GloVe
# vectors. The tie must happen first so both modules alias the same tensor.
# NOTE(review): np.load of 'data/glove.npy' may produce float64;
# torch.tensor preserves that dtype -- confirm the file is float32.
if args.load_pthpath == "":
    print('load glove')
    decoder.word_embed = encoder.word_embed
    glove = np.load('data/glove.npy')
    encoder.word_embed.weight.data = torch.tensor(glove)

# Wrap encoder and decoder in a model.
model = EncoderDecoderModel(encoder, decoder).to(device)
if -1 not in args.gpu_ids:
    model = nn.DataParallel(model, args.gpu_ids)
Code example #3
0
                             overfit=args.overfit,
                             in_memory=args.in_memory,
                             return_options=True,
                             add_boundary_toks=False,
                             sample_flag=False)
# Validation dataloader (no shuffling -- deterministic evaluation order).
val_dataloader = DataLoader(
    val_dataset,
    batch_size=config["solver"]["batch_size"],
    num_workers=args.cpu_workers,
)

# Pass vocabulary to construct Embedding layer.
# An auxiliary "dict encoder" is built from its own config alongside the
# main encoder/decoder pair.
encoder_dict = Dict_Encoder(config_dict["model"],
                            train_sample_dataset.vocabulary)
encoder = Encoder(config["model"], train_sample_dataset.vocabulary)
decoder = Decoder(config["model"], train_sample_dataset.vocabulary)
# Share one word-embedding table between encoder and decoder.
decoder.word_embed = encoder.word_embed
# NOTE(review): `model_dict` is the dict-encoder moved to `device`, not a
# state_dict -- the name is misleading; verify downstream usage.
model_dict = encoder_dict.to(device)
# Wrap encoder and decoder in a model.
model = EncoderDecoderModel(encoder, decoder).to(device)
if -1 not in args.gpu_ids:
    model = nn.DataParallel(model, args.gpu_ids)

# Losses: cross-entropy for token/option prediction, BCE-with-logits for a
# binary objective (the exact target is defined where these are used).
criterion = nn.CrossEntropyLoss()
criterion_bce = nn.BCEWithLogitsLoss()
# Number of optimizer steps per epoch (ceiling via the +1; note this
# overcounts by one when the dataset size divides the batch size exactly).
iterations = len(train_sample_dataset) // config["solver"]["batch_size"] + 1


def lr_lambda_fun(current_iteration: int) -> float:
    """Returns a learning rate multiplier.
Code example #4
0
    num_workers=args.cpu_workers,
    shuffle=True,
)

# Validation dataloader. For non-discriminative decoders the batch size is
# tied to the number of GPUs (one sample per device under DataParallel).
val_dataloader = DataLoader(
    val_dataset,
    batch_size=actual_batch_size
    if config["model"]["decoder"] == "disc" else len(args.gpu_ids),
    num_workers=args.cpu_workers,
)

# Pass vocabulary to construct Embedding layer.
encoder = Encoder(config["model"], train_dataset.vocabulary)
# With BERT embeddings the decoder reuses the encoder's BERT module
# directly instead of tying a plain embedding table below.
if word_embedding_type == 'bert':
    decoder = Decoder(config["model"],
                      train_dataset.vocabulary,
                      bert_model=encoder.word_embed.bert)
else:
    decoder = Decoder(config["model"], train_dataset.vocabulary)
logger.info("Encoder: {}".format(config["model"]["encoder"]))
logger.info("Decoder: {}".format(config["model"]["decoder"]))

# Share word embedding between encoder and decoder (BERT already shared
# via the constructor above, so skip in that case).
if not word_embedding_type == 'bert':
    decoder.word_embed = encoder.word_embed

# Wrap encoder and decoder in a model.
model = EncoderDecoderModel(encoder, decoder).to(device)
if -1 not in args.gpu_ids:
    model = nn.DataParallel(model, args.gpu_ids)
Code example #5
0
# Remap precomputed ELMo vectors onto vocabulary indices via the KAT
# (source-index, vocab-index) pairs.
for item in KAT:
    elmo_token[item[1]] = elmo[item[0]]

# Build a dense per-token ELMo table over the GloVe vocabulary; tokens
# without a precomputed vector get a random 1024-dim fallback.
# NOTE(review): `random.random(size=(1, 1024))` only works if `random` is
# numpy.random (the stdlib random.random takes no arguments) -- confirm
# the import.
elmo_list = []
for i in range(len(glovevocabulary)):
    if i in elmo_token.keys():
        elmo_list.append(elmo_token[i])
    else:
        randArray = random.random(size=(1, 1024)).tolist()
        elmo_list.append(randArray[0])
# (vocab_size, 1024) float tensor of ELMo embeddings.
elmo_token = torch.Tensor(elmo_list).view(len(glovevocabulary), -1)

# Pass vocabulary to construct Embedding layer.
encoder = Encoder(config["model"], val_dataset.vocabulary, glove_token,
                  elmo_token)
decoder = Decoder(config["model"], val_dataset.vocabulary, glove_token,
                  elmo_token)
print("Encoder: {}".format(config["model"]["encoder"]))
print("Decoder: {}".format(config["model"]["decoder"]))

# Share word embedding between encoder and decoder: tie all three
# embedding-related submodules so they alias the encoder's parameters.
decoder.glove_embed = encoder.glove_embed
decoder.elmo_embed = encoder.elmo_embed
decoder.embed_change = encoder.embed_change

# Wrap encoder and decoder in a model.
model = EncoderDecoderModel(encoder, decoder).to(device)
if -1 not in args.gpu_ids:
    model = nn.DataParallel(model, args.gpu_ids)

# Restore trained weights from the checkpoint path given on the CLI.
model_state_dict, _ = load_checkpoint(args.load_pthpath)
if isinstance(model, nn.DataParallel):
Code example #6
0
File: bot-final.py  Project: sashank06/visdialciss
# keys: {"dataset", "model", "solver"}
# NOTE(review): yaml.load without an explicit Loader is deprecated and can
# execute arbitrary tags; prefer yaml.safe_load for a plain config file.
config = yaml.load(open('checkpoints/new_features_baseline/config.yml'))

# Validation split with dense annotations; generative ("gen") decoders need
# boundary tokens around answers, discriminative ("disc") ones do not.
val_dataset = VisDialDataset(
    config["dataset"],
    "data/visdial_1.0_val.json",
    "data/visdial_1.0_val_dense_annotations.json",
    return_options=True,
    add_boundary_toks=False
    if config["model"]["decoder"] == "disc"
    else True,
)

# Pass vocabulary to construct Embedding layer.
encoder = Encoder(config["model"], val_dataset.vocabulary)
decoder = Decoder(config["model"], val_dataset.vocabulary)
print("Encoder: {}".format(config["model"]["encoder"]))
print("Decoder: {}".format(config["model"]["decoder"]))

# Load trained weights; a DataParallel wrapper stores the real model under
# `.module`, hence the isinstance branch.
model = EncoderDecoderModel(encoder, decoder)
model_state_dict, _ = load_checkpoint('checkpoints/new_features_baseline/checkpoint_10.pth')
if isinstance(model, nn.DataParallel):
    model.module.load_state_dict(model_state_dict)
else:
    model.load_state_dict(model_state_dict)
model.eval()

# Unpickle pre-extracted validation data.
# NOTE(review): `all_questions` appears twice in this unpacking -- the
# second binding silently overwrites the first. This looks like a bug
# (one of them was probably meant to be a different field); verify against
# how val_data.pkl was written.
with open('data/val_data.pkl','rb') as file:
    (img_ids, caption_vectors, all_captions, all_questions, all_questions_vectors,
    all_answers, all_questions) = pickle.load(file)