Example #1
0
    print("Note: You are using GPU for training")
    torch.cuda.set_device(args.gpu)
    torch.cuda.manual_seed(args.seed)
# Warn when a CUDA device exists but this run was configured for CPU.
if not args.cuda and torch.cuda.is_available():
    print("Warning: You have Cuda but do not use it. You are using CPU for training")

# Vocabularies produced by preprocessing: word vocab for question tokens,
# relation vocab for the ranking targets.
word_vocab = torch.load(args.vocab_file)
print('load word vocab, size: %s' % len(word_vocab))
rel_vocab = torch.load(args.rel_vocab_file)
print('load relation vocab, size: %s' % len(rel_vocab))

# Training / validation batches come pre-tensorized from disk.
train_loader = SeqRankingLoader(args.train_file, args.gpu)
print('load train data, batch_num: %d\tbatch_size: %d'
      % (train_loader.batch_num, train_loader.batch_size))
valid_loader = SeqRankingLoader(args.valid_file, args.gpu)
print('load valid data, batch_num: %d\tbatch_size: %d'
      % (valid_loader.batch_num, valid_loader.batch_size))

# Checkpoints land here; creating the directory up front avoids a
# failure at save time.
os.makedirs(args.save_path, exist_ok=True)

# Reuse the parsed arguments as the model configuration.  NOTE: this is an
# alias, not a copy — fields added to config also appear on args.
config = args
# A bidirectional RNN keeps two cell states per layer.
config.n_cells = config.n_layers * 2 if config.birnn else config.n_layers
print(config)
Example #2
0
#-*- coding: utf-8 -*-

# Author: QuYingqi
# mail: [email protected]
# Created Time: 2017-11-09
import sys
import torch
from seqRankingLoader import SeqRankingLoader
import numpy as np
sys.path.append('../vocab')

# The relation vocabulary size bounds the ids sampled as negative relations.
rel_vocab = torch.load('../vocab/vocab.rel.pt')
neg_range = len(rel_vocab)
print(neg_range)
word_vocab = torch.load('../vocab/vocab.word.pt')
loader = SeqRankingLoader('data/test.relation_ranking.pt', 5, neg_range, 0)
batch_size = loader.batch_size

# Inspect only the first batch: decode each question together with its
# positive relation and the sampled negative relations back to text.
for batch_idx, batch in enumerate(loader.next_batch(False)):
    if batch_idx >= 1:
        break
    seqs, pos_rel, neg_rel = batch
    # Sequence tensors are (seq_len, batch); transpose so each row is
    # one example.  pos_rel is already one id per example.
    seqs_rows = seqs.cpu().data.numpy().T
    pos_rows = pos_rel.cpu().data.numpy()
    neg_rows = neg_rel.cpu().data.numpy().T
    for row in range(5):
        print(' '.join(word_vocab.convert_to_word(seqs_rows[row])))
        print(rel_vocab.convert_to_word([pos_rows[row]]))
        print(' | '.join(rel_vocab.convert_to_word(neg_rows[row])))
Example #3
0
    torch.cuda.set_device(args.gpu)
    torch.cuda.manual_seed(args.seed)
# Warn when a CUDA device exists but this run was configured for CPU.
if not args.cuda and torch.cuda.is_available():
    print("Warning: You have Cuda but do not use it. You are using CPU for training")

# Vocabularies produced by preprocessing: word vocab for question tokens,
# relation vocab for the ranking targets.
word_vocab = torch.load(args.vocab_file)
word_vocab.add_start_token()  # adds the separator used when replacing sub_text
print('load word vocab, size: %s' % len(word_vocab))
rel_vocab = torch.load(args.rel_vocab_file)
print('load relation vocab, size: %s' % len(rel_vocab))

# Loaders need the relation-vocab size to sample negative relation ids.
train_loader = SeqRankingLoader(args.train_file, len(rel_vocab), args.gpu)
print('load train data, batch_num: %d\tbatch_size: %d'
      % (train_loader.batch_num, train_loader.batch_size))
valid_loader = SeqRankingLoader(args.valid_file, len(rel_vocab), args.gpu)
print('load valid data, batch_num: %d\tbatch_size: %d'
      % (valid_loader.batch_num, valid_loader.batch_size))

# Checkpoints land here; creating the directory up front avoids a
# failure at save time.
os.makedirs(args.save_path, exist_ok=True)

# Reuse the parsed arguments as the model configuration.  NOTE: this is an
# alias, not a copy — fields added to config also appear on args.
config = args
# A bidirectional RNN keeps two cell states per layer.
config.n_cells = config.n_layers * 2 if config.birnn else config.n_layers
print(config)
Example #4
0
    print("Note: You are using GPU for training")
    torch.cuda.set_device(args.gpu)
    torch.cuda.manual_seed(args.seed)
# Warn when a CUDA device exists but this run was configured for CPU.
if not args.cuda and torch.cuda.is_available():
    print("Warning: You have Cuda but do not use it. You are using CPU for training")

# Vocabularies produced by preprocessing: word vocab for question tokens,
# relation vocab for the ranking targets.
word_vocab = torch.load(args.vocab_file)
print('load word vocab, size: %s' % len(word_vocab))
rel_vocab = torch.load(args.rel_vocab_file)
print('load relation vocab, size: %s' % len(rel_vocab))

# Loaders need the negative-sample count and the relation-vocab size
# (the range to sample negative relation ids from).
train_loader = SeqRankingLoader(args.train_file, args.neg_size,
                                len(rel_vocab), args.gpu)
print('load train data, batch_num: %d\tbatch_size: %d'
      % (train_loader.batch_num, train_loader.batch_size))
valid_loader = SeqRankingLoader(args.valid_file, args.neg_size,
                                len(rel_vocab), args.gpu)
print('load valid data, batch_num: %d\tbatch_size: %d'
      % (valid_loader.batch_num, valid_loader.batch_size))

# Checkpoints land here; creating the directory up front avoids a
# failure at save time.
os.makedirs(args.save_path, exist_ok=True)

# Reuse the parsed arguments as the model configuration.  NOTE: this is an
# alias, not a copy — fields added to config also appear on args.
config = args
# A bidirectional RNN keeps two cell states per layer.
config.n_cells = config.n_layers * 2 if config.birnn else config.n_layers