import time

import torch
import torch.nn as nn
import torch.optim as O

config = args

config.n_embed = len(inputs.vocab)
config.d_out = len(answers.vocab)
config.n_cells = config.n_layers

# double the number of cells for bidirectional networks
if config.birnn:
    config.n_cells *= 2

if args.resume_snapshot:
    model = torch.load(
        args.resume_snapshot,
        map_location=lambda storage, location: storage.cuda(args.gpu))
else:
    model = LSTMSentiment(config)
    if args.word_vectors:
        model.embed.weight.data = inputs.vocab.vectors
    model.cuda()  # move to GPU even when no pretrained vectors are loaded

criterion = nn.CrossEntropyLoss()

opt = O.Adam(model.parameters())

# model.embed.requires_grad = False

iterations = 0
start_time = time.time()
best_dev_acc = -1
train_iter.repeat = False
header = '  Time Epoch     Loss   Dev/Loss  CD Loss    Accuracy  Dev/Accuracy'
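Why n_cells doubles for a bidirectional network: nn.LSTM keeps one hidden/cell state per layer per direction, so h_0 and c_0 must have shape (num_layers * num_directions, batch, hidden_size). A standalone sketch of that layout (the sizes here are placeholders, not this script's real config):

import torch
import torch.nn as nn

n_layers, d_hidden, batch = 2, 150, 4
for birnn in (False, True):
    n_cells = n_layers * (2 if birnn else 1)
    lstm = nn.LSTM(300, d_hidden, num_layers=n_layers, bidirectional=birnn)
    h0 = torch.zeros(n_cells, batch, d_hidden)
    c0 = torch.zeros(n_cells, batch, d_hidden)
    _, (hn, _) = lstm(torch.zeros(5, batch, 300), (h0, c0))
    assert hn.shape[0] == n_cells  # one state per layer per direction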
Example #2
import pytest
import torch.nn as nn


def test_load_save_state():
    # two identically configured models can round-trip state through disk
    model = LSTMSentiment(0, 2, 300, 150, 3, 'lstm', nn.NLLLoss())
    model2 = LSTMSentiment(0, 2, 300, 150, 3, 'lstm', nn.NLLLoss())
    model.save_state_files('.')
    model2.load_state_files('.')

    # a model built with a mismatched configuration must fail to load it
    model3 = LSTMSentiment(0, 1, 300, 150, 3, 'lstm', nn.NLLLoss())
    with pytest.raises(Exception):
        model3.load_state_files('.')
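save_state_files and load_state_files are not defined in this snippet; a minimal sketch of how they could be implemented around state_dict round-tripping (the mixin shape and the state.pt filename are assumptions, not the project's actual code):

import os
import torch

class StateFileMixin:
    def save_state_files(self, directory):
        # persist only the learnable parameters, not the whole module
        torch.save(self.state_dict(), os.path.join(directory, 'state.pt'))

    def load_state_files(self, directory):
        # load_state_dict raises on any shape mismatch, which is exactly
        # what the mismatched-configuration test above relies on
        state = torch.load(os.path.join(directory, 'state.pt'))
        self.load_state_dict(state)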
Example #3
answers.build_vocab(train)

train_iter, dev_iter, test_iter = data.BucketIterator.splits(
    (train, dev, test), batch_size=args.batch_size, device=args.gpu)

config = args
config.n_embed = len(inputs.vocab)
config.d_out = len(answers.vocab)
config.n_cells = config.n_layers

if args.resume_snapshot:
    model = torch.load(
        args.resume_snapshot,
        map_location=lambda storage, location: storage.cuda(args.gpu))
else:
    model = LSTMSentiment(config)
    if args.word_vectors:
        model.embed.weight.data = inputs.vocab.vectors
    model.cuda()  # move to GPU even when no pretrained vectors are loaded

criterion = nn.CrossEntropyLoss()
opt = O.Adam(model.parameters())

iterations = 0
start = time.time()
best_dev_acc = -1
train_iter.repeat = False
header = '  Time Epoch Iteration Progress    (%Epoch)   Loss   Dev/Loss     Accuracy  Dev/Accuracy'
dev_log_template = ' '.join(
    '{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{:8.6f},{:12.4f},{:12.4f}'
    .split(','))
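header and dev_log_template are paired: the template's format fields print under the header's columns. A quick runnable check with placeholder values (the numbers are illustrative, not real metrics):

print(header)
print(dev_log_template.format(
    12,        # seconds elapsed
    1,         # epoch
    300,       # iteration
    50, 857,   # progress: batch / batches per epoch
    6,         # percent of epoch completed
    0.693147,  # training loss
    0.689312,  # dev loss
    51.2,      # training accuracy
    50.8))     # dev accuracy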
Example #4
    answers,
    fine_grained=False,
    train_subtrees=True,
    filter_pred=lambda ex: ex.label != 'neutral')
inputs.build_vocab(train, dev, test)
inputs.vocab.load_vectors('glove.6B.300d')
answers.build_vocab(train)
train_iter, dev_iter, test_iter = data.BucketIterator.splits(
    (train, dev, test), batch_size=50, repeat=False, device=DEVICE)

############################################
# CREATING MODEL
print("Creating model")
model = LSTMSentiment(embedding_dim=300,
                      hidden_dim=168,
                      vocab_size=len(inputs.vocab),  # was hard-coded to 300
                      label_size=2,
                      gpu_device=DEVICE)
model.word_embeddings.weight.data = inputs.vocab.vectors
model.cuda(device=DEVICE)

# Load previously checkpointed model if it exists
model_path = "model.pt"
if os.path.exists(model_path) and RESUME_CKPT:
    print("Loading previously stored model")
    model = torch.load(model_path)

loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

#############################################
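The snippet stops after building the loss and optimizer; a minimal sketch of the training loop that would typically follow, including the checkpoint that the RESUME_CKPT logic above picks up (the epoch count, the forward signature, and the batch.text / batch.label field names are assumptions):

for epoch in range(5):  # epoch count is a placeholder
    for batch in train_iter:
        model.zero_grad()
        logits = model(batch.text)                 # assumed forward signature
        loss = loss_function(logits, batch.label)
        loss.backward()
        optimizer.step()
    torch.save(model, model_path)  # checkpoint consumed on the next run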
Example #5

# build a new model
class Config(object):
    d_hidden = model.hidden_dim
    n_embed = model.vocab_size + 1
    d_embed = model.emb_dim
    d_out = model.num_out
    batch_size = model.batch_size
    birnn = False


if USE_RANDOM_MODEL_BASELINE:
    from model import LSTMSentiment
    config = Config()
    new_model = LSTMSentiment(config)
    new_model.embed.weight.data.copy_(model.embed.weight.data)  # glove data
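    # sanity check (added assumption, not in the original): after the copy,
    # the fresh model's embedding table matches the trained model's exactly
    assert torch.equal(new_model.embed.weight.data, model.embed.weight.data)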
    new_model.use_gpu = False
    model = new_model  # completely random new model


# build_tree arguments:
#   [l, r]: span of word positions to cover; a = previous line, b = current line
#   children: [num_words]
#   tree_dict: maps an (i, j) span to its already-built subtree
def build_tree(l, r, a, b, score, tree_dict):
    ret = []
    i = l
    while i <= r:
        if a[i] != 0:
            # position i starts a span already recorded in tree_dict; reuse it
            j = nonzero(i, a)
            ret.append(tree_dict[(i, j)])