Example No. 1
X_test = np.array(X_test)
y_test = np.array(y_test)
mask_test = np.array(mask_test)
y_test = to_categorical(y_test, 2)

# Shuffle the data randomly
permutation = np.random.permutation(X_test.shape[0])
X_test = X_test[permutation, :, :, :]
y_test = y_test[permutation, :]
mask_test = mask_test[permutation, :, :]

testData_sum = len(X_test)  # total number of test samples
print("-------------- Test data loading complete --------------")

use_cuda = torch.cuda.is_available()  # is a GPU available in this environment
net = CNN_LSTM.CNN_LSTM(cfg.ts)

if use_cuda and cfg.gpu:
    net = net.cuda()
    net = nn.DataParallel(net)

if cfg.re:
    print('------------------------------')
    print('==> Loading checkpoint')
    if not os.path.exists(cfg.ckpt):
        raise AssertionError('Checkpoint path not found')
    checkpoint = torch.load(cfg.ckpt)
    net.load_state_dict(checkpoint['net'])
    best_test_acc = checkpoint['best_test_acc']
    print('best_test_acc is %.4f%%' % best_test_acc)
    best_test_acc_epoch = checkpoint['best_test_acc_epoch']
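
Example No. 1 only reads the checkpoint; the code that writes it is not shown. Below is a minimal, hypothetical sketch of how a checkpoint carrying the 'net', 'best_test_acc' and 'best_test_acc_epoch' keys loaded above could be saved during training; the save_checkpoint helper and its arguments are assumptions, not part of the original script.

import os
import torch

def save_checkpoint(net, best_test_acc, best_test_acc_epoch, ckpt_path):
    # Bundle the model weights with the best-accuracy bookkeeping read in Example No. 1
    state = {
        'net': net.state_dict(),
        'best_test_acc': best_test_acc,
        'best_test_acc_epoch': best_test_acc_epoch,
    }
    os.makedirs(os.path.dirname(ckpt_path) or '.', exist_ok=True)
    torch.save(state, ckpt_path)
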
Example No. 2
def main(config):

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    train_dataset = CNN_LSTM_Triplet_Dataset(config)
    print(len(train_dataset))
    train_sampler = RandomSampler(train_dataset)
    train_loader = DataLoader(train_dataset,
                              batch_size=config["model"]["batch_size"],
                              sampler=train_sampler)

    model = CNN_LSTM(config).double().to(device=device)

    # if True:
    #     model.load_state_dict(torch.load("./best_model_eval/model_acc_83.3276.pth"))

    # optimizer = torch.optim.Adam(lr=1e-5, betas=(0.9, 0.98), eps=1e-9)
    # Exclude biases and normalisation parameters from weight decay
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta']
    optimizer_grouped_parameters = [
        {
            'params': [p for n, p in param_optimizer
                       if not any(nd in n for nd in no_decay)],
            'weight_decay': 0.01
        },
        {
            'params': [p for n, p in param_optimizer
                       if any(nd in n for nd in no_decay)],
            'weight_decay': 0.0
        },
    ]

    optimizer = torch.optim.Adam(optimizer_grouped_parameters,
                                 lr=1e-2,
                                 betas=(0.9, 0.98),
                                 eps=1e-9)
    loss_fn = nn.TripletMarginLoss()

    acc_dev_previous = 0
    loss_dev_previous = 1000
    loss_train = 1000

    for epoch in range(1, config["model"]["epoch"] + 1):
        print(f"Training epoch {str(epoch)}")

        loss_train = train(train_loader, model, optimizer, loss_fn, loss_train,
                           epoch, device)

        print(f"Evaluate model.............")

        acc_dev, loss_dev = evaluate(dev_loader, model, loss_fn, epoch,
                                     len(dev_dataset), device)

        print(f"Accuracy score: {acc_dev:.4f} at epoch {epoch}")
        print(f"Loss Dev: {loss_dev:.4f} at epoch {epoch}")
        print("=" * 15, f"END EPOCH {epoch}", "=" * 15)

        if acc_dev > acc_dev_previous:
            acc_dev_previous = acc_dev
            torch.save(model.state_dict(),
                       f"./best_model_eval/model_acc_{round(acc_dev, 4)}.pth")
        if loss_dev < loss_dev_previous:
            loss_dev_previous = loss_dev
            torch.save(
                model.state_dict(),
                f"./best_model_eval/model_loss_{round(int(loss_dev), 4)}.pth")
Example No. 3
        testRoot, dl, 160, ts, lb)
    X_test = np.array(X_test)
    y_test = np.array(y_test)
    mask_test = np.array(mask_test)
    y_test = to_categorical(y_test, 22)

    # Shuffle the data randomly
    permutation = np.random.permutation(X_test.shape[0])
    X_test = X_test[permutation, :, :, :]
    y_test = y_test[permutation, :]
    mask_test = mask_test[permutation, :, :]

    testData_sum = len(X_test)  # total number of test samples
    print("-------------- Test data loading complete --------------")

    inputs = torch.Tensor(X_test)
    target = torch.Tensor(y_test)
    mask = torch.Tensor(mask_test)

    net = CNN_LSTM.CNN_LSTM(ts)
    checkpoint = torch.load("./checkpoints/res1_52/CNN-LSTM-82.t7")
    net.load_state_dict(checkpoint['net'])

    x2_atten, atten2, x3_atten, atten3, x1, x2, x3, x4, x = net(inputs, mask)

    print(x2_atten.size())

    # Y = tsne(X, 2, 50, 20.0)
    # pylab.scatter(Y[:, 0], Y[:, 1], 20, labels)
    # pylab.show()
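
    The commented-out lines above hint at a t-SNE visualisation of the extracted features. A hedged sketch of that step using scikit-learn and matplotlib instead of the standalone tsne/pylab helpers is shown below; the reshaping of x2_atten and the use of y_test as colour labels are assumptions.

    from sklearn.manifold import TSNE
    import matplotlib.pyplot as plt

    # Flatten the per-sample attention features and recover integer labels from the one-hot targets
    features = x2_atten.detach().numpy().reshape(len(X_test), -1)
    labels = y_test.argmax(axis=1)

    # Embed into 2D and plot, mirroring the commented-out tsne(X, 2, 50, 20.0) call
    embedded = TSNE(n_components=2, perplexity=20.0).fit_transform(features)
    plt.scatter(embedded[:, 0], embedded[:, 1], s=20, c=labels)
    plt.show()
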
Example No. 4
	train_iter, dev_iter, test_iter = load_dataset(text_field, label_field, args, device=-1, repeat=False, shuffle=True)  # load the datasets
	args.VOCAB_SIZE = len(text_field.vocab)  # number of distinct tokens in the text vocabulary
	args.OUTPUT_SIZE = args.HIDDEN_DIM
	args.BEST_MODEL = "best_" + str(args.EMBEDDING_DIM) + ".pt"
	args.KERNEL_SIZES = [int(k) for k in args.KERNEL_SIZES.split(',')]

	# build the index-to-class mapping from the label vocabulary
	index2class = label_field.vocab.itos[1:]
	index2class = list(map(int, index2class))

	print('Parameters:')

	for attr, value in sorted(args.__dict__.items()):
		if attr in {'vectors'}:
			continue
		print('\t{}={}'.format(attr.upper(), value))

	model = CNN_LSTM(index2class, args)
	model_name = train_model(model, train_iter, dev_iter, args)
	print('\n'+model_name)

	if args.DEVICE == 'cuda':
		model.cuda()

	model.load_state_dict(torch.load(model_name, map_location='cpu'))
	test_acc = test_model(model, test_iter, args, index2class)
	print(f'Test Acc: {test_acc:.2f}%')
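
Example No. 4 only evaluates on test_iter; for completeness, here is a hypothetical single-sentence prediction helper built on the same text_field / index2class objects. It assumes the older torchtext Field API (preprocess/process), that the model accepts the tensor produced by process() directly, and that it returns class logits; predict_one itself is not part of the original script.

def predict_one(model, text_field, index2class, sentence, device='cpu'):
	model.eval()
	tokens = text_field.preprocess(sentence)           # tokenise the raw string
	x = text_field.process([tokens], device=device)    # pad and numericalise to a LongTensor
	with torch.no_grad():
		logits = model(x)
	return index2class[logits.argmax(dim=-1).item()]   # map the predicted index back to its class label

print(predict_one(model, text_field, index2class, "example sentence"))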