Example #1
import torch
# Project-level names (CNN, LSTM, vocab, IDDataset, evaluation) are assumed
# to come from the surrounding repository.


def main(args):

    # Build the models
    encoder = CNN(args.hidden_size)
    decoder = LSTM(args.embed_size, args.hidden_size,
                   len(vocab), args.num_layers)

    # Load the trained parameters, then switch both nets to evaluation mode
    # (BatchNorm uses its running statistics; dropout, if any, is disabled)
    encoder.load_state_dict(torch.load(args.encoder_path))
    decoder.load_state_dict(torch.load(args.decoder_path))
    encoder.eval()
    decoder.eval()

    # Load the test split
    is_training = True
    testing_data = IDDataset(not is_training)

    # Move the models to the GPU if one is available
    if torch.cuda.is_available():
        encoder.cuda()
        decoder.cuda()

    test_acc = evaluation(testing_data, encoder, decoder)

    print("Accuracy is %.4f" % test_acc)
Example #2
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

from net import CNN
import ShannonAndBirch

# Neural-network hyperparameters
batch_size = 128
learning_rate = 1e-3
num_epoches = 40
USE_GPU = torch.cuda.is_available()
datas = ShannonAndBirch.getdata()
dataset = ShannonAndBirch.trainAndtest(datas, datas[41], batch_size)
print(type(dataset[0]))
model = CNN(1, 2)
if USE_GPU:
    model = model.cuda()


def train():
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)

    for epoch in range(num_epoches):
        print('epoch {}'.format(epoch + 1))
        print('*' * 10)
        running_loss = 0.0
        running_acc = 0.0
        for i, data in enumerate(dataset[0], 1):
            img, label = data
            # (The scraped snippet is truncated here; what follows is a
            # conventional completion of the training step.)
            if USE_GPU:
                img, label = img.cuda(), label.cuda()
            img, label = Variable(img), Variable(label)

            # Forward pass, loss, backward pass, parameter update
            out = model(img)
            loss = criterion(out, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Accumulate running statistics for the epoch
            running_loss += loss.item() * label.size(0)
            _, pred = torch.max(out, 1)
            running_acc += (pred == label).sum().item()
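
Note that `Variable` is a legacy API: since PyTorch 0.4, tensors carry autograd state themselves and the wrapper is a no-op. A device-agnostic sketch of the same GPU handling in current PyTorch:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
# ...and inside the batch loop:
img, label = img.to(device), label.to(device)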
Example #3
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
# Project-level names (CNN, LSTM, vocab, IDDataset, collate_fn, to_var) are
# assumed to come from the surrounding repository.


def main(args):
    # Create model directory
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)

    # Load the training and test splits
    is_training = True
    training_data = IDDataset(is_training)
    testing_data = IDDataset(not is_training)

    # Build data loader
    data_loader = DataLoader(training_data,
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=args.num_workers,
                             collate_fn=collate_fn)

    # Build the models
    encoder = CNN(args.hidden_size)
    decoder = LSTM(args.embed_size, args.hidden_size, len(vocab),
                   args.num_layers)

    if torch.cuda.is_available():
        encoder.cuda()
        decoder.cuda()

    # Loss and optimizer: only the decoder and the encoder's final linear/bn
    # layers are optimized; the convolutional backbone is not updated
    criterion = nn.CrossEntropyLoss()
    params = list(decoder.parameters()) + list(
        encoder.linear.parameters()) + list(encoder.bn.parameters())
    optimizer = torch.optim.Adam(params, lr=args.learning_rate)

    # Train the Models
    total_step = len(data_loader)
    for epoch in range(args.num_epochs):
        for i, (image_batch, id_batch) in enumerate(data_loader):
            # Wrap the mini-batch; targets are the id sequence shifted by one
            images = to_var(image_batch)
            captions = to_var(id_batch)
            targets = to_var(id_batch[:, 1:])

            # Forward, Backward and Optimize
            decoder.zero_grad()
            encoder.zero_grad()
            features = encoder(images)
            outputs = decoder(features, captions)
            # Cross-entropy averaged over the sequence, one timestep at a time
            loss = 0
            id_len = targets.size(1)
            for j in range(id_len):
                loss += criterion(outputs[:, j, :], targets[:, j]) / id_len
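            # A vectorized sketch equivalent to the loop above: with the
            # default 'mean' reduction, CrossEntropyLoss averages over all
            # batch and timestep positions at once:
            #   loss = criterion(outputs.reshape(-1, outputs.size(-1)),
            #                    targets.reshape(-1))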
            loss.backward()
            optimizer.step()

            # Print log info
            if i % args.log_step == 0:
                print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f' %
                      (epoch, args.num_epochs, i, total_step, loss.item()))

    # Save the final models (file names record the last epoch and step)
    torch.save(
        decoder.state_dict(),
        os.path.join(args.model_path,
                     'decoder-%d-%d.pkl' % (epoch + 1, i + 1)))
    torch.save(
        encoder.state_dict(),
        os.path.join(args.model_path,
                     'encoder-%d-%d.pkl' % (epoch + 1, i + 1)))
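
The files written above are plain state_dicts, which is exactly what Example #1 reloads. A minimal sketch of restoring such a checkpoint on a CPU-only machine (the file names here are illustrative):

encoder = CNN(args.hidden_size)
decoder = LSTM(args.embed_size, args.hidden_size, len(vocab), args.num_layers)
encoder.load_state_dict(torch.load('models/encoder-5-100.pkl', map_location='cpu'))
decoder.load_state_dict(torch.load('models/decoder-5-100.pkl', map_location='cpu'))
encoder.eval()
decoder.eval()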