import os

import torch
import torch.nn.functional as F
from torch import optim

# Net, measure_size and test are assumed to be defined elsewhere in this module.


def train(epochs, train_loader, dev_loader, lr, seed, log_interval,
          output_dir):
    """Train the model. Store snapshot models in the output_dir alongside
    evaluations on the dev set after each epoch
    """

    model = Net()

    optimizer = optim.Adam(model.parameters(), lr=lr)

    measure_size(model)

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    print("Using device: ", device)

    torch.manual_seed(seed)
    if use_cuda:
        torch.cuda.manual_seed(seed)

    #torch.backends.cudnn.benchmark = False
    #torch.backends.cudnn.deterministic = True

    model.to(device)

    for epoch in range(1, epochs + 1):

        model.train()
        total_loss = 0.0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            data = data.unsqueeze(1)  # add a channel dimension

            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            total_loss += loss.item()
            loss.backward()
            optimizer.step()

            if batch_idx % log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))

        print("Total loss = %.6f" % (total_loss / len(train_loader.dataset)))

        # Evaluate on the dev set and write the per-epoch results to output_dir.
        test(model, dev_loader,
             os.path.join(output_dir, 'dev-eer-' + str(epoch)))

        # Snapshot the full model after each epoch.
        torch.save(model, os.path.join(output_dir,
                                       'iter' + str(epoch) + '.mdl'))
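

# A minimal usage sketch, not taken from the original source: the feature
# shape (40x100), the number of classes and the random tensors below are
# assumptions; only the train() signature above comes from this file.
if __name__ == '__main__':
    from torch.utils.data import DataLoader, TensorDataset

    fake_x = torch.randn(1000, 40, 100)    # assumed feature shape
    fake_y = torch.randint(0, 2, (1000,))  # assumed two-class labels
    train_loader = DataLoader(TensorDataset(fake_x, fake_y),
                              batch_size=32, shuffle=True)
    dev_loader = DataLoader(TensorDataset(fake_x, fake_y), batch_size=32)

    train(epochs=10, train_loader=train_loader, dev_loader=dev_loader,
          lr=1e-3, seed=0, log_interval=100, output_dir='exp/run1')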
import os
import time

import cv2
import torch
from PIL import Image
from tqdm import tqdm

# model, device, transform and data_path come from earlier parts of the
# original example; only the checkpoint loading and inference loop are shown.

#new_state_dict = OrderedDict()

# A checkpoint saved from an nn.DataParallel model must be re-keyed (the
# 'module.' prefix stripped from every key) before it can be loaded on the CPU.
'''for k, v in checkpoint.items():
    name = k[7:]  # strip the 'module.' prefix
    new_state_dict[name] = v

model.load_state_dict(new_state_dict)'''
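
# A self-contained sketch of the re-keying described above (an assumption, not
# part of the original code): strip the 'module.' prefix that nn.DataParallel
# prepends to every parameter name before loading on a single device.
def strip_dataparallel_prefix(state_dict):
    from collections import OrderedDict
    return OrderedDict(
        (k[len('module.'):] if k.startswith('module.') else k, v)
        for k, v in state_dict.items())

# Hypothetical usage:
# checkpoint = torch.load('./weights/some_dataparallel_model.pth.tar',
#                         map_location='cpu')
# model.load_state_dict(strip_dataparallel_prefix(checkpoint))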
#model.load_state_dict(torch.load('./weights/best-8-24.pth.tar'))

# Load the trained weights and switch the network to inference mode.
model.load_state_dict(torch.load('./weights/best_model-20200904.pth.tar'))

model.eval()

model = model.to(device)

a = 0
b = 0

start_time = time.time()

for file in tqdm(os.listdir(data_path)):
    img_path = os.path.join(data_path, file)

    img = cv2.imread(img_path)  # OpenCV loads images as BGR
    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(img)
    frame = transform(img)
    frame = torch.unsqueeze(frame, 0)
    frame = frame.to(device)
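
    # The rest of this loop is missing from the snippet. The two lines below
    # are a sketch of the usual next step, given as an assumption rather than
    # the original code: run the preprocessed frame through the network and
    # take the arg-max class index.
    with torch.no_grad():
        pred = model(frame).argmax(dim=1).item()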


# Example #3
import os

from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets

# image_transforms, data_path, device and Net come from earlier parts of the
# original example that are not shown here.
data = {
    'train':
    datasets.ImageFolder(root = os.path.join(data_path, 'train'),
                         transform=image_transforms['train']),
    'valid':
    datasets.ImageFolder(root = os.path.join(data_path, 'valid'),
                         transform=image_transforms['valid'])
}
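
# image_transforms is used above but its definition is not part of this
# snippet. A typical definition is sketched below purely as an assumption;
# the crop sizes and augmentations are illustrative, not from the source.
from torchvision import transforms

image_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ]),
    'valid': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ]),
}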


# DataLoader iterators; make sure to shuffle.
dataloaders = {
    'train': DataLoader(data['train'], batch_size=64, shuffle=True,
                        num_workers=8, pin_memory=True),
    'valid': DataLoader(data['valid'], batch_size=64, shuffle=True,
                        num_workers=8, pin_memory=True),
}

net = Net()
print(net)
net.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-5)

epochs = 300

eval_acc_list = []
is_best = False
### Train the network
for epoch in range(epochs):
    train_loss = 0.
    train_acc = 0.
    for inputs, targets in dataloaders['train']:
        # Move the inputs to the training device.
        inputs = inputs.to(device)