def train(epoch, args):
    """Run one training epoch of `net` over the CASIA landmark dataset.

    Streams batches from ImageDataset, performs a forward/backward pass per
    batch with `criterion` (A-Softmax-style loss: the net returns a
    (cos_theta, phi_theta) tuple), and prints a running loss/accuracy line.

    Relies on module-level globals: net, optimizer, criterion, use_cuda,
    ImageDataset, dataset_load, printoneline, dt, Variable.
    """
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    batch_idx = 0
    ds = ImageDataset(args.dataset, dataset_load, 'data/casia_landmark.txt',
                      name=args.net + ':train',
                      bs=args.bs, shuffle=True, nthread=6, imagesize=128)
    while True:
        img, label = ds.get()
        if img is None:  # dataset exhausted
            break
        inputs = torch.from_numpy(img).float()
        targets = torch.from_numpy(label[:, 0]).long()
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        # BUG FIX: the original unpacked `net(inputs)` into outputs_1, outputs_2
        # and then evaluated `outputs = loss = criterion(outputs, targets)`,
        # referencing the still-undefined name `outputs` on the right-hand side
        # (NameError on the very first batch). Keep the full tuple returned by
        # the net and feed it to the criterion, as the v3 train() below does.
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        lossd = loss.item()  # .item() replaces the long-deprecated .data[0]
        loss.backward()
        optimizer.step()
        train_loss += lossd
        outputs = outputs[0]  # 0=cos_theta 1=phi_theta
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        printoneline(dt(), 'Te=%d Loss=%.4f | AccT=%.4f%% (%d/%d) %.4f %.2f %d'
                     % (epoch, train_loss / (batch_idx + 1),
                        100.0 * correct / total, correct, total,
                        lossd, criterion.lamb, criterion.it))
        batch_idx += 1
    print('')
def train(epoch,args):
    # Adversarial mask-training epoch.
    # A maskNet produces a multiplicative mask over featureNet's feature maps;
    # the mask is optimized to HURT classification (note the negated criterion2
    # term) while staying spatially smooth (Laplacian "compactness" penalty)
    # and close to all-ones (L1 "size" penalty). In the same iteration, fcNet
    # is trained the usual way to classify the masked features.
    # NOTE(review): the /10, /1000000, /10000 divisors look like hand-tuned
    # loss weights — confirm against the experiment config before changing.
    featureNet.train()
    maskNet.train()
    fcNet.train()
    train_loss = 0
    classification_loss = 0
    correct = 0
    total = 0
    batch_idx = 0
    ds = ImageDataset(args.dataset,dataset_load,'data/casia_landmark.txt',name=args.net+':train',
        bs=args.bs,shuffle=True,nthread=6,imagesize=128)
    while True:
        img,label = ds.get()
        if img is None: break  # dataset exhausted
        inputs = torch.from_numpy(img).float()
        targets = torch.from_numpy(label[:,0]).long()
        if use_cuda: inputs, targets = inputs.cuda(), targets.cuda()
        optimizerMask.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        # Forward: features -> learned mask -> element-wise masked features -> fc head.
        features = featureNet(inputs)
        mask = maskNet(features)
        maskedFeatures = torch.mul(mask, features)
        outputs = fcNet(maskedFeatures)
        outputs1 = outputs[0] # 0=cos_theta 1=phi_theta
        _, predicted = torch.max(outputs1.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        # training the advNet:
        # lossAdv is computed but unused below — presumably kept for debugging.
        lossAdv = criterion(outputs, targets)
        # Smoothness penalty: Laplacian filtered per-channel (groups=512 assumes
        # a 512-channel feature map — TODO confirm featureNet's output width).
        lossCompact = torch.sum(conv2d(mask, laplacianKernel, stride=1, groups=512)) # lossSize
        #L1 norm of the mask to make the mask sparse.
        lossSize = F.l1_loss(mask, target=torch.ones(mask.size()).cuda(), size_average = False)
        print("advnet:", - criterion2(outputs1, targets).data/10, lossCompact.data/1000000, lossSize.data/10000)
        # Adversarial objective: maximize classification loss (negated term)
        # subject to the smoothness and size regularizers.
        loss = - criterion2(outputs1, targets)/10 + lossCompact/1000000 + lossSize/10000
        lossd = loss.data
        # retain_graph=True: the same forward graph is reused just below for the
        # classifier's backward pass — do not reorder these two update steps.
        loss.backward(retain_graph=True)
        optimizerMask.step()
        # Second update: train the fc head to classify the masked features.
        optimizerFC.zero_grad()
        lossC = criterion(outputs, targets)
        lossClassification = lossC.data
        lossC.backward()
        optimizerFC.step()
        classification_loss += lossClassification
        train_loss += loss.data
        print("classification loss:", classification_loss / (batch_idx + 1))
        printoneline(dt(),'Te=%d Loss=%.4f | AccT=%.4f%% (%d/%d) %.4f %.2f %d\n'
            % (epoch,train_loss/(batch_idx+1), 100.0*correct/total, correct, total,
            lossd, criterion.lamb, criterion.it))
        batch_idx += 1
        # break
    print('')
def train(epoch, args):
    """Train `net` for a single epoch, logging loss/accuracy every 10 batches.

    Batches are pulled from an ImageDataset stream until it returns None.
    Uses module-level globals: net, optimizer, criterion, use_cuda,
    ImageDataset, dataset_load, dt, Variable.
    """
    net.train()
    running_loss = 0
    n_correct = 0
    n_seen = 0
    step = 0
    ds = ImageDataset(imageroot=args.dataset,
                      callback=dataset_load,
                      imagelistfile=args.data_list,
                      name=args.net + ':train',
                      batchsize=args.batchsize,
                      shuffle=True,
                      nthread=args.nthread,
                      imagesize=128)
    batch_num = ds.imagenum // args.batchsize
    while True:
        img, label = ds.get()
        if img is None:
            # Stream exhausted: epoch done.
            break
        x = torch.from_numpy(img).float()
        y = torch.from_numpy(label[:, 0]).long()
        if use_cuda:
            x, y = x.cuda(), y.cuda()
        optimizer.zero_grad()
        x, y = Variable(x), Variable(y)
        out = net(x)
        loss = criterion(out, y)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        cos_theta = out[0]  # 0=cos_theta 1=phi_theta
        pred = cos_theta.data.max(1)[1]  # argmax over classes
        n_seen += y.size(0)
        n_correct += pred.eq(y.data).sum().item()
        if step % 10 == 0:
            print(dt(),
                  'Epoch=%d batch: %d/%d Loss=%.4f | Acc=%.4f%%'
                  % (epoch, step, batch_num,
                     running_loss / (step + 1),
                     n_correct * 100.0 / n_seen))
        step += 1