def train(train_loader, net, criterion, optimizer, epoch):
    """Run one training epoch of `net` over `train_loader`.

    Computes loss on (point-cloud, multi-view) pairs, steps `optimizer`
    once per batch, and logs running loss / top-1 accuracy every
    `config.print_freq` batches.
    """
    batch_time = meter.TimeMeter(True)
    data_time = meter.TimeMeter(True)
    losses = meter.AverageValueMeter()
    prec = meter.ClassErrorMeter(topk=[1], accuracy=True)

    net.train()  # enable dropout / batch-norm training behavior

    for batch_idx, (views, pcs, labels) in enumerate(train_loader):
        batch_time.reset()

        # Move the whole batch onto the configured device.
        views = views.to(device=config.device)
        pcs = pcs.to(device=config.device)
        labels = labels.to(device=config.device)

        preds = net(pcs, views)  # bz x C x H x W
        loss = criterion(preds, labels)

        # Record metrics before the parameter update.
        prec.add(preds.detach(), labels.detach())
        losses.add(loss.item())  # batchsize

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % config.print_freq == 0:
            print(f'Epoch: [{epoch}][{batch_idx}/{len(train_loader)}]\t'
                  f'Batch Time {batch_time.value():.3f}\t'
                  f'Epoch Time {data_time.value():.3f}\t'
                  f'Loss {losses.value()[0]:.4f} \t'
                  f'Prec@1 {prec.value(1):.3f}\t')

    print(f'prec at epoch {epoch}: {prec.value(1)} ')
def validate(val_loader, net, epoch):
    """Evaluate `net` on the validation set for one epoch.

    Args:
        val_loader: iterable of (views, pcs, labels) batches.
        net: model taking (pcs, views) and returning class logits.
        epoch: epoch index, used only for logging.

    Returns:
        Top-1 accuracy over the whole validation set.
    """
    batch_time = meter.TimeMeter(True)
    data_time = meter.TimeMeter(True)
    prec = meter.ClassErrorMeter(topk=[1], accuracy=True)
    # testing mode
    net.eval()
    for i, (views, pcs, labels) in enumerate(val_loader):
        batch_time.reset()
        views = views.to(device=config.device)
        pcs = pcs.to(device=config.device)
        labels = labels.to(device=config.device)
        preds = net(pcs, views)  # bz x C x H x W
        # FIX: this batch was previously added to the meter twice
        # (duplicated line), double-counting every sample; add once.
        prec.add(preds.data, labels.data)
        if i % config.print_freq == 0:
            print(f'Epoch: [{epoch}][{i}/{len(val_loader)}]\t'
                  f'Batch Time {batch_time.value():.3f}\t'
                  f'Epoch Time {data_time.value():.3f}\t'
                  f'Prec@1 {prec.value(1):.3f}\t')
    print(f'mean class accuracy at epoch {epoch}: {prec.value(1)} ')
    return prec.value(1)
def train(train_loader, model, criterion, optimizer, epoch, cfg):
    """Run one training epoch of a multi-view CNN over `train_loader`.

    Supports an auxiliary head (`cfg.have_aux`), whose loss is weighted
    by 0.3 as in the Inception training recipe.

    Args:
        train_loader: iterable of (shapes, labels) batches.
        model: network; returns logits, or (logits, aux_logits) with aux head.
        criterion: classification loss.
        optimizer: optimizer stepped once per batch.
        epoch: epoch index (logging only).
        cfg: config with `cuda`, `have_aux`, `print_freq`.
    """
    batch_time = meter.TimeMeter(True)
    data_time = meter.TimeMeter(True)
    losses = meter.AverageValueMeter()
    prec = meter.ClassErrorMeter(topk=[1], accuracy=True)
    # training mode
    model.train()
    for i, (shapes, labels) in enumerate(train_loader):
        batch_time.reset()
        # bz x 12 x 3 x 224 x 224
        labels = labels.long().view(-1)
        shapes = Variable(shapes)
        labels = Variable(labels)
        if cfg.cuda:
            shapes = shapes.cuda()
            labels = labels.cuda()

        preds = model(shapes)  # bz x C x H x W

        if cfg.have_aux:
            preds, aux = preds
            loss_main = criterion(preds, labels)
            loss_aux = criterion(aux, labels)
            # 0.3 is the conventional auxiliary-loss weight.
            softmax_loss = loss_main + 0.3 * loss_aux
        else:
            softmax_loss = criterion(preds, labels)

        loss = softmax_loss

        prec.add(preds.data, labels.data)
        # FIX: `loss.data[0]` is pre-0.4 PyTorch and raises on 0-dim
        # tensors in every modern version; use `.item()` (consistent
        # with the other train loops in this file).
        losses.add(loss.item(), preds.size(0))  # batchsize

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % cfg.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Batch Time {batch_time:.3f}\t'
                  'Epoch Time {data_time:.3f}\t'
                  'Loss {loss:.4f} \t'
                  'Prec@1 {top1:.3f}\t'.format(epoch, i, len(train_loader),
                                               batch_time=batch_time.value(),
                                               data_time=data_time.value(),
                                               loss=losses.value()[0],
                                               top1=prec.value(1)))
    print('prec at epoch {0}: {1} '.format(epoch, prec.value(1)))
def validate(val_loader, net, epoch, print_pr=False):
    """Evaluate `net` on the validation set: accuracy + retrieval mAP.

    Tracks instance accuracy, per-class (mean class) accuracy over 40
    classes, and retrieval mAP on L2-normalized features.

    Args:
        val_loader: iterable of (views, pcs, labels) batches.
        net: model returning (logits, features) when called with get_fea=True.
        epoch: epoch index (logging only).
        print_pr: if True, also print the precision-recall curve.

    Returns:
        (top-1 instance accuracy, retrieval mAP).
    """
    batch_time = meter.TimeMeter(True)
    data_time = meter.TimeMeter(True)
    prec = meter.ClassErrorMeter(topk=[1], accuracy=True)
    retrieval_map = meter.RetrievalMAPMeter()

    # testing mode
    net.eval()

    total_seen_class = [0 for _ in range(40)]
    total_right_class = [0 for _ in range(40)]

    for i, (views, pcs, labels) in enumerate(val_loader):
        batch_time.reset()
        views = views.to(device=config.device)
        pcs = pcs.to(device=config.device)
        labels = labels.to(device=config.device)

        preds, fts = net(pcs, views, get_fea=True)  # bz x C x H x W
        prec.add(preds.data, labels.data)
        # L2-normalize features before adding to the retrieval meter.
        retrieval_map.add(fts.detach() / torch.norm(fts.detach(), 2, 1, True),
                          labels.detach())

        # Per-class accuracy bookkeeping.
        # FIX: previously used np.argmax on a (possibly CUDA) torch tensor
        # and compared numpy ints against tensors, producing tensor-typed
        # counters; compute argmax with torch and count with plain ints.
        pred_cls = preds.data.argmax(dim=1)
        for j in range(views.size(0)):
            cls = labels[j].item()
            total_seen_class[cls] += 1
            total_right_class[cls] += int(pred_cls[j].item() == cls)

        if i % config.print_freq == 0:
            # FIX: np.float was removed from NumPy (>=1.24); use builtin float.
            print(
                f'Epoch: [{epoch}][{i}/{len(val_loader)}]\t'
                f'Batch Time {batch_time.value():.3f}\t'
                f'Epoch Time {data_time.value():.3f}\t'
                f'Prec@1 {prec.value(1):.3f}\t'
                f'Mean Class accuracy {(np.mean(np.array(total_right_class)/np.array(total_seen_class,dtype=float))):.3f}'
            )

    mAP = retrieval_map.mAP()
    print(f' instance accuracy at epoch {epoch}: {prec.value(1)} ')
    print(
        f' mean class accuracy at epoch {epoch}: {(np.mean(np.array(total_right_class)/np.array(total_seen_class,dtype=float)))} '
    )
    print(f' map at epoch {epoch}: {mAP} ')
    if print_pr:
        print(f'pr: {retrieval_map.pr()}')
    return prec.value(1), mAP
def train(train_loader, model, criterion, optimizer, epoch, cfg):
    """Run one training epoch of the mesh/point model over `train_loader`.

    NOTE(review): `labels.item()` below implies the loader yields
    batches of size 1 — confirm against the DataLoader setup.
    """
    batch_time = meter.TimeMeter(True)
    data_time = meter.TimeMeter(True)
    losses = meter.AverageValueMeter()
    prec = meter.ClassErrorMeter(topk=[1], accuracy=True)

    model.train()  # training mode

    for step, (meshes, adjs, labels) in enumerate(train_loader):
        batch_time.reset()

        # bz x n x 3 -> bz x 3 x n
        meshes = meshes.transpose(1, 2)
        labels = labels.long().view(-1)

        if cfg.cuda:
            meshes, adjs, labels = meshes.cuda(), adjs.cuda(), labels.cuda()

        preds, _ = model(meshes)  # bz x C x H x W
        loss = criterion(preds, labels)

        prec.add(preds.cpu().data.numpy(), labels.item())
        losses.add(loss.item(), preds.size(0))  # batchsize

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % cfg.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Batch Time {batch_time:.3f}\t'
                  'Epoch Time {data_time:.3f}\t'
                  'Loss {loss:.4f} \t'
                  'Prec@1 {top1:.3f}\t'.format(epoch, step, len(train_loader),
                                               batch_time=batch_time.value(),
                                               data_time=data_time.value(),
                                               loss=losses.value()[0],
                                               top1=prec.value(1)))

    print('prec at epoch {0}: {1} '.format(epoch, prec.value(1)))
def validate(val_loader, model, epoch, cfg):
    """Evaluate the multi-view model for one epoch; returns top-1 accuracy.

    When `cfg.have_aux` is set the auxiliary head output is discarded.
    """
    batch_time = meter.TimeMeter(True)
    data_time = meter.TimeMeter(True)
    prec = meter.ClassErrorMeter(topk=[1], accuracy=True)

    model.eval()  # testing mode

    for step, (shapes, labels) in enumerate(val_loader):
        batch_time.reset()

        # bz x 12 x 3 x 224 x 224
        labels = labels.long().view(-1)
        shapes, labels = Variable(shapes), Variable(labels)

        # shift data to GPU
        if cfg.cuda:
            shapes, labels = shapes.cuda(), labels.cuda()

        # forward pass; keep only the main head's logits
        preds = model(shapes)
        if cfg.have_aux:
            preds, aux = preds

        prec.add(preds.data, labels.data)

        if step % cfg.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Batch Time {batch_time:.3f}\t'
                  'Epoch Time {data_time:.3f}\t'
                  'Prec@1 {top1:.3f}\t'.format(epoch, step, len(val_loader),
                                               batch_time=batch_time.value(),
                                               data_time=data_time.value(),
                                               top1=prec.value(1)))

    print('mean class accuracy at epoch {0}: {1} '.format(
        epoch, prec.value(1)))
    return prec.value(1)
def validate(val_loader, model, epoch, cfg):
    """Evaluate the mesh/point model for one epoch; returns top-1 accuracy.

    NOTE(review): `labels.item()` below implies batch size 1 — confirm
    against the DataLoader setup.
    """
    batch_time = meter.TimeMeter(True)
    data_time = meter.TimeMeter(True)
    prec = meter.ClassErrorMeter(topk=[1], accuracy=True)

    model.eval()  # testing mode

    for step, (meshes, adjs, labels) in enumerate(val_loader):
        batch_time.reset()

        # bz x n x 3 -> bz x 3 x n
        meshes = meshes.transpose(1, 2)
        labels = labels.long().view(-1)

        # shift data to GPU
        if cfg.cuda:
            meshes, adjs, labels = meshes.cuda(), adjs.cuda(), labels.cuda()

        # forward pass
        preds, _ = model(meshes)
        prec.add(preds.cpu().data.numpy(), labels.item())

        if step % cfg.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Batch Time {batch_time:.3f}\t'
                  'Epoch Time {data_time:.3f}\t'
                  'Prec@1 {top1:.3f}\t'.format(epoch, step, len(val_loader),
                                               batch_time=batch_time.value(),
                                               data_time=data_time.value(),
                                               top1=prec.value(1)))

    print('mean class accuracy at epoch {0}: {1} '.format(
        epoch, prec.value(1)))
    return prec.value(1)
def test(test_loader, model, cfg):
    """Run the model over the test set, reporting top-1 accuracy and mAP."""
    batch_time = meter.TimeMeter(True)
    data_time = meter.TimeMeter(True)
    prec = meter.ClassErrorMeter(topk=[1], accuracy=True)
    mAP = meter.mAPMeter()

    model.eval()  # evaluation mode

    for step, (shapes, labels) in enumerate(test_loader):
        batch_time.reset()

        # bz x 12 x 3 x 224 x 224
        labels = labels.long().view(-1)
        shapes, labels = Variable(shapes), Variable(labels)

        # shift data to GPU
        if cfg.cuda:
            shapes, labels = shapes.cuda(), labels.cuda()

        # forward pass
        preds = model(shapes)

        # one-hot encode labels for the mAP meter
        labels_oh = torch.zeros(labels.data.size(0), cfg.class_num)\
            .scatter_(1, labels.cpu().data.unsqueeze(1), 1)
        mAP.add(preds.data, labels_oh)
        prec.add(preds.data, labels.data)

        if step % cfg.print_freq == 0:
            print('[{0}/{1}]\t'
                  'Batch Time {batch_time:.3f}\t'
                  'Epoch Time {data_time:.3f}\t'
                  'Prec@1 {top1:.3f}\t'.format(step, len(test_loader),
                                               batch_time=batch_time.value(),
                                               data_time=data_time.value(),
                                               top1=prec.value(1)))

    print('mean class accuracy : {0} '.format(prec.value(1)))
    print('mAP: %f' % mAP.value())
def validate(val_loader, net):
    """Evaluate `net` on the validation set.

    Returns:
        (top-1 accuracy, retrieval mAP) computed from the model's
        logits and feature embeddings.
    """
    batch_time = meter.TimeMeter(True)
    data_time = meter.TimeMeter(True)
    prec = meter.ClassErrorMeter(topk=[1], accuracy=True)
    retrieval = meter.RetrievalMAPMeter()

    net.eval()  # testing mode

    for step, (views, labels) in enumerate(val_loader):
        batch_time.reset()

        # bz x 12 x 3 x 224 x 224
        views = views.to(device=config.device)
        labels = labels.to(device=config.device)

        preds, fts = net(views, get_ft=True)  # bz x C x H x W

        prec.add(preds.detach(), labels.detach())
        retrieval.add(fts.detach(), labels.detach())

        if step % config.print_freq == 0:
            print(f'[{step}/{len(val_loader)}]\t'
                  f'Batch Time {batch_time.value():.3f}\t'
                  f'Epoch Time {data_time.value():.3f}\t'
                  f'Prec@1 {prec.value(1):.3f}\t')

    mAP = retrieval.mAP()
    print(f'mean class accuracy : {prec.value(1)} ')
    print(f'Retrieval mAP : {mAP} ')
    return prec.value(1), mAP
def train(train_loader, net, criterion, optimizer, lr_scheduler, epoch):
    """ train for one epoch on the training set

    Adversarial multi-modal training: four sub-networks (generator,
    discriminator, classifier, fusion head) are updated in sequence on
    each batch, each with its own optimizer/scheduler.

    Args (indexed components):
        criterion: [0] adversarial (real/fake) loss, [1] reconstruction
            loss, [2] classifier-alignment loss, [3] fusion
            classification loss.
        optimizer / lr_scheduler: [0] generator, [1] discriminator,
            [2] classifier, [3] fusion head.
    """
    batch_time = meter.TimeMeter(True)
    data_time = meter.TimeMeter(True)
    losses = meter.AverageValueMeter()
    prec = meter.ClassErrorMeter(topk=[1], accuracy=True)
    # training mode
    net.train()
    # weight of the adversarial term in the generator loss
    alpha = 0.01
    for i, (views, dps, pcs, labels) in enumerate(train_loader):
        batch_time.reset()
        # Data
        views = views.to(device=config.device)
        pcs = pcs.to(device=config.device)
        dps = dps.to(device=config.device)
        labels = labels.to(device=config.device)
        # print(f'DataSize:\nmulti-views:{views.size()}\ndepth-images:{dps.size()}\npoint-cloud:{pcs.size()}\nlabels:{labels.size()}')
        # Network: single forward pass produces per-modality features
        # (f_*), decoder outputs (de_*), discriminator scores (dis_*),
        # classifier outputs (cls_*), fused feature and final logits.
        f_pc, f_mv, f_dp, _, _, _, de_p, de_v, de_d, dis_p, dis_v, dis_d, cls_p, cls_v, cls_d, fea, preds = net(pcs, views, dps)  # bz x C x H x W
        # Generator: reconstruction loss plus adversarial loss that
        # pushes point/view features to look "real" to the discriminator.
        optimizer[0].zero_grad()
        rl1 = criterion[1](de_d, f_dp) + criterion[1](de_p, f_pc) + criterion[1](de_v, f_mv)
        # NOTE(review): target tensors are hard-coded to batch size 20 —
        # this assumes the loader always yields full batches of 20; confirm.
        valid = torch.FloatTensor(20, 1).fill_(1.0).to(device=config.device)
        fake = torch.FloatTensor(20, 1).fill_(0.0).to(device=config.device)
        g_loss = alpha * (criterion[0](dis_v, valid) + criterion[0](dis_p, valid)) + (1 - alpha) * rl1
        # retain_graph=True: the same forward graph is reused by the
        # classifier/discriminator/fusion backward passes below.
        g_loss.backward(retain_graph=True)
        optimizer[0].step()
        lr_scheduler[0].step(epoch=epoch)
        # Classifier: align every modality's classifier output with the
        # point-cloud feature f_pc.
        optimizer[2].zero_grad()
        c_loss = criterion[2](cls_p, f_pc) + criterion[2](cls_v, f_pc) + criterion[2](cls_d, f_pc)  # Different from ARGF
        c_loss.backward(retain_graph=True)
        optimizer[2].step()
        lr_scheduler[2].step(epoch=epoch)
        # Discriminator: depth features are the "real" class; point and
        # view features are "fake".
        optimizer[1].zero_grad()
        real_loss = criterion[0](dis_d, valid)
        # avg_v += torch.sum(dis_d.squeeze().data) / (len(train_loader) * config.pvd_net.train.batch_sz)
        fake_loss = criterion[0](dis_p, fake) + criterion[0](dis_v, fake)
        d_loss = 0.5 * (real_loss + fake_loss)
        d_loss.backward(retain_graph=True)
        optimizer[1].step()
        lr_scheduler[1].step(epoch=epoch)
        # Fusion: supervised classification on the fused prediction;
        # last backward, so the graph can be freed here.
        optimizer[3].zero_grad()
        loss = criterion[3](preds, labels)
        loss.backward()
        optimizer[3].step()
        lr_scheduler[3].step(epoch=epoch)
        # Accuracy/loss tracked on the fusion head only.
        prec.add(preds.detach(), labels.detach())
        losses.add(loss.item())  # batchsize
        if i % config.print_freq == 0:
            print(f'Epoch: [{epoch}][{i}/{len(train_loader)}]\t'
                  f'Batch Time {batch_time.value():.3f}\t'
                  f'Epoch Time {data_time.value():.3f}\t'
                  f'Loss {losses.value()[0]:.4f} \t'
                  f'Prec@1 {prec.value(1):.3f}\t')
    print(f'prec at epoch {epoch}: {prec.value(1)} ')
def get_feature(cfg, test_loader, model, f):
    """Extract features for the whole test set and store them in HDF5 file `f`.

    Also reports top-1 accuracy and mAP while iterating.

    Args:
        cfg: config with `cuda`, `class_num`, `print_freq`.
        test_loader: iterable of (shapes, labels) batches.
        model: network returning (logits, features).
        f: open h5py File; datasets 'features' and 'labels' are created
           and filled.

    NOTE(review): datasets are sized by len(test_loader), which counts
    batches, while one row per *sample* is written — this only matches
    when the loader's batch size is 1; confirm against the caller.
    """
    f.create_dataset('features', shape=(len(test_loader), 2048))
    f.create_dataset('labels', shape=(len(test_loader), 1))
    batch_time = meter.TimeMeter(True)
    data_time = meter.TimeMeter(True)
    prec = meter.ClassErrorMeter(topk=[1], accuracy=True)
    mAP = meter.mAPMeter()
    # FIX: accumulate per-batch arrays in lists and stack once at the
    # end; the previous np.vstack-per-batch was O(n^2) in total copies.
    feature_chunks = []
    label_chunks = []
    # evaluation mode
    model.eval()
    for i, (shapes, labels) in enumerate(test_loader):
        batch_time.reset()
        # bz x 12 x 3 x 224 x 224
        labels = labels.long().view(-1)
        shapes = Variable(shapes)
        labels = Variable(labels)
        # shift data to GPU
        if cfg.cuda:
            shapes = shapes.cuda()
            labels = labels.cuda()
        # forward pass
        preds, features = model(shapes)
        # one-hot labels for the mAP meter
        labels_oh = torch.zeros(labels.data.size(0), cfg.class_num)\
            .scatter_(1, labels.cpu().data.unsqueeze(1), 1)
        mAP.add(preds.data, labels_oh)
        prec.add(preds.data, labels.data)
        feature_chunks.append(features.data.cpu().numpy())
        label_chunks.append(labels.data.cpu().numpy().reshape((-1, 1)))
        if i % cfg.print_freq == 0:
            print('[{0}/{1}]\t'
                  'Batch Time {batch_time:.3f}\t'
                  'Epoch Time {data_time:.3f}\t'
                  'Prec@1 {top1:.3f}\t'.format(i, len(test_loader),
                                               batch_time=batch_time.value(),
                                               data_time=data_time.value(),
                                               top1=prec.value(1)))
    print('mean class accuracy : {0} '.format(prec.value(1)))
    print('mAP: %f' % mAP.value())
    feature_all = np.vstack(feature_chunks)
    label_all = np.vstack(label_chunks)
    f['features'][:] = feature_all
    f['labels'][:] = label_all