# NOTE(review): scraped fragment — the indentation below is deeper than this
# line, so this chunk is not runnable as-is; it appears to come from the middle
# of a PointNet part-segmentation training script's per-epoch test step.
# Grab a single test batch (index discarded into `j`).
j, data = next(enumerate(testdataloader, 0))
            # points: per-shape point cloud, target: per-point part labels
            # -- assumes points is (B, N, 3) and target is (B, N); TODO confirm.
            points, target = data
            points = points.transpose(2, 1)  # -> (B, 3, N), channels-first for Conv1d
            points, target = points.cuda(), target.cuda()
            classifier = classifier.eval()  # eval mode (freezes dropout/batchnorm behavior)
            pred, _, _ = classifier(points)
            pred = pred.view(-1, num_classes)  # flatten to (B*N, num_classes) log-probs
            target = target.view(-1, 1)[:, 0] - 1  # labels are 1-based on disk; shift to 0-based
            loss = F.nll_loss(pred, target)  # expects log-probabilities from the model
            pred_choice = pred.data.max(1)[1]  # per-point argmax class index
            correct = pred_choice.eq(target.data).cpu().sum()
            # NOTE(review): `i` is not assigned in this fragment (the batch index
            # above was bound to `j`) — presumably stale from the enclosing
            # training loop; verify against the full script.
            # NOTE(review): accuracy assumes exactly 2500 points per shape and a
            # full batch — the final, smaller batch would be under-counted.
            print('[%d: %d/%d] %s loss: %f accuracy: %f' %
                  (epoch, i, num_batch, blue('test'), loss.item(),
                   correct.item() / float(opt.batchSize * 2500)))

    # Checkpoint the segmentation model once per epoch.
    torch.save(classifier.state_dict(),
               '%s/seg_model_%s_%d.pth' % (opt.outf, opt.class_choice, epoch))

## benchmark mIOU
# Per-shape IoU accumulator for the full test-set sweep below.
shape_ious = []
for i, data in tqdm(enumerate(testdataloader, 0)):
    points, target = data
    points = points.transpose(2, 1)  # (B, 3, N) as above
    points, target = points.cuda(), target.cuda()
    classifier = classifier.eval()
    pred, _, _ = classifier(points)
    pred_choice = pred.data.max(2)[1]  # argmax over the class dim of (B, N, C)

    # Move to numpy for the IoU computation (continues past this fragment).
    pred_np = pred_choice.cpu().data.numpy()
    target_np = target.cpu().data.numpy() - 1  # same 1-based -> 0-based shift
# ---- 예제 #2 ("Example #2", 0) — scrape-artifact separator, not executable code ----
            # NOTE(review): scraped fragment — starts mid-loop (the fetch of
            # `data` and the loop header are outside this chunk). Same per-epoch
            # test step as the previous example; see notes there.
            points, target = data
            points = points.transpose(2, 1)  # (B, 3, N) for Conv1d input
            points, target = points.cuda(), target.cuda()
            classifier = classifier.eval()
            pred, _, _ = classifier(points)
            pred = pred.view(-1, num_classes)  # (B*N, num_classes) log-probs
            target = target.view(-1, 1)[:, 0] - 1  # 1-based labels -> 0-based
            loss = F.nll_loss(pred, target)
            pred_choice = pred.data.max(1)[1]  # per-point predicted class
            correct = pred_choice.eq(target.data).cpu().sum()
            # NOTE(review): accuracy denominator hard-codes 2500 points/shape
            # and a full batch — TODO confirm against the dataset.
            print('[%d: %d/%d] %s loss: %f accuracy: %f' %
                  (epoch, i, num_batch, blue('test'), loss.item(),
                   correct.item() / float(opt.batchSize * 2500)))

    torch.save(
        classifier.state_dict(), '%s/seg_model_%s_%d.pth' %
        (opt.outf, opt.class_choice, epoch))  # saved e.g. as seg/seg_model_Chair_1.pth

## benchmark mIOU #mIOU = TP/(FP+FN+TP)
shape_ious = []  # per-shape IoU accumulator for the test sweep
for i, data in tqdm(enumerate(testdataloader, 0)):  # tqdm progress bar
    points, target = data
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    classifier = classifier.eval()
    pred, _, _ = classifier(points)
    pred_choice = pred.data.max(2)[1]  # argmax over class dim of (B, N, C)

    pred_np = pred_choice.cpu().data.numpy()
    target_np = target.cpu().data.numpy() - 1  # 1-based -> 0-based labels
# ---- 예제 #3 ("Example #3", 0) — scrape-artifact separator, not executable code ----
                # NOTE(review): scraped fragment — starts mid-loop at a deeper
                # indent than the previous examples; the enclosing loop headers
                # are not visible here. Same seg test step as above.
                points, target = data
                points = points.transpose(2, 1)  # (B, 3, N) for Conv1d
                points, target = points.cuda(), target.cuda()
                classifier = classifier.eval()
                pred, _, _ = classifier(points)
                pred = pred.view(-1, num_classes)  # (B*N, num_classes) log-probs
                target = target.view(-1, 1)[:, 0] - 1  # 1-based -> 0-based
                loss = F.nll_loss(pred, target)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(target.data).cpu().sum()
                # NOTE(review): 2500 points/shape and full batch are assumed in
                # the accuracy denominator — TODO confirm.
                print('[%d: %d/%d] %s loss: %f accuracy: %f' %
                      (epoch, i, num_batch, blue('test'), loss.item(),
                       correct.item() / float(opt.batchSize * 2500)))

        # Per-epoch checkpoint of the segmentation model.
        torch.save(
            classifier.state_dict(),
            '%s/seg_model_%s_%d.pth' % (opt.outf, opt.class_choice, epoch))

    ## benchmark mIOU
    shape_ious = []
    for i, data in tqdm(enumerate(testdataloader, 0)):
        points, target = data
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        classifier = classifier.eval()
        pred, _, _ = classifier(points)
        pred_choice = pred.data.max(2)[1]  # argmax over class dim of (B, N, C)

        pred_np = pred_choice.cpu().data.numpy()
        target_np = target.cpu().data.numpy() - 1
                # NOTE(review): the scrape splices into a DIFFERENT snippet here
                # (a GAN point-cloud completion script): TensorBoard validation
                # logging for generator/discriminator/chamfer losses. The "27"
                # step multiplier presumably equals the number of validation
                # batches per epoch — TODO confirm.
                writer.add_scalar('validation errG_global', errG_g_eval.item(),
                                  27 * epoch + n)
                writer.add_scalar('validation errG_local', errG_l_eval.item(),
                                  27 * epoch + n)
                writer.add_scalar('validation chamfer_loss',
                                  chamferloss_eval.item(), 27 * epoch + n)
                writer.add_scalar('validation errG_loss', loss_eval.item(),
                                  27 * epoch + n)

                # Optional (disabled) per-parameter histogram logging.
                #for name, param in globalD.named_parameters():
                #    writer.add_histogram(name, param.clone().cpu().data.numpy(), 27 * epoch + n)
                #for name, param in localD.named_parameters():
                #    writer.add_histogram(name, param.clone().cpu().data.numpy(), 27 * epoch + n)
                #for name, param in netG.named_parameters():
                #    writer.add_histogram(name, param.clone().cpu().data.numpy(), 27 * epoch + n)

        # Advance learning-rate schedules once per epoch.
        schedulerG.step()
        schedulerD.step()

    # Best-loss checkpointing of generator and both discriminators.
    # NOTE(review): filenames embed the float loss value, so each improvement
    # writes a new file rather than overwriting.
    if loss.item() < best_loss:
        best_loss = loss.item()
        torch.save(netG.state_dict(),
                   '%s/com_model_G_%f_%d.pth' % (opt.outf, loss.item(), epoch))
        torch.save(
            localD.state_dict(),
            '%s/com_model_localD_%f_%d.pth' % (opt.outf, errD.item(), epoch))
        torch.save(
            globalD.state_dict(),
            '%s/com_model_globalD_%f_%d.pth' % (opt.outf, errD.item(), epoch))
# ---- 예제 #5 ("Example #5", 0) — scrape-artifact separator, not executable code ----
        # NOTE(review): scraped fragment — tail of a training-loop iteration;
        # `pred`/`target`/`optimizer` are bound earlier, outside this chunk.
        loss = F.nll_loss(pred, target)
        loss_buf.append(loss.detach().cpu().numpy())
        # NOTE(review): the feature-transform regularizer is added AFTER the raw
        # loss was appended to loss_buf, so the epoch average excludes it —
        # presumably intentional, but verify.
        if opt.feature_transform:
            loss += feature_transform_reguliarzer(trans_feat) * 0.001
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]  # per-point predicted class
        correct = pred_choice.eq(target.data).cpu().sum()
        # print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), correct.item()/float(opt.batchSize * 2500)))

    # finish one epoch
    print('Epoch %d: Train loss: %.2f, Time: %.2f s' % (epoch, np.mean(loss_buf), time.time() - epoch_start_time))
    # every ten epoch: save model & evaluate

    if (epoch + 1) % 10 == 0:
        torch.save(classifier.state_dict(), '%s/seg_model_%s_%d.pth' % (opt.outf, opt.class_choice, epoch + 1))
        # evaluate on test set
        loss_buf = []
        correct_buf = []
        iou_buf = []
        # NOTE(review): this eval loop does not call classifier.eval() or wrap
        # in torch.no_grad() within this fragment — dropout/batchnorm stay in
        # train mode and gradients are tracked; verify against the full script.
        for i, data in enumerate(testdataloader, 0):
            points, target = data
            points, target = Variable(points), Variable(target)  # legacy pre-0.4 API; tensors work directly now
            points = points.transpose(2, 1)  # (B, 3, N) for Conv1d
            points, target = points.cuda(), target.cuda()
            pred, _, _ = classifier(points)
            pred = pred.view(-1, num_classes)  # (B*N, num_classes) log-probs
            target = target.view(-1, 1)[:, 0] - 1  # 1-based labels -> 0-based
            loss = F.nll_loss(pred, target)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()