Example #1
0
def test():
    """Evaluate the model on the validation set, one class at a time.

    Relies on module-level globals: ``args``, ``model``, ``dataset_val``,
    ``directory_mesh``, ``srf``, ``imageio``, ``img_cvt``, ``AverageMeter``.
    For every class the per-batch IoU is accumulated over all evaluation
    batches, demo meshes/input images are saved every ``args.save_freq``
    objects, and per-class plus overall mean IoU are printed.
    """
    end = time.time()
    batch_time = AverageMeter()

    iou_all = []  # mean IoU (percentage) per class

    for class_id, class_name in dataset_val.class_ids_pair:

        directory_mesh_cls = os.path.join(directory_mesh, class_id)
        os.makedirs(directory_mesh_cls, exist_ok=True)
        iou = 0  # running sum of per-object IoU for this class

        for i, (im, vx) in enumerate(
                dataset_val.get_all_batches_for_evaluation(
                    args.batch_size, class_id)):
            # torch.autograd.Variable is a deprecated no-op wrapper since
            # PyTorch 0.4; moving the tensor to the GPU directly is equivalent.
            images = im.cuda()
            voxels = vx.numpy()

            batch_iou, vertices, faces = model(images,
                                               voxels=voxels,
                                               task='test')
            iou += batch_iou.sum()

            batch_time.update(time.time() - end)
            end = time.time()

            # save demo meshes and input images every args.save_freq objects
            for k in range(vertices.size(0)):
                obj_id = (i * args.batch_size + k)
                if obj_id % args.save_freq == 0:
                    mesh_path = os.path.join(directory_mesh_cls,
                                             '%06d.obj' % obj_id)
                    input_path = os.path.join(directory_mesh_cls,
                                              '%06d.png' % obj_id)
                    srf.save_obj(mesh_path, vertices[k], faces[k])
                    imageio.imsave(input_path, img_cvt(images[k]))

            # print progress
            if i % args.print_freq == 0:
                print('Iter: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f}\t'
                      'IoU {2:.3f}\t'.format(
                          i, ((dataset_val.num_data[class_id] * 24) //
                              args.batch_size),
                          batch_iou.mean(),
                          batch_time=batch_time))

        # presumably 24 rendered views per object (matches the *24 above) —
        # divide by views and object count, scale to a percentage
        iou_cls = iou / 24. / dataset_val.num_data[class_id] * 100
        iou_all.append(iou_cls)
        print('=================================')
        print('Mean IoU: %.3f for class %s' % (iou_cls, class_name))
        print('\n')

    print('=================================')
    print('Mean IoU: %.3f for all classes' % (sum(iou_all) / len(iou_all)))
Example #2
0
def train():
    """Run the training loop, logging train and validation losses.

    Relies on module-level globals: ``args``, ``model``, ``optimizer``,
    ``dataset_train``, ``dataset_val``, ``start_iter``,
    ``directory_output``, ``image_output``, ``srf``, ``imageio``,
    ``img_cvt``, ``AverageMeter``.  Both losses are written to a
    TensorBoard ``SummaryWriter``; checkpoints and demo meshes/images are
    saved at the configured frequencies.
    """
    writer = SummaryWriter()
    print("Made summary writer")

    end = time.time()
    batch_time = AverageMeter()
    losses = AverageMeter()

    for i in range(start_iter, args.num_iterations + 1):
        # adjust learning rate and sigma_val (decay after 150k iter)
        lr = adjust_learning_rate([optimizer],
                                  args.learning_rate,
                                  i,
                                  method=args.lr_type)
        model.set_sigma(adjust_sigma(args.sigma_val, i))

        # TRAIN
        # load images from multi-view
        images_a, images_b, viewpoints_a, viewpoints_b = dataset_train.get_random_batch(
            args.batch_size)
        images_a = images_a.cuda()
        images_b = images_b.cuda()
        viewpoints_a = viewpoints_a.cuda()
        viewpoints_b = viewpoints_b.cuda()

        # soft render images
        render_images, laplacian_loss, flatten_loss = model(
            [images_a, images_b], [viewpoints_a, viewpoints_b], task='train')
        laplacian_loss = laplacian_loss.mean()
        flatten_loss = flatten_loss.mean()

        # total loss: multi-view silhouette IoU plus mesh regularizers
        loss = multiview_iou_loss(render_images, images_a, images_b) + \
               args.lambda_laplacian * laplacian_loss + \
               args.lambda_flatten * flatten_loss

        # VAL
        # The validation loss is only logged, never backpropagated, so
        # evaluate under no_grad to avoid building a useless autograd graph.
        with torch.no_grad():
            val_images_a, val_images_b, val_viewpoints_a, val_viewpoints_b = dataset_val.get_random_batch(
                args.batch_size)
            val_images_a = val_images_a.cuda()
            val_images_b = val_images_b.cuda()
            val_viewpoints_a = val_viewpoints_a.cuda()
            val_viewpoints_b = val_viewpoints_b.cuda()

            # soft render images
            val_render_images, val_laplacian_loss, val_flatten_loss = model(
                [val_images_a, val_images_b],
                [val_viewpoints_a, val_viewpoints_b],
                task='train')
            val_laplacian_loss = val_laplacian_loss.mean()
            val_flatten_loss = val_flatten_loss.mean()

            val_loss = multiview_iou_loss(val_render_images, val_images_a,
                                          val_images_b) + \
                args.lambda_laplacian * val_laplacian_loss + \
                args.lambda_flatten * val_flatten_loss

        # RECORD LOSSES
        writer.add_scalar('Loss/train', loss.data.item(), i)
        writer.add_scalar('Loss/val', val_loss.data.item(), i)
        losses.update(loss.data.item(), images_a.size(0))

        # compute gradient and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        # save checkpoint
        if i % args.save_freq == 0:
            model_path = os.path.join(directory_output,
                                      'checkpoint_%07d.pth.tar' % i)
            torch.save(
                {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, model_path)

        # save demo mesh and images
        if i % args.demo_freq == 0:
            demo_image = images_a[0:1]
            demo_path = os.path.join(directory_output, 'demo_%07d.obj' % i)
            demo_v, demo_f = model.reconstruct(demo_image)
            srf.save_obj(demo_path, demo_v[0], demo_f[0])

            fake_img = img_cvt(render_images[0][0])
            real_img = img_cvt(images_a[0])
            imageio.imsave(os.path.join(image_output, '%07d_fake.png' % i),
                           fake_img)
            imageio.imsave(os.path.join(image_output, '%07d_input.png' % i),
                           real_img)

        # print progress
        if i % args.print_freq == 0:
            print('Iter: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f}\t'
                  'Loss {loss.val:.3f}\t'
                  'lr {lr:.6f}\t'
                  'sv {sv:.6f}\t'.format(
                      i,
                      args.num_iterations,
                      batch_time=batch_time,
                      loss=losses,
                      lr=lr,
                      sv=model.renderer.rasterizer.sigma_val))
    writer.close()
Example #3
0
def train():
    """Run the training loop and dump the per-iteration IoU-loss history.

    Relies on module-level globals: ``args``, ``model``, ``optimizer``,
    ``dataset_train``, ``start_iter``, ``directory_output``,
    ``image_output``, ``srf``, ``imageio``, ``img_cvt``, ``AverageMeter``.
    On completion the collected IoU losses are printed and written, as a
    Python ``repr`` string, to ``loss.txt`` in the working directory.
    """
    end = time.time()
    batch_time = AverageMeter()
    losses = AverageMeter()
    iou_losses = []  # per-iteration multi-view IoU loss history

    for i in range(start_iter, args.num_iterations + 1):
        # adjust learning rate and sigma_val (decay after 150k iter)
        lr = adjust_learning_rate([optimizer],
                                  args.learning_rate,
                                  i,
                                  method=args.lr_type)
        model.set_sigma(adjust_sigma(args.sigma_val, i))

        # load images from multi-view
        images_a, images_b, viewpoints_a, viewpoints_b = dataset_train.get_random_batch(
            args.batch_size)
        images_a = images_a.cuda()
        images_b = images_b.cuda()
        viewpoints_a = viewpoints_a.cuda()
        viewpoints_b = viewpoints_b.cuda()

        # soft render images
        render_images, laplacian_loss, flatten_loss = model(
            [images_a, images_b], [viewpoints_a, viewpoints_b], task='train')
        laplacian_loss = laplacian_loss.mean()
        flatten_loss = flatten_loss.mean()

        iou_loss = multiview_iou_loss(render_images, images_a, images_b)
        iou_losses.append(iou_loss.data.item())

        # total loss: silhouette IoU plus mesh regularizers
        loss = iou_loss + \
               args.lambda_laplacian * laplacian_loss + \
               args.lambda_flatten * flatten_loss
        losses.update(loss.data.item(), images_a.size(0))

        # compute gradient and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        # save checkpoint
        if i % args.save_freq == 0:
            model_path = os.path.join(directory_output,
                                      'checkpoint_%07d.pth.tar' % i)
            torch.save(
                {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, model_path)

        # save demo mesh and images
        if i % args.demo_freq == 0:
            demo_image = images_a[0:1]
            demo_path = os.path.join(directory_output, 'demo_%07d.obj' % i)
            demo_v, demo_f = model.reconstruct(demo_image)
            srf.save_obj(demo_path, demo_v[0], demo_f[0])

            imageio.imsave(os.path.join(image_output, '%07d_fake.png' % i),
                           img_cvt(render_images[0][0]))
            imageio.imsave(os.path.join(image_output, '%07d_input.png' % i),
                           img_cvt(images_a[0]))

        # print progress
        if i % args.print_freq == 0:
            print('Iter: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f}\t'
                  'Loss {loss.val:.3f}\t'
                  'lr {lr:.6f}\t'
                  'sv {sv:.6f}\t'.format(
                      i,
                      args.num_iterations,
                      batch_time=batch_time,
                      loss=losses,
                      lr=lr,
                      sv=model.renderer.rasterizer.sigma_val))

    print(iou_losses)
    with open('loss.txt', 'w') as f:
        f.write(str(iou_losses))
Example #4
0
def train():
    """Run the training loop with live Visdom image logging.

    Relies on module-level globals: ``args``, ``model``, ``optimizer``,
    ``dataset_train``, ``start_iter``, ``directory_output``,
    ``image_output``, ``srf``, ``imageio``, ``img_cvt``, ``F``,
    ``VisdomLogger``, ``AverageMeter``.  Input and rendered images are
    pushed to a Visdom server on port 8097 every iteration.
    """
    end = time.time()
    batch_time = AverageMeter()
    losses = AverageMeter()

    env_name = args.experiment_id

    # Visdom panels: the two input views plus the four rendered outputs
    img_a_logger = VisdomLogger('images', env=env_name, port=8097, opts=dict(title='img_a'))
    img_b_logger = VisdomLogger('images', env=env_name, port=8097, opts=dict(title='img_b'))
    rnd_logger = [VisdomLogger('images', env=env_name, port=8097, opts=dict(title='rnd_rgb_{}'.format(_i))) for _i in range(4)]

    for i in range(start_iter, args.num_iterations + 1):
        # adjust learning rate and sigma_val (decay after 150k iter)
        lr = adjust_learning_rate([optimizer], args.learning_rate, i, method=args.lr_type)
        model.set_sigma(adjust_sigma(args.sigma_val, i))

        # load images from multi-view
        images_a, images_b, viewpoints_a, viewpoints_b = dataset_train.get_random_batch(args.batch_size)
        images_a = images_a.cuda()
        images_b = images_b.cuda()
        viewpoints_a = viewpoints_a.cuda()
        viewpoints_b = viewpoints_b.cuda()

        # soft render images
        render_images, laplacian_loss, flatten_loss = model([images_a, images_b],
                                                            [viewpoints_a, viewpoints_b],
                                                            task='train')
        # log 4x-upscaled RGB channels of inputs and renders to Visdom
        img_a_logger.log(F.interpolate(images_a.detach().clone(), scale_factor=4).squeeze()[:, :3])
        img_b_logger.log(F.interpolate(images_b.detach().clone(), scale_factor=4).squeeze()[:, :3])
        for _i in range(4):
            rnd_logger[_i].log(F.interpolate(render_images[_i].detach().clone(), scale_factor=4).squeeze()[:, :3])
        # NOTE: a leftover `import ipdb; ipdb.set_trace()` breakpoint was
        # removed here — it halted training on every iteration.
        laplacian_loss = laplacian_loss.mean()
        flatten_loss = flatten_loss.mean()

        # total loss: multi-view silhouette IoU plus mesh regularizers
        loss = multiview_iou_loss(render_images, images_a, images_b) + \
               args.lambda_laplacian * laplacian_loss + \
               args.lambda_flatten * flatten_loss
        losses.update(loss.data.item(), images_a.size(0))

        # compute gradient and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        # save checkpoint
        if i % args.save_freq == 0:
            model_path = os.path.join(directory_output, 'checkpoint_%07d.pth.tar' % i)
            torch.save({
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                }, model_path)

        # save demo mesh and images
        if i % args.demo_freq == 0:
            demo_image = images_a[0:1]
            demo_path = os.path.join(directory_output, 'demo_%07d.obj' % i)
            demo_v, demo_f = model.reconstruct(demo_image)
            srf.save_obj(demo_path, demo_v[0], demo_f[0])

            imageio.imsave(os.path.join(image_output, '%07d_fake.png' % i), img_cvt(render_images[0][0]))
            imageio.imsave(os.path.join(image_output, '%07d_input.png' % i), img_cvt(images_a[0]))

        # print progress
        if i % args.print_freq == 0:
            print('Iter: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f}\t'
                  'Loss {loss.val:.3f}\t'
                  'lr {lr:.6f}\t'
                  'sv {sv:.6f}\t'.format(i, args.num_iterations,
                                         batch_time=batch_time, loss=losses,
                                         lr=lr, sv=model.renderer.rasterizer.sigma_val))
Example #5
0
def train():
    """Run the Jittor training loop, with an optional timing benchmark.

    Relies on module-level globals: ``args``, ``model``, ``optimizer``,
    ``dataset_train``, ``start_iter``, ``directory_output``,
    ``image_output``, ``jt``, ``jr``, ``imageio``, ``img_cvt``,
    ``AverageMeter``.  Setting ``run_times`` to a positive value turns the
    function into a throughput benchmark that exits after that many
    iterations (skipping checkpoints/demos/printing).
    """
    end = time.time()
    batch_time = AverageMeter()
    losses = AverageMeter()

    run_times = -1  # -1 disables benchmark mode
    cnt = 0
    jt.sync_all(True)  # flush pending Jittor ops so timing starts clean
    sta = time.time()
    for i in range(start_iter, args.num_iterations + 1):
        # adjust learning rate and sigma_val (decay after 150k iter)
        lr = adjust_learning_rate([optimizer],
                                  args.learning_rate,
                                  i,
                                  method=args.lr_type)
        model.set_sigma(adjust_sigma(args.sigma_val, i))

        # load images from multi-view
        images_a, images_b, viewpoints_a, viewpoints_b = dataset_train.get_random_batch(
            args.batch_size)

        # soft render images
        render_images, laplacian_loss, flatten_loss = model(
            [images_a, images_b], [viewpoints_a, viewpoints_b], task='train')
        laplacian_loss = laplacian_loss.mean()
        flatten_loss = flatten_loss.mean()

        # total loss: multi-view silhouette IoU plus mesh regularizers
        loss = multiview_iou_loss(render_images, images_a, images_b) + \
               args.lambda_laplacian * laplacian_loss + \
               args.lambda_flatten * flatten_loss

        losses.update(loss.data, images_a.size(0))

        # Jittor's optimizer.step takes the loss and performs
        # zero_grad/backward/step internally
        optimizer.step(loss)

        batch_time.update(time.time() - end)
        end = time.time()

        # benchmark mode: time `run_times` iterations, then exit
        if run_times != -1:
            if cnt == run_times:
                jt.sync_all(True)
                print(
                    f"Costs {time.time() - sta} secs for running {run_times} times."
                )
                exit(0)
            else:
                cnt += 1
                continue

        # save checkpoint
        if i % args.save_freq == 0:
            model_path = os.path.join(directory_output,
                                      'checkpoint_%07d.pkl' % i)
            model.save(model_path)

        # save demo mesh and images
        if i % args.demo_freq == 0:
            demo_image = images_a[0:1]
            demo_path = os.path.join(directory_output, 'demo_%07d.obj' % i)
            demo_v, demo_f = model.reconstruct(demo_image)
            jr.save_obj(demo_path, demo_v[0], demo_f[0])

            imageio.imsave(os.path.join(image_output, '%07d_fake.png' % i),
                           img_cvt(render_images[0][0][None, :, :]))
            imageio.imsave(os.path.join(image_output, '%07d_input.png' % i),
                           img_cvt(images_a[0]))

        # print progress.  The original hardcoded 'sv 1' in the format
        # string while still passing the unused sv= keyword; format the
        # actual sigma value instead.
        if i % args.print_freq == 0:
            print('Iter: [{0}/{1}]\t'
                  'Time {batch_time:.3f}\t'
                  'Loss {loss:.3f}\t'
                  'lr {lr:.6f}\t'
                  'sv {sv:.6f}\t'.format(i,
                                         args.num_iterations,
                                         batch_time=batch_time.val,
                                         loss=losses.val.tolist()[0],
                                         lr=lr,
                                         sv=model.renderer.rasterizer.sigma_val))