Code example #1 (0 votes)
File: demo.py — project: AnonymousUser1111/URA
def main():
    """Reconstruct a mesh (.obj) for every .png/.jpg image in --input_path.

    Loads a Reconstructor checkpoint configured by --cfg_file and
    initialised from the template mesh --init_obj, then writes one .obj
    per input image next to the images themselves.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg_file', type=str, default='./configs/shapenet/shapenet_1c_02691156.yaml')
    parser.add_argument('--checkpoint', type=str, default='./data/models/02691156-R.pth.tar')
    parser.add_argument('--init_obj', type=str, default='./data/sphere_642.obj')
    parser.add_argument('--input_path', type=str, default='./data/demo/')
    args = parser.parse_args()

    # Load cfg.  safe_load avoids arbitrary object construction
    # (yaml.load without an explicit Loader is deprecated and unsafe),
    # and the context manager guarantees the handle is closed.
    with open(args.cfg_file) as f:
        cfg = yaml.safe_load(f)

    # Build the model and restore the trained weights.
    model_R = models.Reconstructor(args.init_obj, cfg).to(device)
    checkpoint = torch.load(args.checkpoint)
    model_R.load_state_dict(checkpoint['model_R'])
    model_R.eval()

    img_files = os.listdir(args.input_path)
    for i, img_file in enumerate(img_files):
        print(str(i) + '/' + str(len(img_files)))
        temp_img_path = os.path.join(args.input_path, img_file)
        filename, file_extension = os.path.splitext(img_file)

        # Skip anything that is not a supported image file.
        if file_extension not in ('.png', '.jpg'):
            continue
        temp_img = misc.imread(temp_img_path)
        # HWC uint8 -> CHW float in [0, 1] with a leading batch dim.
        temp_img = temp_img.transpose(2, 0, 1)
        img = torch.FloatTensor(temp_img.astype('float32') / 255.).unsqueeze(0).to(device)

        vertices, faces, _ = model_R.reconstruct(img)
        srf.save_obj(os.path.join(args.input_path, filename + '.obj'), vertices[0], faces[0])
Code example #2 (0 votes)
File: test_vsl.py — project: mjyip8/cs231a_softra
def test():
    """Evaluate voxel-IoU reconstruction quality per class on dataset_val.

    For every class: runs the model in 'test' mode over all evaluation
    batches, accumulates IoU, saves a demo mesh + input image every
    args.save_freq objects, and prints the per-class and overall mean
    IoU.  Each object is evaluated from 24 viewpoints, hence the /24
    when averaging.

    NOTE(review): depends on module-level globals (model, dataset_val,
    args, directory_mesh, srf, img_cvt, imageio) defined elsewhere in
    this file.
    """
    end = time.time()

    batch_time = AverageMeter()

    iou_all = []  # per-class mean IoU, in percent

    for class_id, class_name in dataset_val.class_ids_pair:

        directory_mesh_cls = os.path.join(directory_mesh, class_id)
        os.makedirs(directory_mesh_cls, exist_ok=True)
        iou = 0

        for i, (im, vx) in enumerate(
                dataset_val.get_all_batches_for_evaluation(
                    args.batch_size, class_id)):
            images = torch.autograd.Variable(im).cuda()
            voxels = vx.numpy()

            batch_iou, vertices, faces = model(images,
                                               voxels=voxels,
                                               task='test')
            iou += batch_iou.sum()

            batch_time.update(time.time() - end)
            end = time.time()

            # save demo images
            for k in range(vertices.size(0)):
                obj_id = (i * args.batch_size + k)
                if obj_id % args.save_freq == 0:
                    mesh_path = os.path.join(directory_mesh_cls,
                                             '%06d.obj' % obj_id)
                    input_path = os.path.join(directory_mesh_cls,
                                              '%06d.png' % obj_id)
                    srf.save_obj(mesh_path, vertices[k], faces[k])
                    imageio.imsave(input_path, img_cvt(images[k]))

            # print loss
            if i % args.print_freq == 0:
                print('Iter: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f}\t'
                      'IoU {2:.3f}\t'.format(
                          i, ((dataset_val.num_data[class_id] * 24) //
                              args.batch_size),
                          batch_iou.mean(),
                          batch_time=batch_time))

        iou_cls = iou / 24. / dataset_val.num_data[class_id] * 100
        iou_all.append(iou_cls)
        print('=================================')
        print('Mean IoU: %.3f for class %s' % (iou_cls, class_name))
        print('\n')

    print('=================================')
    print('Mean IoU: %.3f for all classes' % (sum(iou_all) / len(iou_all)))
Code example #3 (0 votes)
File: mesh.py — project: Rubikplayer/SoftRas
 def save_obj(self, filename_obj, save_texture=False, texture_res_out=16):
     """Write this mesh to a Wavefront .obj file.

     Args:
         filename_obj: output file path.
         save_texture: if True, also write the mesh textures.
         texture_res_out: texture resolution forwarded to the writer.

     Raises:
         ValueError: if the batch holds anything other than exactly one
             mesh (only a single mesh can be written to one .obj file).
     """
     if self.batch_size != 1:
         # Fixed message: the guard rejects any batch size other than 1,
         # not "batch size >= 1" as the original message claimed.
         raise ValueError('Could not save when batch size != 1')
     if save_texture:
         srf.save_obj(filename_obj, self.vertices[0], self.faces[0],
                      textures=self.textures[0],
                      texture_res=texture_res_out, texture_type=self.texture_type)
     else:
         srf.save_obj(filename_obj, self.vertices[0], self.faces[0], textures=None)
Code example #4 (0 votes)
# For every category: reconstruct a mesh from the best-matching sample's
# rendered image, save it alongside a copy of the ground-truth model, and
# print the category's IoU statistics.
for each_key in category_to_IoU:
    save_gen_model_path = os.path.join(save_dir,
                                       each_key + '_generated_model.obj')
    save_ori_model_path = os.path.join(save_dir,
                                       each_key + '_original_model.obj')

    # Drop the two known-broken sample directories, if present.
    broken_ids = (
        '000000__broken__67ada28ebc79cc75a056f196c127ed77',
        '000000__broken__b65b590a565fa2547e1c85c5c15da7fb',
    )
    file_list = sorted(os.listdir(os.path.join(root_dir, each_key)))
    file_list = [name for name in file_list if name not in broken_ids]

    # The argmin index picks the representative sample for this category.
    sample_dir = os.path.join(root_dir, each_key,
                              file_list[category_to_argmin[each_key]])
    image_path = os.path.join(sample_dir, 'rendered_0.png')
    original_model_path = os.path.join(sample_dir, 'model.obj')

    # HWC image -> contiguous NCHW float tensor in [0, 1] on the GPU.
    image = PIL.Image.open(image_path)
    image = image.resize((64, 64))
    pixels = np.asanyarray(image)[None, :, :, :].transpose((0, 3, 1, 2))
    pixels = np.ascontiguousarray(pixels)
    image = torch.from_numpy(pixels.astype('float32') / 255.)
    image = torch.autograd.Variable(image).cuda()

    vertice_generate, face_generate = model.reconstruct(image)
    srf.save_obj(save_gen_model_path, vertice_generate[0], face_generate[0])
    shutil.copy(original_model_path, save_ori_model_path)
    print(each_key, category_to_num[each_key], category_to_max[each_key],
          category_to_min[each_key], category_to_mean[each_key])
Code example #5 (0 votes)
def train():
    """Train the reconstructor with a multi-view IoU loss, logging both
    training and validation loss to TensorBoard every iteration.

    Periodically checkpoints the model and dumps a demo mesh plus
    fake/input image pair.

    NOTE(review): relies on module-level globals (model, optimizer,
    dataset_train, dataset_val, args, start_iter, directory_output,
    image_output) defined elsewhere in this file.
    """
    writer = SummaryWriter()
    print("Made summary writer")

    end = time.time()
    batch_time = AverageMeter()
    losses = AverageMeter()

    for i in range(start_iter, args.num_iterations + 1):
        # adjust learning rate and sigma_val (decay after 150k iter)
        lr = adjust_learning_rate([optimizer],
                                  args.learning_rate,
                                  i,
                                  method=args.lr_type)
        model.set_sigma(adjust_sigma(args.sigma_val, i))

        # TRAIN: load a random multi-view batch and soft-render it.
        images_a, images_b, viewpoints_a, viewpoints_b = dataset_train.get_random_batch(
            args.batch_size)
        images_a = images_a.cuda()
        images_b = images_b.cuda()
        viewpoints_a = viewpoints_a.cuda()
        viewpoints_b = viewpoints_b.cuda()

        render_images, laplacian_loss, flatten_loss = model(
            [images_a, images_b], [viewpoints_a, viewpoints_b], task='train')
        laplacian_loss = laplacian_loss.mean()
        flatten_loss = flatten_loss.mean()

        # training loss: multi-view IoU plus mesh regularizers
        loss = multiview_iou_loss(render_images, images_a, images_b) + \
               args.lambda_laplacian * laplacian_loss + \
               args.lambda_flatten * flatten_loss

        # VAL: same forward pass on a validation batch.  Wrapped in
        # no_grad() because val_loss is only logged below and never
        # backpropagated — this avoids building a second autograd graph
        # (and its memory cost) every iteration.
        with torch.no_grad():
            val_images_a, val_images_b, val_viewpoints_a, val_viewpoints_b = dataset_val.get_random_batch(
                args.batch_size)
            val_images_a = val_images_a.cuda()
            val_images_b = val_images_b.cuda()
            val_viewpoints_a = val_viewpoints_a.cuda()
            val_viewpoints_b = val_viewpoints_b.cuda()

            val_render_images, val_laplacian_loss, val_flatten_loss = model(
                [val_images_a, val_images_b],
                [val_viewpoints_a, val_viewpoints_b],
                task='train')
            val_laplacian_loss = val_laplacian_loss.mean()
            val_flatten_loss = val_flatten_loss.mean()

            val_loss = multiview_iou_loss(val_render_images, val_images_a, val_images_b) + \
                       args.lambda_laplacian * val_laplacian_loss + \
                       args.lambda_flatten * val_flatten_loss

        # RECORD LOSSES
        writer.add_scalar('Loss/train', loss.data.item(), i)
        writer.add_scalar('Loss/val', val_loss.data.item(), i)
        losses.update(loss.data.item(), images_a.size(0))

        # compute gradient and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        # save checkpoint
        if i % args.save_freq == 0:
            model_path = os.path.join(directory_output,
                                      'checkpoint_%07d.pth.tar' % i)
            torch.save(
                {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, model_path)

        # save demo images
        if i % args.demo_freq == 0:
            demo_image = images_a[0:1]
            demo_path = os.path.join(directory_output, 'demo_%07d.obj' % i)
            demo_v, demo_f = model.reconstruct(demo_image)
            srf.save_obj(demo_path, demo_v[0], demo_f[0])

            fake_img = img_cvt(render_images[0][0])
            real_img = img_cvt(images_a[0])
            imageio.imsave(os.path.join(image_output, '%07d_fake.png' % i),
                           fake_img)
            imageio.imsave(os.path.join(image_output, '%07d_input.png' % i),
                           real_img)

        # print progress
        if i % args.print_freq == 0:
            print('Iter: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f}\t'
                  'Loss {loss.val:.3f}\t'
                  'lr {lr:.6f}\t'
                  'sv {sv:.6f}\t'.format(
                      i,
                      args.num_iterations,
                      batch_time=batch_time,
                      loss=losses,
                      lr=lr,
                      sv=model.renderer.rasterizer.sigma_val))
    writer.close()
Code example #6 (0 votes)
def train():
    """Train with a multi-view IoU loss and record the raw IoU-loss curve.

    Same loop as the other training scripts in this collection, but it
    additionally collects the per-iteration IoU loss and writes the full
    list to 'loss.txt' when training finishes.

    NOTE(review): relies on module-level globals (model, optimizer,
    dataset_train, args, start_iter, directory_output, image_output)
    defined elsewhere in this file.
    """
    end = time.time()
    batch_time = AverageMeter()
    losses = AverageMeter()
    iou_losses = []  # raw multi-view IoU loss per iteration

    for i in range(start_iter, args.num_iterations + 1):
        # adjust learning rate and sigma_val (decay after 150k iter)
        lr = adjust_learning_rate([optimizer],
                                  args.learning_rate,
                                  i,
                                  method=args.lr_type)
        model.set_sigma(adjust_sigma(args.sigma_val, i))

        # load images from multi-view
        images_a, images_b, viewpoints_a, viewpoints_b = dataset_train.get_random_batch(
            args.batch_size)
        images_a = images_a.cuda()
        images_b = images_b.cuda()
        viewpoints_a = viewpoints_a.cuda()
        viewpoints_b = viewpoints_b.cuda()

        # soft render images
        render_images, laplacian_loss, flatten_loss = model(
            [images_a, images_b], [viewpoints_a, viewpoints_b], task='train')
        laplacian_loss = laplacian_loss.mean()
        flatten_loss = flatten_loss.mean()

        iou_loss = multiview_iou_loss(render_images, images_a, images_b)
        iou_losses.append(iou_loss.data.item())

        # total loss: IoU plus mesh regularizers
        loss = iou_loss + \
               args.lambda_laplacian * laplacian_loss + \
               args.lambda_flatten * flatten_loss
        losses.update(loss.data.item(), images_a.size(0))

        # compute gradient and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        # save checkpoint
        if i % args.save_freq == 0:
            model_path = os.path.join(directory_output,
                                      'checkpoint_%07d.pth.tar' % i)
            torch.save(
                {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, model_path)

        # save demo images
        if i % args.demo_freq == 0:
            demo_image = images_a[0:1]
            demo_path = os.path.join(directory_output, 'demo_%07d.obj' % i)
            demo_v, demo_f = model.reconstruct(demo_image)
            srf.save_obj(demo_path, demo_v[0], demo_f[0])

            imageio.imsave(os.path.join(image_output, '%07d_fake.png' % i),
                           img_cvt(render_images[0][0]))
            imageio.imsave(os.path.join(image_output, '%07d_input.png' % i),
                           img_cvt(images_a[0]))

        # print progress
        if i % args.print_freq == 0:
            print('Iter: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f}\t'
                  'Loss {loss.val:.3f}\t'
                  'lr {lr:.6f}\t'
                  'sv {sv:.6f}\t'.format(
                      i,
                      args.num_iterations,
                      batch_time=batch_time,
                      loss=losses,
                      lr=lr,
                      sv=model.renderer.rasterizer.sigma_val))

    # Dump the raw loss curve for offline plotting.
    print(iou_losses)
    with open('loss.txt', 'w') as f:
        f.write(str(iou_losses))
Code example #7 (0 votes)
def train():
    """Train with a multi-view IoU loss while streaming images to Visdom.

    Every iteration logs the two input views and the four rendered views
    to a Visdom server (port 8097) under the experiment's environment.

    NOTE(review): relies on module-level globals (model, optimizer,
    dataset_train, args, start_iter, directory_output, image_output, F)
    defined elsewhere in this file.
    """
    end = time.time()
    batch_time = AverageMeter()
    losses = AverageMeter()

    env_name = args.experiment_id

    # One Visdom image window per stream: two inputs, four renders.
    img_a_logger = VisdomLogger('images', env=env_name, port=8097, opts=dict(title='img_a'))
    img_b_logger = VisdomLogger('images', env=env_name, port=8097, opts=dict(title='img_b'))
    rnd_logger = [VisdomLogger('images', env=env_name, port=8097, opts=dict(title='rnd_rgb_{}'.format(_i))) for _i in range(4)]

    for i in range(start_iter, args.num_iterations + 1):
        # adjust learning rate and sigma_val (decay after 150k iter)
        lr = adjust_learning_rate([optimizer], args.learning_rate, i, method=args.lr_type)
        model.set_sigma(adjust_sigma(args.sigma_val, i))

        # load images from multi-view
        images_a, images_b, viewpoints_a, viewpoints_b = dataset_train.get_random_batch(args.batch_size)
        images_a = images_a.cuda()
        images_b = images_b.cuda()
        viewpoints_a = viewpoints_a.cuda()
        viewpoints_b = viewpoints_b.cuda()

        # soft render images
        render_images, laplacian_loss, flatten_loss = model([images_a, images_b],
                                                            [viewpoints_a, viewpoints_b],
                                                            task='train')
        # Mirror inputs and renders to Visdom (upscaled 4x, RGB channels only).
        img_a_logger.log(F.interpolate(images_a.detach().clone(), scale_factor=4).squeeze()[:, :3])
        img_b_logger.log(F.interpolate(images_b.detach().clone(), scale_factor=4).squeeze()[:, :3])
        for _i in range(4):
            rnd_logger[_i].log(F.interpolate(render_images[_i].detach().clone(), scale_factor=4).squeeze()[:, :3])
        # Removed a leftover `import ipdb; ipdb.set_trace()` debugger
        # breakpoint that halted training on every iteration.
        laplacian_loss = laplacian_loss.mean()
        flatten_loss = flatten_loss.mean()

        # compute loss
        loss = multiview_iou_loss(render_images, images_a, images_b) + \
               args.lambda_laplacian * laplacian_loss + \
               args.lambda_flatten * flatten_loss
        losses.update(loss.data.item(), images_a.size(0))

        # compute gradient and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        # save checkpoint
        if i % args.save_freq == 0:
            model_path = os.path.join(directory_output, 'checkpoint_%07d.pth.tar'%i)
            torch.save({
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                }, model_path)

        # save demo images
        if i % args.demo_freq == 0:
            demo_image = images_a[0:1]
            demo_path = os.path.join(directory_output, 'demo_%07d.obj'%i)
            demo_v, demo_f = model.reconstruct(demo_image)
            srf.save_obj(demo_path, demo_v[0], demo_f[0])

            imageio.imsave(os.path.join(image_output, '%07d_fake.png' % i), img_cvt(render_images[0][0]))
            imageio.imsave(os.path.join(image_output, '%07d_input.png' % i), img_cvt(images_a[0]))

        # print progress
        if i % args.print_freq == 0:
            print('Iter: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f}\t'
                  'Loss {loss.val:.3f}\t'
                  'lr {lr:.6f}\t'
                  'sv {sv:.6f}\t'.format(i, args.num_iterations,
                                         batch_time=batch_time, loss=losses,
                                         lr=lr, sv=model.renderer.rasterizer.sigma_val))
Code example #8 (0 votes)
# Batch-reconstruct a mesh for every image found in IMG_PATH.
end = time.time()

# NOTE(review): these meters and iou_all are unused in this span; kept in
# case a later (unseen) part of the script references them.
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
losses1 = AverageMeter()

iou_all = []

images = []

# Deterministic (sorted) order so outputs pair up with inputs.
img_list = sorted(os.listdir(IMG_PATH))

for img_name in img_list:
    img = PIL.Image.open(os.path.join(IMG_PATH, img_name))
    img = np.asanyarray(img)
    images.append(img)

# NHWC uint8 stack -> contiguous NCHW float tensor in [0, 1] on the GPU.
images = np.array(images)
images = images.transpose((0, 3, 1, 2))
images = np.ascontiguousarray(images)
images = torch.from_numpy(images.astype('float32') / 255.)
images = torch.autograd.Variable(images).cuda()

vertices, faces = model.reconstruct(images)
for k, img_name in enumerate(img_list):
    print(k)
    # splitext handles any extension length (the original `[:-4]` slice
    # assumed a three-character extension and broke on e.g. '.jpeg').
    stem = os.path.splitext(img_name)[0]
    mesh_path = os.path.join(directory_output, stem + ".obj")
    srf.save_obj(mesh_path, vertices[k], faces[k])