Example #1
import os

import torch

# Trainer, saveAsPng and writeScene are project-local helpers; `opt` is
# assumed to be a module-level options object (Examples #4/#5 take it as a
# parameter instead).
def trainImageFilter(scene, benchmark=False):
    expr_dir = os.path.join(opt.output, opt.name)
    if not os.path.isdir(expr_dir):
        os.makedirs(expr_dir)

    trainer = Trainer(opt, scene)
    trainer.setup(opt, scene.cloud)

    logInterval = 1 + sum(opt.steps) // 20  # log roughly 20 times per cycle
    # timing accumulators (unused in this snippet)
    renderForwardTime = 0.0
    lossTime = 0.0
    optimizerStep = 0.0

    with torch.autograd.detect_anomaly():  # raise immediately on NaNs in the backward pass
        with open(os.path.join(expr_dir, "loss.csv"), 'w') as loss_log:
            for c in range(opt.cycles):
                # create a new reference for this cycle
                tb = c*sum(opt.steps)+opt.startingStep
                te = (c+1)*sum(opt.steps)+opt.startingStep
                t = tb

                with torch.no_grad():
                    trainer.create_reference(scene)
                    trainer.initiate_cycle()
                    for i, pair in enumerate(zip(trainer.groundtruths, trainer.predictions)):
                        post, pre = pair
                        diff = post - pre
                        saveAsPng(pre.cpu(), os.path.join(expr_dir, 't%03d_cam%d_init.png' % (t, i)))
                        saveAsPng(post.cpu(), os.path.join(expr_dir, 't%03d_cam%d_gt.png' % (t, i)))
                        saveAsPng(diff.cpu(), os.path.join(expr_dir, 't%03d_cam%d_diff.png' % (t, i)))

                for t in range(tb, te):
                    if t % logInterval == 0 and not benchmark:
                        writeScene(scene, os.path.join(expr_dir, 't%03d' % t +
                                                       '_values.json'), os.path.join(expr_dir, 't%03d' % t + '.ply'))

                    trainer.optimize_parameters()
                    if t % logInterval == 0 and not benchmark:
                        for i, prediction in enumerate(trainer.predictions):
                            saveAsPng(prediction.detach().cpu()[0], os.path.join(expr_dir, 't%03d_cam%d' % (t, i) + ".png"))

                    if not benchmark:
                        loss_str = ",".join(["%.3f" % (100*v) for v in trainer.loss_image])
                        reg_str = ",".join(["%.3f" % (100*v) for v in trainer.loss_reg])
                        entries = [trainer.modifier] + [loss_str] + [reg_str]
                        loss_log.write(",".join(entries)+"\n")
                        print("{:03d} {}: lr {} loss ({}) \n         :       reg ({})".format(
                            t, trainer.modifier, trainer.lr, loss_str, reg_str))

                trainer.finish_cycle()

    writeScene(scene, os.path.join(expr_dir, 'final_scene.json'),
               os.path.join(expr_dir, 'final_cloud.ply'))
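
A self-contained sketch of the cycle/step bookkeeping the function uses, with illustrative values (steps, cycles and startingStep mirror the fields read from `opt`):

steps = [50, 50]          # per-stage step counts, illustrative
cycles = 3
startingStep = 0
logInterval = 1 + sum(steps) // 20   # -> writes logs roughly 20 times per cycle

for c in range(cycles):
    tb = c * sum(steps) + startingStep        # first step of this cycle
    te = (c + 1) * sum(steps) + startingStep  # one past the last step
    print("cycle %d covers steps [%d, %d), logging every %d" % (c, tb, te, logInterval))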
Example #2
            keyName = os.path.join(opt.output, pointRelPath[:-4])  # drop the 4-char file extension
            points = readCloud(pointPath, device="cpu")
            scene.loadPoints(points)
            fileName = getBasename(pointPath)
            splatter.setCloud(scene.cloud)
            successful_views = []  # (unused in this snippet)
            rendered = []
            for i, cam in enumerate(scene.cameras):
                splatter.setCamera(i)
                result = splatter.render()
                if result is None:
                    print("splatted a None")
                    continue
                result = result.detach()[0]
                rendered.append(result)
            print(pointRelPath)
            for i, gt in enumerate(rendered):
                if splatter.shading == "albedo":
                    cmax = 1
                else:
                    cmax = None
                saveAsPng(gt.cpu(), keyName + '_cam%02d.png' % i, cmin=0, cmax=cmax)
            # stacked = torch.stack(rendered, dim=0)
            # np.save(keyName+'_views.npy', stacked.cpu().numpy())

    all_views = [cam.world2CameraMatrix_test(cam.rotation, cam.position).detach().cpu().numpy() for cam in scene.cameras]
    all_views_intrinsics = [cam.projectionMatrix().detach().cpu().numpy() for cam in scene.cameras]
    # NOTE: keyName still holds the name of the last processed point cloud here
    np.save(keyName+'_all_views', all_views)
    np.save(keyName+'_all_views_intrinsics', all_views_intrinsics)

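Since np.save appends the ".npy" suffix itself, the camera arrays written above can be loaded back as below; the matrix shapes are assumptions, as they depend on the project's camera classes:

import numpy as np

# keyName as in the snippet above
extrinsics = np.load(keyName + '_all_views.npy')             # assumed shape (num_cams, 4, 4), world-to-camera
intrinsics = np.load(keyName + '_all_views_intrinsics.npy')  # assumed shape (num_cams, 4, 4), projection
print(extrinsics.shape, intrinsics.shape)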
Example #3
                # turntable: rotate the cloud in 3-degree increments over a full revolution
                for ang in range(0, 360, 3):
                    rot = getRotationMatrix(
                        torch.tensor(ang * np.pi / 180).to(
                            device=splatter.pointRotation.device))
                    splatter.pointRotation.data.copy_(rot.unsqueeze(0))
                    splatter.m2w = batchAffineMatrix(splatter.pointRotation,
                                                     splatter.pointPosition,
                                                     splatter.pointScale)

                    # set camera to look at the center
                    splatter.setCamera(i)
                    result = splatter.render()
                    if result is None:
                        continue
                    result = result.detach()[0]

                    if splatter.shading == "albedo":
                        cmax = 1
                        saveAsPng(result.cpu(),
                                  keyName + '_cam%02d_%03d.png' % (i, cnt),
                                  cmin=0,
                                  cmax=cmax)
                    else:
                        saveAsPng(result.cpu(),
                                  keyName + '_cam%02d_%03d.png' % (i, cnt),
                                  cmin=0)

                    cnt += 1

            print(pointRelPath)
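
getRotationMatrix is a project helper that maps an angle tensor to a rotation matrix; a minimal stand-in rotating about the y axis (the axis choice is an assumption) could look like this:

import torch

def yaw_rotation(ang):
    # 3x3 rotation about the y axis; hypothetical stand-in for getRotationMatrix
    c, s = torch.cos(ang), torch.sin(ang)
    one, zero = torch.ones_like(ang), torch.zeros_like(ang)
    return torch.stack([c, zero, s,
                        zero, one, zero,
                        -s, zero, c]).reshape(3, 3)

ang = torch.deg2rad(torch.tensor(30.0))
print(yaw_rotation(ang))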
Example #4
import os

import numpy as np
import torch

def trainShapeOnImage(scene, refScene, opt, baseline=False):
    expr_dir = os.path.join(opt.output, opt.name)
    if not os.path.isdir(expr_dir):
        os.makedirs(expr_dir)

    trainer = Trainer(opt, scene)
    trainer.setup(opt, scene.cloud)

    logInterval = 1 + sum(opt.steps) // 20  # log roughly 20 times per cycle
    renderForwardTime = 0.0
    lossTime = 0.0
    optimizerStep = 0.0

    log_variables = {}
    writeScene(refScene, os.path.join(expr_dir, 't000_scene_gt.json'), os.path.join(expr_dir, "gt.ply"))
    with open(os.path.join(expr_dir, "loss.csv"), 'w') as loss_log:
        for c in range(opt.cycles):
            # create a new reference for this cycle
            tb = c*sum(opt.steps)+opt.startingStep
            te = (c+1)*sum(opt.steps)+opt.startingStep
            t = tb
            with torch.no_grad():
                trainer.create_reference(refScene)
                writeScene(refScene, os.path.join(
                    expr_dir, 't%03d_scene_gt.json' % t))
                writeCameras(refScene, os.path.join(
                    expr_dir, 't%03d_cameras.ply' % t))
                for i, gt in enumerate(trainer.groundtruths):
                    saveAsPng(gt.cpu()[0], os.path.join(
                        expr_dir, 't%03d_cam%d_gt.png' % (t, i)))
                trainer.initiate_cycle()

            for t in range(tb, te):
                if t % logInterval == 0:
                    writeScene(scene, os.path.join(
                        expr_dir, 't%03d_scene.json' % t), os.path.join(expr_dir, "t%03d.ply" % t))

                trainer.optimize_parameters()
                # NOTE: tmp_saved_v is undefined in this snippet; it is assumed
                # to be a dict of tensors populated elsewhere (e.g. during
                # optimize_parameters) for debugging.
                for k in tmp_saved_v:
                    if k == "renderable_idx":
                        continue
                    if k not in log_variables:
                        log_variables[k] = tmp_saved_v[k].detach().cpu().numpy()
                    else:
                        log_variables[k] = np.concatenate(
                            [log_variables[k], tmp_saved_v[k].detach().cpu().numpy()], axis=0)

                if t % logInterval == 0:
                    for i, prediction in enumerate(trainer.predictions):
                        saveAsPng(prediction.detach().cpu()[0], os.path.join(
                            expr_dir, 't%03d_cam%d' % (t, i) + ".png"))

                loss_str = ",".join(
                    ["%.3f" % v for v in trainer.loss_image])
                reg_str = ",".join(["%.3f" % v for v in trainer.loss_reg])
                entries = [trainer.modifier] + [loss_str] + [reg_str]
                loss_log.write(",".join(entries)+"\n")
                print("{:03d} {}: lr {} loss ({}) \n         :       reg ({})".format(
                    t, trainer.modifier, trainer.lr, loss_str, reg_str))

            trainer.finish_cycle()

    writeScene(scene, os.path.join(expr_dir, 'final_scene.json'),
               os.path.join(expr_dir, 'final_cloud.ply'))
    np.save(os.path.join(expr_dir, "log_variables"), log_variables)
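
Because log_variables is a plain dict, np.save stores it as a pickled object array; loading it back therefore needs allow_pickle=True and .item() (the path below is illustrative):

import numpy as np

log = np.load("output/run_name/log_variables.npy", allow_pickle=True).item()
for k, v in log.items():
    print(k, v.shape)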
Example #5
import os
import time

import torch

def trainShapeOnImage(scene, refScene, opt, baseline=False, benchmark=False):
    expr_dir = os.path.join(opt.output, opt.name)
    if not os.path.isdir(expr_dir):
        os.makedirs(expr_dir)

    trainer = Trainer(opt, scene)
    trainer.setup(opt, scene.cloud)

    logInterval = max(1, (1 + sum(opt.steps)) // 20)  # guard against modulo-by-zero on short runs
    renderForwardTime = 0.0
    lossTime = 0.0
    optimizerStep = 0.0

    with open(os.path.join(expr_dir, "loss.csv"), 'w') as loss_log:
        learnTick = time.time()
        for c in range(opt.cycles):
            # create a new reference for this cycle
            tb = c*sum(opt.steps)+opt.startingStep
            te = (c+1)*sum(opt.steps)+opt.startingStep
            t = tb

            with torch.no_grad():
                # render reference
                trainer.create_reference(refScene)

                # render prediction
                maxDiffs = []
                selectedCameras = []
                for i in range(len(refScene.cameras)):
                    trainer.forward(i)
                    prediction = trainer.predictions[i]
                    maxDiff, selectedCamera = viewFromError(opt.genCamera, trainer.groundtruths[i][0], prediction.detach()[0],
                                                            trainer.model._localPoints.detach(), trainer.model._projPoints.detach(),
                                                            trainer.model,
                                                            offset=opt.camOffset*(0.997**c))
                    maxDiffs.append(maxDiff)
                    selectedCameras.append(selectedCamera)

                maxId = torch.stack(maxDiffs, dim=0).argmax()
                selectedCamera = selectedCameras[maxId]
                # render again
                trainer.create_reference(refScene, selectedCamera)
                writeScene(refScene, os.path.join(expr_dir, 't%03d_scene_gt.json' % t))
                writeCameras(refScene, os.path.join(expr_dir, 't%03d_cameras.ply' % t))
                for i, gt in enumerate(trainer.groundtruths):
                    if gt is not None:
                        saveAsPng(gt.cpu()[0], os.path.join(expr_dir, 't%03d_cam%d_gt.png' % (t, i)))
                trainer.initiate_cycle()

            for t in range(tb, te):
                if t % logInterval == 0 and not benchmark:
                    writeScene(scene, os.path.join(expr_dir, 't%03d_scene.json' % t), os.path.join(expr_dir, "t%03d.ply" % t))

                trainer.optimize_parameters()

                if t % logInterval == 0 and not benchmark:
                    for i, prediction in enumerate(trainer.predictions):
                        saveAsPng(prediction.detach().cpu()[0], os.path.join(expr_dir, 't%03d_cam%d' % (t, i) + ".png"))

                if not benchmark:
                    loss_str = ",".join(["%.3f" % v for v in trainer.loss_image])
                    reg_str = ",".join(["%.3f" % v for v in trainer.loss_reg])
                    entries = [trainer.modifier] + [loss_str] + [reg_str]
                    loss_log.write(",".join(entries)+"\n")
                    print("{:03d} {}: lr {} loss ({}) \n         :       reg ({})".format(
                        t, trainer.modifier, trainer.lr, loss_str, reg_str))

            trainer.finish_cycle()

    # outlier removal: keep only the points that score well across all views
    with torch.no_grad():
        # re-project
        scores = []
        for i, gt in enumerate(trainer.groundtruths):
            if gt is None:
                continue
            trainer.model.setCamera(i)
            trainer.model.convertToCameraSpace()
            projectedPoints = trainer.model.camera.projectPoints(trainer.model.cameraPoints)
            score = removeOutlier(gt, projectedPoints, sigma=100)

            saveAsPng(gt.cpu()[0], os.path.join(expr_dir, 'clean_gt_cam%d.png' % i))
            save_ply_property(trainer.model.localPoints.cpu()[0].detach(), 1-score.cpu(), os.path.join(expr_dir, 'clean_score_cam%d.ply' % i), property_max=1,
                              normals=trainer.model.localNormals.detach().cpu()[0], cmap_name="gnuplot2")
            rendered = trainer.model.render()[0]
            saveAsPng(rendered.cpu(), os.path.join(expr_dir, 'clean_pred_cam%d.png' % i))
            scores.append(score)

        scores = torch.stack(scores, dim=0)
        scores = torch.prod(scores, dim=0)
        _, indices = torch.topk(scores, int(trainer.model.localPoints.shape[1]*0.99), dim=0)
        # _, indices = torch.topk(score, int(trainer.model.localPoints.shape[1]*0.99), dim=0)
        newPoints = torch.index_select(trainer.model.localPoints.data, 1, indices)
        newNormals = torch.index_select(trainer.model.localNormals.data, 1, indices)
        newColors = torch.index_select(trainer.model.pointColors.data, 1, indices)

        scene.cloud.localPoints = newPoints[0]
        scene.cloud.localNormals = newNormals[0]
        scene.cloud.color = newColors[0]

    writeScene(scene, os.path.join(expr_dir, 'final_scene.json'), os.path.join(expr_dir, 'final_cloud.ply'))
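
The outlier filter above multiplies per-view scores so a point must score well in every view, then keeps the best 99%; a self-contained sketch of that topk/index_select pattern on random data:

import torch

points = torch.rand(1, 1000, 3)        # (batch, N, 3), illustrative
scores = torch.rand(4, 1000)           # one score row per view, illustrative
combined = torch.prod(scores, dim=0)   # (N,) joint score across views
_, keep = torch.topk(combined, int(points.shape[1] * 0.99), dim=0)
filtered = torch.index_select(points, 1, keep)
print(filtered.shape)                  # torch.Size([1, 990, 3])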