Code example #1
def unpack_projected_dataset(dfaust_map, h5py_dir, dump_dir, num_angs=10):
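    # Build one world-to-camera matrix per viewing angle by rotating the
    # module-level CAM2WORLD extrinsics about the Y axis and inverting the
    # result. Note that np.linspace includes both endpoints, so the first and
    # last angles (0 and 2*pi) produce the same view. CAM2WORLD, RENDER_INFO
    # and unpack_dataset are assumed to be defined elsewhere in the source
    # module.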
    dfaust_map.num_angs = num_angs
    # A bit hacky
    world2cam_mats = []
    for i_ang, ang in enumerate(np.linspace(0, 2 * np.pi, num_angs)):
        Ry = np.array([[np.cos(ang), 0, -np.sin(ang), 0], [0., 1, 0, 0],
                       [np.sin(ang), 0, np.cos(ang), 0], [0., 0., 0., 1.]],
                      dtype=np.float32)
        world2cam_mats.append(
            np.linalg.inv(np.matmul(Ry, CAM2WORLD)).astype('float32'))

    dfaust_map.world2cam_mats = world2cam_mats
    # TODO ---------- Render Stub -----------#
    render.setup(RENDER_INFO)
    # TODO ---------- Render Stub -----------#
    unpack_dataset(dfaust_map, h5py_dir, dump_dir)
Code example #2
File: init.py Project: tws0002/Nuke-2
def main():
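    # Nuke plugin startup: the imports happen inside main() so that a failed
    # import of `validation` can be reported through a Nuke message box
    # before exiting.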
    import os
    import sys
    import callback
    import render
    import wlf.mp_logging
    import patch.precomp

    try:
        import validation
    except ImportError:
        __import__('nuke').message('Plugin\n {} crashed.'.format(
            os.path.normpath(os.path.join(__file__, '../../'))))
        sys.exit(1)

    wlf.mp_logging.basic_config()

    validation.setup()
    callback.setup()
    render.setup()
    patch.precomp.enable()

    # Recover outer scope env.
    globals().update(_GLOBAL_BEFORE_INIT)
Code example #3
def voxelized_pointcloud_sampling(path):
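    # Render the scaled mesh from a fixed camera, keep only the vertices that
    # are visible in that view, sample args.num_points of them as a point
    # cloud, and store the packed voxel occupancies. args, grid_points,
    # kdtree, bb_min and bb_max are module-level globals in the source
    # project.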
    try:
        out_file = path + '/voxelized_point_cloud_{}res_{}points.npz'.format(
            args.res, args.num_points)

        if os.path.exists(out_file):
            if args.write_over:
                print('overwrite ', out_file)
            else:
                print('File exists. Done.')
                return
        off_path = path + '/isosurf_scaled.off'

        # mesh = trimesh.load(off_path)
        # point_cloud = mesh.sample(args.num_points)
        V, F = objloader.LoadOff(off_path)
        # set up camera information
        info = {
            'Height': 480,
            'Width': 640,
            'fx': 575,
            'fy': 575,
            'cx': 319.5,
            'cy': 239.5
        }
        render.setup(info)

        # set up mesh buffers in cuda
        context = render.SetMesh(V, F)

        # cam2world = np.array([[ 0.85408425,  0.31617427, -0.375678  ,  0.56351697 * 2],
        #     [ 0.        , -0.72227067, -0.60786998,  0.91180497 * 2],
        #     [-0.52013469,  0.51917219, -0.61688   ,  0.92532003 * 2+3],
        #     [ 0.        ,  0.        ,  0.        ,  1.        ]], dtype=np.float32)
        cam2world = np.array(
            [[0.85408425, 0.31617427, -0.375678, 0.56351697 * 2],
             [0., -0.72227067, -0.60786998, 0.91180497 * 2],
             [-0.52013469, 0.51917219, -0.61688, 0.92532003 * 2],
             [0., 0., 0., 1.]],
            dtype=np.float32)
        world2cam = np.linalg.inv(cam2world).astype('float32')
        # the actual rendering process
        render.render(context, world2cam)

        # get information of mesh rendering
        # vindices represents 3 vertices related to pixels
        vindices, vweights, findices = render.getVMap(context, info)
        visible_indices = np.unique(vindices)
        # visible_points = V[visible_indices]
        # visible_faces =  F[np.unique(findices)]
        num_visible = visible_indices.shape[0]

        # # faces for partial boun
        # for i in range(num_visible):
        #     origin_index = visible_indices[i]
        #     visible_faces = np.where(visible_faces==origin_index,i,visible_faces)

        # max_index = num_visible - 1
        # faces_valid = np.ones((visible_faces.shape[0]),dtype=bool)
        # for i in range(visible_faces.shape[0]):
        #     face = visible_faces[i]
        #     if face[0] > max_index or face[1] > max_index or face[2] > max_index:
        #         faces_valid[i] = False
        # visible_faces = visible_faces[faces_valid]

        # for visualization
        # vis_face = findices.astype('float32') / np.max(findices)
        # sio.imsave(path + '/face.png',vis_face)
        # sio.imsave(path + '/vertex.png',vweights)
        # return num_visible

        if num_visible < args.num_points:
            print("visible_num is ", num_visible)
            raise Exception
        sample_idx = np.random.choice(num_visible,
                                      args.num_points,
                                      replace=False)
        valid_indices = visible_indices[sample_idx]
        point_cloud = V[valid_indices]
        # num_valid = point_cloud.shape[0]

        # for i in range(num_valid):
        #     origin_index = valid_indices[i]
        #     visible_faces = np.where(visible_faces==origin_index,i,visible_faces)

        # max_index = num_valid - 1
        # faces_valid = np.ones((visible_faces.shape[0]),dtype=bool)
        # for i in range(visible_faces.shape[0]):
        #     face = visible_faces[i]
        #     if face[0] > max_index or face[1] > max_index or face[2] > max_index:
        #         faces_valid[i] = False
        # visible_faces = visible_faces[faces_valid]

        occupancies = np.zeros(len(grid_points), dtype=np.int8)

        _, idx = kdtree.query(point_cloud)
        occupancies[idx] = 1

        compressed_occupancies = np.packbits(occupancies)

        np.savez(out_file,
                 point_cloud=point_cloud,
                 compressed_occupancies=compressed_occupancies,
                 bb_min=bb_min,
                 bb_max=bb_max,
                 res=args.res)

        print('Finished {}'.format(path))

        # prepare data for boundary sampling

        # visible_
        # visible_mesh = trimesh.base.Trimesh(vertices=point_cloud,faces=visible_faces)
        # visible_mesh.export(path + '/vis_mesh.obj')
        # boundary_sampling(path,visible_mesh)
    except Exception:
        print('Error with {}: {}'.format(path, traceback.format_exc()))
Code example #4
import sys
import numpy as np
import skimage.io as sio
import os
#from gen_poses import GetPoses,WavePose
import shutil
from objloader import LoadTextureOBJ
import render
import objloader


input_obj = sys.argv[1]
V, F, VT, FT, VN, FN, face_mat, kdmap = objloader.LoadTextureOBJ(input_obj)

# set up camera information
info = {'Height':960, 'Width':1280, 'fx':575*2, 'fy':575*2, 'cx':640, 'cy':480}
render.setup(info)

context = render.SetMesh(V, F)

cam2world = np.array([[ 0.85408425,  0.31617427, -0.375678  ,  0.56351697 * 2],
       [ 0.        , -0.72227067, -0.60786998,  0.91180497 * 2],
       [-0.52013469,  0.51917219, -0.61688   ,  0.92532003 * 2],
       [ 0.        ,  0.        ,  0.        ,  1.        ]], dtype=np.float32)

world2cam = np.linalg.inv(cam2world).astype('float32')
render.render(context, world2cam)
depth = render.getDepth(info)
vindices, vweights, findices = render.getVMap(context, info)
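# vindices/vweights hold the per-pixel vertex indices and their weights,
# findices the per-pixel face index (cf. the comments in code example #3).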

sio.imsave('depth.png', depth / np.max(depth))
sio.imsave('vweights.png', vweights)
Code example #5
File: main.py Project: Secret-Plans/ConsoleCraft
    elif user_input == "a":
        move_direction = "left"
    elif user_input == "s":
        move_direction = "down"
    elif user_input == "d":
        move_direction = "right"

    if move_direction != "":
        player.move(move_direction, world)

    return player, world


# Runs setup for rendering
print("Setting up Rendering")
render.setup()
print(render.set_colour("bright green"))
print("This text should be in green.")
print(render.set_colour("white"))

# Loading
print("Loading Assets")

# Loading Tileset Data
tileset_data = {}
_dir = "Data/Tile Data.json"
with open(_dir, "r") as f:
    tileset_data = json.load(f)

# Loading Ores
ores = {}
Code example #6
def main():
    render.setup()
    editor = Editor()
Code example #7
File: core.py Project: ALSchwalm/Trace
def setup():
    global player
    
    player = obj.Player(1, 1, 1)
        
    render.setup()
Code example #8
File: main.py Project: ALSchwalm/GeneticImages
def main():
    ga = setup()
    render.setup(ga)
    render.start()