Example #1
0
def main():
    """Set up the global camera and RNG, create the GLUT "AR" window,
    register callbacks, and enter the (non-returning) GLUT main loop.
    """
    # ip = MovieImageProvider("resources/Video4.avi",0,70)
    global camera
    camera = Camera()
    camera.load_from_file("../resources/camera_params.xml")
    global rng
    rng = RNG()

    glutInit()
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE)
    glutInitWindowPosition(2000, 0)  # presumably places window on a second monitor — TODO confirm
    glutInitWindowSize(640, 480)
    glutCreateWindow("AR")
    glShadeModel(GL_SMOOTH)
    glEnable(GL_DEPTH_TEST)
    glutDisplayFunc(draw)
    glutReshapeFunc(reshape)

    # Timer callback: request a redisplay, then re-arm itself so the
    # window refreshes roughly every 50 ms.  The parameter is the value
    # passed to glutTimerFunc (renamed from `int`, which shadowed the builtin).
    def idle(value):
        glutPostRedisplay()
        glutTimerFunc(50, idle, 0)

    glutKeyboardFunc(key)
    glutSpecialFunc(key)
    glutTimerFunc(50, idle, 0)
    glutMainLoop()  # classic GLUT: never returns
Example #2
0
def main():
    """Initialize the camera and RNG globals, build the GLUT "AR" window,
    hook up display/reshape/keyboard/timer callbacks, and run the event loop.
    """
    #ip = MovieImageProvider("resources/Video4.avi",0,70)
    global camera
    camera = Camera()
    camera.load_from_file("../resources/camera_params.xml")
    global rng
    rng = RNG()

    glutInit()
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE)
    glutInitWindowPosition(2000, 0)  # likely targets a secondary display — TODO confirm
    glutInitWindowSize(640, 480)
    glutCreateWindow("AR")
    glShadeModel(GL_SMOOTH)
    glEnable(GL_DEPTH_TEST)
    glutDisplayFunc(draw)
    glutReshapeFunc(reshape)

    def idle(_value):
        # Schedule the next frame ~50 ms out, then ask GLUT to redraw.
        # (Parameter renamed from `int`, which shadowed the builtin type.)
        glutPostRedisplay()
        glutTimerFunc(50, idle, 0)

    glutKeyboardFunc(key)
    glutSpecialFunc(key)
    glutTimerFunc(50, idle, 0)
    glutMainLoop()  # blocks forever under classic GLUT
Example #3
0
def parse_scene_json(scene_json):
    """Build a Scene from a JSON scene-description file.

    Param: scene_json - path to a JSON file whose top-level 'scene' entry
        holds 'camera', 'lights', and 'shapes' sections.
    Returns: Scene obj to render.
    Raises: ValueError for an unrecognized 'mat_type' or 'geometry'.
        (The original if/elif ladders would either hit a NameError or
        silently reuse the class chosen for the *previous* shape.)
    """
    with open(scene_json, 'r') as scene_des:
        scene = json.loads(scene_des.read())['scene']

    cam = Camera(**scene['camera'])

    # Dispatch tables replace the if/elif chains.
    light_types = {'point': PointLight, 'directional': DirLight}
    material_types = {
        'base': RefBlinnMaterial,
        'transmissive': TransmissiveMaterial,
        'reflective': ReflectiveMaterial,
        'path_blinn': PathBlinnMaterial,
    }
    geometry_types = {
        'sphere': Sphere,
        'cylinder': Cylinder,
        'plane': Plane,
        'triangle': Triangle,
        'cone': Cone,
        'cuboid': Cuboid,
    }

    lights = []
    for light in scene['lights']:
        light_cls = light_types.get(light['type'])
        # Unknown light types are skipped, matching the original behavior.
        if light_cls is not None:
            lights.append(light_cls(**light))

    objects = []
    for obj in scene['shapes']:
        try:
            mat_cls = material_types[obj['mat_type']]
        except KeyError:
            raise ValueError("unknown mat_type: {!r}".format(obj['mat_type']))
        try:
            geom_cls = geometry_types[obj['geometry']]
        except KeyError:
            raise ValueError("unknown geometry: {!r}".format(obj['geometry']))

        kwargs = obj['geomParams']
        kwargs.update({'material': mat_cls(**obj['material'])})
        objects.append(geom_cls(**kwargs))

    return Scene(cam, lights, objects)
Example #4
0
def run(width, height, colors_to_randomize, random_factor, settings):
    """Software-render pipeline: build the camera and scene, transform
    geometry to view/screen coordinates, accumulate vertex normals, and
    rasterize the triangles.  Returns the populated scene object.
    """
    # Step 1) Camera: normalize N, orthogonalize V against N (V = V - proj_N V),
    # U = N x V — yielding the orthonormal view basis {U, N, V}.
    cam = Camera(settings.camera_input)

    # Step 2) Scene: load object geometry and illumination parameters.
    sc = scene.Scene(settings.object_input, settings.iluminacao_input)

    # 2.1) Move the light-source position from world to view coordinates.
    sc.pl = cam.to_view_coordinate_system(sc.pl)

    # 2.2) Project every object point into view coordinates.
    for point in sc.points:
        sc.view_coordinates.append(cam.to_view_coordinate_system(point))

    # 2.3/2.4) Point normals start at zero; each triangle's normalized face
    # normal is added into its three vertices, and a view-space Triangle
    # object is created per face (triangle indices are 1-based).
    for tri in sc.triangles:
        v1 = sc.view_coordinates[tri[0] - 1]
        v2 = sc.view_coordinates[tri[1] - 1]
        v3 = sc.view_coordinates[tri[2] - 1]

        face_normal = operations.vector.normalize(
            cam.get_triangle_normal(v1, v2, v3))

        sc.triangles_view_objects.append(
            Triangle(v1, v2, v3, norm=face_normal))

        for vertex_index in (tri[0], tri[1], tri[2]):
            sc.points_normal[vertex_index - 1] += face_normal

    # 2.5) Normalize the accumulated per-vertex normals.
    for i, accumulated in enumerate(sc.points_normal):
        sc.points_normal[i] = operations.vector.normalize(accumulated)

    # 2.6) Project each view-space point to 2D screen coordinates,
    # keeping the 3D view coordinates around.
    for vp in sc.view_coordinates:
        sc.screen_coordinates.append(cam.to_screen_coordinate_system(vp))

    # 2.7) Initialize the z-buffer.
    sc.init_zbuffer(width, height)

    # Convert the triangles to screen coordinates.
    sc.create_triangle_screen_objects()

    # 2.8) Scan-convert (rasterize) the screen-space triangles.
    sc.rasterize_screen_triangles(colors_to_randomize, random_factor)

    return sc
Example #5
0
# Render callback - invoked once per pixel
def render_pix(x, y, color):
    # Flip y: the raytracer's origin is bottom-left, the image's is top-left.
    img.put(color.toValidatedHexString(), (x, HEIGHT - y))
    # Refresh the canvas periodically so rendering progress is visible.
    if not x % 320:
        canvas.update()

# Build the window & canvas
mw = Tk()
# Tk() already *is* the root window; use the public title() API instead of
# reaching through the private _root() accessor.
mw.title("Raytracer")

cFrame = Frame(mw, width=WIDTH, height=HEIGHT)
cFrame.pack()
canvas = Canvas(cFrame, width=WIDTH, height=HEIGHT, bg="white")

# Backing image so individual pixels can be plotted
img = PhotoImage(width=WIDTH, height=HEIGHT)
canvas.create_image(0, 0, image=img, anchor=NW)
canvas.pack()

# Initialize the camera
camera = Camera(Point(0,2,10), Vector(0,1,0), Point(0,3,0), FIELD_OF_VIEW)
camera.setScreenSize(WIDTH, HEIGHT)

# Start rendering only after the canvas is visible
mw.wait_visibility()
camera.render(render_pix, objectList, lightList, level=RENDER_LEVEL)

# Enter the Tk event loop
mw.mainloop()
Example #6
0
def main():
    """Generate a synthetic soccer-field image dataset with Blender.

    Loads HDR environments, ball and grass assets, builds the scene
    (ball, goals, robots, shadowcatcher, field, stereo camera pair,
    anchor, random shapes), then for each frame randomizes the
    configuration, renders a raw image and a mask image (optionally depth
    and stereo), and writes a per-frame metadata file.

    Relies on module-level globals: util, scene_config, out_cfg, bpy,
    random, os, json, pi, and the Ball/Goal/Robot/ShadowCatcher/Field/
    Camera/CameraAnchor/Shape classes.
    """
    ##############################################
    ##              ASSET LOADING               ##
    ##############################################

    hdrs, balls, grasses = util.load_assets()

    ##############################################
    ##             ENVIRONMENT SETUP            ##
    ##############################################

    # Initial environment uses the first HDR; per-frame loop re-picks one.
    with open(hdrs[0]["info_path"], "r") as f:
        env_info = json.load(f)
    render_layer_toggle, world = util.setup_environment(hdrs[0], env_info)

    ##############################################
    ##            SCENE CONSTRUCTION            ##
    ##############################################

    # Construct our default UV sphere ball
    ball = Ball("Ball", scene_config.resources["ball"]["mask"]["index"],
                balls[0])

    # Construct our goals
    goals = [
        Goal(scene_config.resources["goal"]["mask"]["index"]),
        Goal(scene_config.resources["goal"]["mask"]["index"]),
    ]

    # Create robots to fill the scene
    # NOTE(review): range(num_robots + 1) — robot 0 carries the camera,
    # so num_robots "extra" robots populate the scene.
    robots = [
        Robot(
            "r{}".format(ii),
            scene_config.resources["robot"]["mask"]["index"],
            scene_config.resources["robot"],
        ) for ii in range(scene_config.num_robots + 1)
    ]

    # Construct our shadowcatcher
    shadowcatcher = ShadowCatcher()

    # Generate a new configuration for field configuration
    config = scene_config.configure_scene()

    # Construct our grass field
    field = Field(scene_config.resources["field"]["mask"]["index"])

    # Construct cameras
    cam_l = Camera("Camera_L")
    cam_r = Camera("Camera_R")

    # Set left camera to be parent camera
    # (and so all right camera movements are relative to the left camera position)
    cam_r.set_stereo_pair(cam_l.obj)

    # Attach camera to robot head (TODO: Remove hard-coded torso to cam offset)
    cam_l.obj.delta_rotation_euler = (pi / 2.0, 0.0, -pi / 2.0)
    cam_l.set_robot(robots[0].obj, robots[0].obj.location[2] + 0.33)
    # Disable rendering of head if camera is now inside
    robots[0].objs[robots[0].name + "_Head"].hide_render = True

    # Create camera anchor target for random field images
    anch = CameraAnchor()

    # Add randomly generated shapes into scene
    shapes = [
        Shape("s{}".format(ii), 0) for ii in range(scene_config.num_shapes)
    ]

    ##############################################
    ##               SCENE UPDATE               ##
    ##############################################

    # One iteration per output image; frame numbers are 1-based.
    for frame_num in range(1, out_cfg.num_images + 1):
        # Generate a new configuration
        config = scene_config.configure_scene()

        cam_l.update(config["camera"])

        # Update shapes
        for ii in range(len(shapes)):
            shapes[ii].update(config["shape"][ii])

        # Select the ball, environment, and grass to use
        hdr_data = random.choice(hdrs)
        ball_data = random.choice(balls)
        grass_data = random.choice(grasses)

        # Load the environment information
        with open(hdr_data["info_path"], "r") as f:
            env_info = json.load(f)

        # Semi-synthetic: the HDR supplies a real goal and/or field, so
        # those objects are not drawn synthetically.
        is_semi_synthetic = (not env_info["to_draw"]["goal"]
                             or not env_info["to_draw"]["field"])

        # In that case we must use the height provided by the file
        # (0.33 is the torso-to-camera offset hard-coded above)
        if is_semi_synthetic:
            config["robot"][0]["position"] = (
                0.0,
                0.0,
                env_info["position"]["z"] - 0.33,
            )

        # Calculate camera location
        camera_loc = (0.0, 0.0, env_info["position"]["z"])
        # Only move camera robot if we're generating the field
        robot_start = 1 if is_semi_synthetic else 0

        points_on_field = util.point_on_field(camera_loc,
                                              hdr_data["mask_path"], env_info,
                                              len(robots) + 1)
        print(points_on_field)
        for ii in range(robot_start, len(robots)):
            # If we are autoplacing update the configuration
            if (config["robot"][ii]["auto_position"] and is_semi_synthetic
                    and len(points_on_field) > 0):
                # Generate new ground point based on camera (actually robot parent of camera)
                config["robot"][ii]["position"] = (
                    points_on_field[ii][0],
                    points_on_field[ii][1],
                    env_info["position"]["z"] -
                    0.33 if ii == 0 else config["robot"][ii]["position"][2],
                )
            # Update robot (and camera)
            robots[ii].update(config["robot"][ii])

        # Update ball
        # If we are autoplacing update the configuration
        if (config["ball"]["auto_position"] and is_semi_synthetic
                and len(points_on_field) > 0):
            # Generate new ground point based on camera (actually robot parent of camera)
            config["ball"]["position"] = (
                points_on_field[0][0],
                points_on_field[0][1],
                config["ball"]["position"][2],
            )

        # Apply the updates
        field.update(grass_data, config["field"])
        ball.update(ball_data, config["ball"])

        # Update goals
        for g in goals:
            g.update(config["goal"])
        goals[1].rotate((0, 0, pi))
        # Square goals sit lower than round-post goals (offset scaled by post width)
        goal_height_offset = -3.0 if config["goal"][
            "shape"] == "square" else -1.0
        goals[0].move((
            config["field"]["length"] / 2.0,
            0,
            config["goal"]["height"] +
            goal_height_offset * config["goal"]["post_width"],
        ))
        goals[1].move((
            -config["field"]["length"] / 2.0,
            0,
            config["goal"]["height"] +
            goal_height_offset * config["goal"]["post_width"],
        ))

        # Hide objects based on environment map
        ball.obj.hide_render = not env_info["to_draw"]["ball"]
        field.hide_render(not env_info["to_draw"]["field"])
        goals[0].hide_render(not env_info["to_draw"]["goal"])
        goals[1].hide_render(not env_info["to_draw"]["goal"])

        # Update anchor
        anch.update(config["anchor"])

        # Set a tracking target randomly to anchor/ball or goal
        valid_tracks = []
        if env_info["to_draw"]["ball"]:  # Only track balls if it's rendered
            valid_tracks.append(ball)
        if env_info["to_draw"]["goal"]:  # Only track goals if they're rendered
            valid_tracks.append(random.choice(goals))
        if env_info["to_draw"][
                "field"]:  # Only pick random points if the field is rendered
            valid_tracks.append(anch)

        # NOTE(review): raises IndexError if nothing is drawn (empty valid_tracks)
        tracking_target = random.choice(valid_tracks).obj
        cam_l.set_tracking_target(tracking_target)
        robots[0].set_tracking_target(tracking_target)

        print('[INFO] Frame {0}: ball: "{1}", map: "{2}", target: {3}'.format(
            frame_num,
            os.path.basename(ball_data["colour_path"]),
            os.path.basename(hdr_data["raw_path"]),
            tracking_target.name,
        ))

        # Updates scene to rectify rotation and location matrices
        bpy.context.view_layer.update()

        ##############################################
        ##                RENDERING                 ##
        ##############################################

        filename = str(frame_num).zfill(out_cfg.filename_len)

        if out_cfg.output_depth:
            # Set depth filename
            render_layer_toggle[2].file_slots[0].path = filename + ".exr"

        # Render for the main camera only
        bpy.context.scene.camera = cam_l.obj

        # Use multiview stereo if stereo output is enabled
        # (this will automatically render the second camera)
        if out_cfg.output_stereo:
            bpy.context.scene.render.use_multiview = True

        # Render raw image
        util.render_image(
            isMaskImage=False,
            toggle=render_layer_toggle,
            shadowcatcher=shadowcatcher,
            world=world,
            env=env,  # NOTE(review): `env` is not defined in this function — presumably a module-level global; verify
            hdr_path=hdr_data["raw_path"],
            strength=config["environment"]["strength"],
            env_info=env_info,
            output_path=os.path.join(out_cfg.image_dir,
                                     "{}.png".format(filename)),
        )

        # Render mask image
        util.render_image(
            isMaskImage=True,
            toggle=render_layer_toggle,
            shadowcatcher=shadowcatcher,
            world=world,
            env=env,  # NOTE(review): same undefined-in-scope `env` as above
            hdr_path=hdr_data["mask_path"],
            strength=1.0,
            env_info=env_info,
            output_path=os.path.join(out_cfg.mask_dir,
                                     "{}.png".format(filename)),
        )

        if out_cfg.output_depth:
            # Rename our mis-named depth file(s) due to Blender's file output node naming scheme!
            if out_cfg.output_stereo:
                os.rename(
                    os.path.join(out_cfg.depth_dir, filename) + "_L.exr0001",
                    os.path.join(out_cfg.depth_dir, filename) + "_L.exr",
                )
                os.rename(
                    os.path.join(out_cfg.depth_dir, filename) + "_R.exr0001",
                    os.path.join(out_cfg.depth_dir, filename) + "_R.exr",
                )
            else:
                os.rename(
                    os.path.join(out_cfg.depth_dir, filename) + ".exr0001",
                    os.path.join(out_cfg.depth_dir, filename) + ".exr",
                )

        # Generate meta file
        # (written with json.dump below — valid, since JSON is a subset of YAML)
        with open(os.path.join(out_cfg.meta_dir, "{}.yaml".format(filename)),
                  "w") as meta_file:
            # Gather metadata
            meta = config

            meta.update({"rendered": env_info["to_draw"]})

            # Add basic camera information
            meta["camera"]["focus"] = tracking_target.name
            meta["camera"]["lens"] = {}
            meta["camera"]["lens"]["sensor_height"] = cam_l.cam.sensor_height
            meta["camera"]["lens"]["sensor_width"] = cam_l.cam.sensor_width

            # Add the final camera matrices
            if not out_cfg.output_stereo:
                meta["camera"]["matrix"] = util.matrix_to_list(
                    cam_l.obj.matrix_world)
            else:
                # Stereo: duplicate the camera info per eye, each with its own matrix
                template = meta["camera"]
                meta["camera"] = {
                    "left": {
                        **template,
                        "matrix":
                        util.matrix_to_list(cam_l.obj.matrix_world),
                    },
                    "right": {
                        **template,
                        "matrix":
                        util.matrix_to_list(cam_r.obj.matrix_world),
                    },
                }

            meta["environment"]["file"] = os.path.relpath(
                hdr_data["raw_path"], scene_config.res_path)

            # Write metadata to file
            json.dump(meta, meta_file, indent=4, sort_keys=True)
Example #7
0
def render_pix(x, y, color):
    """Per-pixel render callback: plot *color* at (x, y) on the backing image."""
    # Invert y because the image origin is at the top-left corner.
    img.put(color.toValidatedHexString(), (x, HEIGHT - y))
    if not x % 320:  # refresh periodically so progress is visible
        canvas.update()


# Build the window & canvas
mw = Tk()
# Tk() already *is* the root window; call the public title() method rather
# than going through the private _root() accessor.
mw.title("Raytracer")

cFrame = Frame(mw, width=WIDTH, height=HEIGHT)
cFrame.pack()
canvas = Canvas(cFrame, width=WIDTH, height=HEIGHT, bg="white")

# Backing image so individual pixels can be plotted
img = PhotoImage(width=WIDTH, height=HEIGHT)
canvas.create_image(0, 0, image=img, anchor=NW)
canvas.pack()

# Initialize the camera
camera = Camera(Point(0, 2, 10), Vector(0, 1, 0), Point(0, 3, 0),
                FIELD_OF_VIEW)
camera.setScreenSize(WIDTH, HEIGHT)

# Start rendering only after the canvas is visible
mw.wait_visibility()
camera.render(render_pix, objectList, lightList, level=RENDER_LEVEL)

# Enter the Tk event loop
mw.mainloop()