class Racer:
    """The player-controlled racer: loads its model, steps the driving
    simulation each frame, and renders itself on the terrain.

    Fix over the original: position/velocity/heading were *class* attributes;
    ``update()`` mutates them in place (``self.position[2] = …``), so every
    instance (and the class itself) shared and corrupted the same vectors.
    They are now created per-instance in ``__init__``.
    """

    # Immutable defaults — safe to keep at class level.
    speed = 0.0            # current scalar speed, updated each frame
    maxSpeedRoad = 100.0   # top speed on road material
    maxSpeedRough = 50.0   # top speed off-road
    zOffset = 3.0          # hover height above the terrain surface
    angvel = 2.0           # turn rate, radians per second
    terrain = None         # set by load()
    model = None           # set by load()

    def __init__(self):
        # Per-instance mutable state (shared class-level vec3s would be
        # mutated in place by update()).
        self.position = vec3(0, 0, 0)
        self.velocity = vec3(0, 0, 0)
        self.heading = vec3(1, 0, 0)
        self.speed = 0.0

    def render(self, view, renderingSystem):
        """Draw the racer's model oriented along its current heading."""
        # TODO 1.3: This is a good place to draw the racer model instead of the sphere
        #lu.drawSphere(self.position, 1.5, [1,0,0,1], view)
        #lu.make_mat4_from_zAxis(Translation, zAxis, yAxis)
        modelTransform = lu.make_mat4_from_zAxis(
            self.position, -self.heading, [0, 0, 1])  # set Z axis to heading
        renderingSystem.drawObjModel(self.model, modelTransform, view)

    def load(self, objModelName, terrain, renderingSystem):
        """Load the racer's obj model and place it at the first start location."""
        self.terrain = terrain
        # NOTE(review): this aliases the terrain's start-location vector and
        # update() mutates position[2] in place, so the stored start location
        # drifts too — copy here if terrain.startLocations must stay pristine.
        self.position = terrain.startLocations[0]
        # TODO 1.3: This is a good place create and load the racer model
        self.model = ObjModel(objModelName)

    def update(self, dt, keyStateMap):
        """Advance the driving simulation by dt seconds from the key states."""
        info = self.terrain.getInfoAt(self.position)
        # Select max speed based on material
        maxSpeed = self.maxSpeedRoad if info.material == TerrainInfo.M_Road else self.maxSpeedRough

        targetVel = vec3(0.0)
        if keyStateMap["UP"]:
            targetVel = self.heading * -maxSpeed
        if keyStateMap["DOWN"]:
            targetVel = self.heading * maxSpeed  # DOWN wins if both are held

        # linearly interpolate towards the target velocity - this means it is
        # tied to the frame rate, which kind of is bad.
        self.velocity = lu.mix(self.velocity, targetVel, 0.01)
        self.speed = lu.length(self.velocity)

        rotationMat = lu.Mat4()
        if keyStateMap["LEFT"]:
            rotationMat = lu.make_rotation_z(dt * self.angvel)
        if keyStateMap["RIGHT"]:
            rotationMat = lu.make_rotation_z(dt * -self.angvel)  # RIGHT wins
        self.heading = lu.Mat3(rotationMat) * self.heading

        # get height of ground at this point.
        self.position += self.velocity * dt
        # TODO 1.1: After the terrain height is correct, uncomment this line to make the racer follow the ground height
        self.position[2] = lu.mix(self.position[2], info.height + self.zOffset, 0.1)

    def drawUi(self):
        """Show the racer's current speed in the debug UI."""
        imgui.label_text("Speed", "%0.1fm/s" % self.speed)
class Prop: position = vec3(0, 0, 0) # start at 0,0,0 to make x-axis world rot easier facing = vec3(1, 0, 0) #world space "heading" rotAmount = 0.0 #how much to rotate by model = None #model to use terrain = None #not sure if I need this propType = 0 # propType will be used for prop specific scaling/etc zOffset = 3.0 def render(self, view, renderingSystem): #put switch statement here for each type of prop if ann where appropriate modelToWorldTransform = lu.make_mat4_from_zAxis( self.position, self.facing, vec3(0, 0, 1)) rotationByRandAmount = lu.make_rotation_z(self.rotAmount) renderingSystem.drawObjModel( self.model, rotationByRandAmount * modelToWorldTransform, view) def load(self, model, terrain, renderingSytem, position): self.model = model[0] self.propType = model[1] self.rotAmount = random.uniform(0.001, 6.28319) # ~0 to ~360 self.terrain = terrain #info = self.terrain.getInfoAt(position) # need to set position height to terrain height self.position = position def update(self): info = self.terrain.getInfoAt(self.position) self.position[2] = lu.mix(self.position[2], info.height + self.zOffset, 0.1)
def update(dt, keyStateMap, mouseDelta):
    """Per-frame game update: advance the sun angle, re-sample the lighting
    key-frames, step the racer simulation, and draw the debug UI.

    Fixes over the original: removed a duplicated ``global g_sunAngle``
    declaration and hoisted the twice-computed sun-elevation dot product
    into a local.

    Args:
        dt: frame time in seconds.
        keyStateMap: mapping of key name -> pressed bool, forwarded to the racer.
        mouseDelta: mouse movement this frame (unused here).
    """
    global g_sunPosition
    global g_sunAngle
    global g_globalAmbientLight
    global g_sunLightColour
    global g_updateSun
    global g_viewTarget
    global g_viewPosition
    global g_followCamOffset
    global g_followCamLookOffset

    if g_updateSun:
        g_sunAngle += dt * 0.25
        g_sunAngle = g_sunAngle % (2.0 * math.pi)

    # Rotate the sun's start position about the x-axis by the current angle.
    g_sunPosition = lu.Mat3(
        lu.make_rotation_x(g_sunAngle)) * g_sunStartPosition

    # Both colour tables are keyed on the sun's elevation (dot of the sun
    # direction with world up) — compute it once.
    sunElevation = lu.dot(lu.normalize(g_sunPosition), vec3(0.0, 0.0, 1.0))
    g_sunLightColour = sampleKeyFrames(sunElevation, g_sunKeyFrames)
    g_globalAmbientLight = sampleKeyFrames(sunElevation, g_ambientKeyFrames)

    g_racer.update(dt, keyStateMap)

    # TODO 1.2: Make the camera look at the racer. Code for updating the camera should be done after the
    # racer, otherwise the offset will lag and it generally looks weird.

    if imgui.tree_node("Camera", imgui.TREE_NODE_DEFAULT_OPEN):
        _, g_followCamOffset = imgui.slider_float("FollowCamOffset ",
                                                  g_followCamOffset, 2.0,
                                                  100.0)
        _, g_followCamLookOffset = imgui.slider_float("FollowCamLookOffset",
                                                      g_followCamLookOffset,
                                                      0.0, 100.0)
        imgui.tree_pop()
    if imgui.tree_node("Racer", imgui.TREE_NODE_DEFAULT_OPEN):
        g_racer.drawUi()
        imgui.tree_pop()
    if imgui.tree_node("Terrain", imgui.TREE_NODE_DEFAULT_OPEN):
        g_terrain.drawUi()
        imgui.tree_pop()
    if imgui.tree_node("Lighting", imgui.TREE_NODE_DEFAULT_OPEN):
        _, g_globalAmbientLight = lu.imguiX_color_edit3_list(
            "GlobalAmbientLight", g_globalAmbientLight
        )  #, imgui.GuiColorEditFlags_Float);// | ImGuiColorEditFlags_HSV);
        _, g_sunLightColour = lu.imguiX_color_edit3_list(
            "SunLightColour", g_sunLightColour
        )  #, imgui.GuiColorEditFlags_Float);// | ImGuiColorEditFlags_HSV);
        _, g_sunAngle = imgui.slider_float("SunAngle", g_sunAngle, 0.0,
                                           2.0 * math.pi)
        _, g_updateSun = imgui.checkbox("UpdateSun", g_updateSun)
        imgui.tree_pop()
def update(self, dt, keyStateMap):
    """Advance the racer one timestep: pick a target velocity from the keys,
    blend towards it, turn the heading, and follow the terrain height."""
    groundInfo = self.terrain.getInfoAt(self.position)
    # Road surfaces allow a higher top speed than rough terrain.
    onRoad = groundInfo.material == TerrainInfo.M_Road
    topSpeed = self.maxSpeedRoad if onRoad else self.maxSpeedRough

    desiredVelocity = vec3(0.0)
    if keyStateMap["UP"]:
        desiredVelocity = self.heading * topSpeed
    if keyStateMap["DOWN"]:
        desiredVelocity = self.heading * -topSpeed  # DOWN overrides UP

    # Blend towards the target velocity with a fixed factor — note this ties
    # acceleration to the frame rate.
    self.velocity = lu.mix(self.velocity, desiredVelocity, 0.01)
    self.speed = lu.length(self.velocity)

    turn = lu.Mat4()  # identity unless a turn key is held
    if keyStateMap["LEFT"]:
        turn = lu.make_rotation_z(dt * self.angvel)
    if keyStateMap["RIGHT"]:
        turn = lu.make_rotation_z(dt * -self.angvel)  # RIGHT overrides LEFT
    self.heading = lu.Mat3(turn) * self.heading

    # Integrate position, then ease the height towards the ground height.
    self.position += self.velocity * dt
    # TODO 1.1: After the terrain height is correct, uncomment this line to make the racer follow the ground height
    self.position[2] = lu.mix(self.position[2],
                              groundInfo.height + self.zOffset, 0.1)
def render(self, view, renderingSystem):
    """Draw this prop's model with its random z-rotation applied."""
    # A per-prop-type dispatch could go here once props need individual
    # scaling or effects.
    placement = lu.make_mat4_from_zAxis(self.position, self.facing,
                                        vec3(0, 0, 1))
    spin = lu.make_rotation_z(self.rotAmount)
    combined = spin * placement
    renderingSystem.drawObjModel(self.model, combined, view)
def getPosition(x, rotationSpeed, time):
    """Return the position of a point that starts at (x, 0, 0) and orbits the
    z-axis at `rotationSpeed` radians per unit time, evaluated at `time`.

    Cleanups over the original: the dead `+ 0 * 0` term is gone,
    `sqrt(x * x)` is written as `abs(x)`, and the parameter `x` is no longer
    rebound mid-function.
    """
    radius = abs(x)        # distance from the rotation axis
    theta = atan2(0.0, x)  # 0 for x >= 0, pi for x < 0: preserves start side
    theta += time * rotationSpeed
    return lu.vec3(radius * cos(theta), radius * sin(theta), 0)
def render(self, view, renderingSystem):
    """Draw this prop, first snapping its height onto the terrain.

    NOTE(review): this mutates self.position[2] inside a draw call (a side
    effect in render); with weight 1, lu.mix presumably just assigns the
    target height directly — confirm lu.mix semantics.
    """
    getInfo = self.terrain.getInfoAt(self.position)
    self.position[2] = lu.mix(self.position[2], getInfo.height - self.zOffset,\
        1)
    rotation = lu.make_rotation_z(self.randRot)
    # NOTE(review): `rotation` is the left-most factor, i.e. applied after the
    # translation — the prop rotates about the world origin, not in place.
    modelToWorldTransform = lu.make_mat4_from_zAxis(self.position, self.heading, \
        vec3(0,0,1))
    renderingSystem.drawObjModel(self.model, rotation * modelToWorldTransform, view)
class Prop:
    """A static decoration placed at a random location with a random spin."""

    position = vec3(0, 0, 0)
    heading = vec3(1, 0, 0)
    rotation = 0.0  # random spin, in radians
    zOffset = 3.0
    angvel = 2.0
    model = None

    def render(self, view, renderingSystem):
        """Draw the prop with its random rotation applied in model space."""
        modelToWorldTransform = lu.make_mat4_from_zAxis(
            self.position, self.heading, [0.0, 0.0, 1.0])
        # Rotation is the right-most factor, i.e. applied in model space.
        # NOTE(review): this spins about the model's y-axis; other prop code
        # in this project rotates about z — confirm the intended axis.
        rotationTransform = lu.make_rotation_y(self.rotation)
        renderingSystem.drawObjModel(
            self.model, modelToWorldTransform * rotationTransform, view)

    def load(self, model, locations):
        """Pick a random placement from `locations` and a random rotation."""
        self.position = random.choice(locations)
        # Fix: lu.make_rotation_* takes radians (cf. make_rotation_z(dt * angvel)
        # elsewhere in this file, and the other Prop variants using 0..6.28).
        # The original drew an integer number of *degrees* (0..359) and passed
        # it straight in as radians; draw a uniform angle in [0, 2*pi) instead.
        self.rotation = random.uniform(0.0, 6.283185307179586)
        self.model = model
class Props:
    """A decorative object snapped to the terrain with a random z-rotation.

    NOTE: the defaults below are class attributes; the class-level randRot is
    evaluated once at class definition (shared by all instances), but load()
    replaces it with a fresh per-instance value.
    """
    position = vec3(0, 0, 0)
    heading = vec3(1, 0, 0)
    ##Random Rotation for z variable
    randRot = random.uniform(0, 6.28)
    terrain = None
    model = None
    zOffset = 1.0  # sunk below terrain height (subtracted in render)

    def render(self, view, renderingSystem):
        """Draw the prop, first snapping its height onto the terrain.

        NOTE(review): mutates self.position[2] inside a draw call; with
        weight 1, lu.mix presumably assigns the target height directly —
        confirm lu.mix semantics.
        """
        getInfo = self.terrain.getInfoAt(self.position)
        self.position[2] = lu.mix(self.position[2], getInfo.height - self.zOffset,\
            1)
        rotation = lu.make_rotation_z(self.randRot)
        # NOTE(review): `rotation` is applied after the translation, so the
        # prop rotates about the world origin rather than spinning in place.
        modelToWorldTransform = lu.make_mat4_from_zAxis(self.position, self.heading, \
            vec3(0,0,1))
        renderingSystem.drawObjModel(self.model, rotation * modelToWorldTransform, view)

    def load(self, objModelName, terrain, position, renderingSystem):
        """Load the prop's obj model and store its terrain and position."""
        self.terrain = terrain
        self.model = ObjModel(objModelName)
        self.position = position
        self.randRot = random.uniform(0, 6.28)  #0 to 2pi
def renderFrame(uiWidth, width, height):
    """Render one frame: clear the framebuffer, set up the orbit camera and
    projection, then draw the court and two debug spheres (marker + light).

    Args:
        uiWidth: width of the UI panel (unused in this function).
        width, height: framebuffer size in pixels.
    """
    global g_triangleVerts
    global g_cameraDistance
    global g_cameraYawDeg
    global g_cameraPitchDeg
    global g_yFovDeg
    global g_lightYaw
    global g_lightDistance
    global g_lightPitch

    # This configures the fixed-function transformation from Normalized Device Coordinates (NDC)
    # to the screen (pixels - called 'window coordinates' in OpenGL documentation).
    # See: https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glViewport.xhtml
    glViewport(0, 0, width, height)
    # Set the colour we want the frame buffer cleared to,
    glClearColor(0.2, 0.5, 0.1, 1.0)
    # Tell OpenGL to clear the render target to the clear values for both depth and colour buffers (depth uses the default)
    glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT)

    viewToClipTransform = lu.make_perspective(g_yFovDeg, width / height, 0.1,
                                              50.0)

    # Orbit camera: yaw about y, pitch about x, then back off along z.
    eyePos = lu.Mat3(lu.make_rotation_y(
        math.radians(g_cameraYawDeg))) * lu.Mat3(
            lu.make_rotation_x(
                math.radians(g_cameraPitchDeg))) * [0, 0, g_cameraDistance]
    worldToViewTransform = magic.make_lookAt(eyePos, [0, 0, 0], [0, 1, 0])
    worldToClipTransform = viewToClipTransform * worldToViewTransform

    # NOTE(review): the original built a light rotation matrix from
    # g_lightYaw/g_lightPitch but never applied it to the light position —
    # dead code, removed here. If a movable light is intended, apply that
    # rotation to lightPosition.
    lightPosition = [0.02, 0, 0] + lu.vec3(0, 23, 0.2)

    draw_court(worldToClipTransform, lightPosition)
    lu.drawSphere([0.23, -0.45, 0.1, 0], 0.007, [0, 1, 0, 0.5],
                  viewToClipTransform, worldToViewTransform)
    lu.drawSphere(lightPosition, 0.01, [1, 1, 0, 1], viewToClipTransform,
                  worldToViewTransform)
# --- Global configuration and state ---------------------------------------

# Clear colour and camera projection parameters.
g_backGroundColour = [0.1, 0.2, 0.1]
g_fov = 60.0
g_nearDistance = 0.2
g_farDistance = 2000.0

# Camera view state.
g_viewPosition = [100.0, 100.0, 100.0]
g_viewTarget = [0.0, 0.0, 0.0]
g_viewUp = [0.0, 0.0, 1.0]

# Follow-camera tuning: offset behind the racer and look-at offset.
g_followCamOffset = 34.0  #looks better!
g_followCamLookOffset = 10.0

# Sun / lighting state.
g_sunStartPosition = [0.0, 0.0, 1000.0]
# NOTE(review): aliases the same list object as g_sunStartPosition until the
# first update reassigns g_sunPosition.
g_sunPosition = g_sunStartPosition
g_globalAmbientLight = vec3(0.045, 0.045, 0.045)
g_sunLightColour = [0.9, 0.8, 0.7]
g_updateSun = True
g_sunAngle = 0.0

# Shadow-mapping resources, initialised elsewhere.
g_fbo = None
g_shadowTexId = None
g_shadowShader = None

# World objects, created at load time.
g_terrain = None
g_racer = None
g_props = []
g_TU_shadow = None  # presumably the texture unit index for the shadow map — confirm

#
# Key-frames for the sun light and ambient, picked by hand-waving to look ok. Note how most of this is nonsense from a physical point of view and
# some of the reasoning is basically to compensate for the lack of exposure (or tone-mapping).
def load(self, imageName, renderingSystem):
    """Load the terrain heightmap image, build the terrain mesh (vertices,
    normals, triangle indices), upload it, compile the terrain shader and
    load the terrain textures.

    Per-pixel image encoding (as used by this code):
      - red channel: terrain height, 0..255 scaled by self.heightScale
      - green channel: markers — 255 = start location, 128 = tree, 64 = rock
    """
    with Image.open(imageName) as im:
        self.imageWidth = im.size[0]
        self.imageHeight = im.size[1]
        # Expand to 4 channels; the -1 stride flips rows vertically.
        self.imageData = im.tobytes("raw",
                                    "RGBX" if im.mode == 'RGB' else "RGBA", 0,
                                    -1)

    # Centre the terrain on the origin in the xy-plane.
    xyOffset = -vec2(float(self.imageWidth), float(
        self.imageHeight)) * self.xyScale / 2.0

    # Calculate vertex positions
    terrainVerts = []
    for j in range(self.imageHeight):
        for i in range(self.imageWidth):
            offset = (j * self.imageWidth + i) * 4
            # copy pixel 4 channels
            imagePixel = self.imageData[offset:offset + 4]
            # Normalize the red channel from [0,255] to [0.0, 1.0]
            red = float(imagePixel[0]) / 255.0
            xyPos = vec2(i, j) * self.xyScale + xyOffset
            # TODO 1.1: set the height
            #zPos = 0.0
            zPos = red * self.heightScale  # Change from zero for 1.1
            pt = vec3(xyPos[0], xyPos[1], zPos)
            terrainVerts.append(pt)

            # Green channel marks special locations.
            green = imagePixel[1]
            if green == 255:
                self.startLocations.append(pt)
            if green == 128:
                self.treeLocations.append(pt)
            if green == 64:
                self.rockLocations.append(pt)

    # build vertex normals...
    # Border vertices keep the default up normal; interior normals average
    # two central-difference cross products (axis-aligned and diagonal).
    terrainNormals = [vec3(0.0, 0.0, 1.0)
                      ] * self.imageWidth * self.imageHeight
    for j in range(1, self.imageHeight - 1):
        for i in range(1, self.imageWidth - 1):
            v = terrainVerts[j * self.imageWidth + i]  # NOTE(review): unused
            vxP = terrainVerts[j * self.imageWidth + i - 1]
            vxN = terrainVerts[j * self.imageWidth + i + 1]
            dx = vxP - vxN
            vyP = terrainVerts[(j - 1) * self.imageWidth + i]
            vyN = terrainVerts[(j + 1) * self.imageWidth + i]
            dy = vyP - vyN
            nP = lu.normalize(lu.cross(dx, dy))
            vdxyP = terrainVerts[(j - 1) * self.imageWidth + i - 1]
            vdxyN = terrainVerts[(j + 1) * self.imageWidth + i + 1]
            dxy = vdxyP - vdxyN
            vdyxP = terrainVerts[(j - 1) * self.imageWidth + i + 1]
            vdyxN = terrainVerts[(j + 1) * self.imageWidth + i - 1]
            dyx = vdyxP - vdyxN
            nD = lu.normalize(lu.cross(dxy, dyx))
            terrainNormals[j * self.imageWidth + i] = lu.normalize(nP + nD)

    # join verts with quads that is: 2 triangles @ 3 vertices, with one less in each direction.
    terrainInds = [0] * 2 * 3 * (self.imageWidth - 1) * (self.imageHeight - 1)
    for j in range(0, self.imageHeight - 1):
        for i in range(0, self.imageWidth - 1):
            # Vertex indices to the four corners of the quad.
            qInds = [
                j * self.imageWidth + i,
                j * self.imageWidth + i + 1,
                (j + 1) * self.imageWidth + i,
                (j + 1) * self.imageWidth + i + 1,
            ]
            outOffset = 3 * 2 * (j * (self.imageWidth - 1) + i)
            points = [  # NOTE(review): unused
                terrainVerts[qInds[0]],
                terrainVerts[qInds[1]],
                terrainVerts[qInds[2]],
                terrainVerts[qInds[3]],
            ]
            # output first triangle:
            terrainInds[outOffset + 0] = qInds[0]
            terrainInds[outOffset + 1] = qInds[1]
            terrainInds[outOffset + 2] = qInds[2]
            # second triangle
            terrainInds[outOffset + 3] = qInds[2]
            terrainInds[outOffset + 4] = qInds[1]
            terrainInds[outOffset + 5] = qInds[3]
    self.terrainInds = terrainInds

    # Upload mesh data: positions bound to attribute 0, normals to 1.
    self.vertexArrayObject = lu.createVertexArrayObject()
    self.vertexDataBuffer = lu.createAndAddVertexArrayData(
        self.vertexArrayObject, terrainVerts, 0)
    self.normalDataBuffer = lu.createAndAddVertexArrayData(
        self.vertexArrayObject, terrainNormals, 1)
    self.indexDataBuffer = lu.createAndAddIndexArray(
        self.vertexArrayObject, terrainInds)
    #normalDataBuffer = createAndAddVertexArrayData<vec4>(g_particleVao, { vec4(0.0f) }, 1);

    vertexShader = """
#version 330
in vec3 positionIn;
in vec3 normalIn;

uniform mat4 modelToClipTransform;
uniform mat4 modelToViewTransform;
uniform mat3 modelToViewNormalTransform;
uniform float terrainHeightScale;
uniform float terrainTextureXyScale;
uniform vec2 xyNormScale;
uniform vec2 xyOffset;
//2.2
uniform mat4 worldToViewTransform;

// 'out' variables declared in a vertex shader can be accessed in the subsequent stages.
// For a fragment shader the variable is interpolated (the type of interpolation can be modified, try placing 'flat' in front here and in the fragment shader!).
out VertexData
{
    float v2f_height;
    vec3 v2f_viewSpacePosition;
    vec3 v2f_viewSpaceNormal;
    vec3 v2f_worldSpacePosition;
    vec3 v2f_worldSpaceNormal; // 2.1 - Steep
    vec2 v2f_xyNormScale; // 2.1 - Road
    vec2 v2f_xyOffset; // 2.1 - Road
    // 2.2
    vec3 cameraPosition;
    vec3 cameraToPointVector;
};

void main()
{
    // pass the world-space Z to the fragment shader, as it is used to compute the colour and other things
    v2f_height = positionIn.z;
    v2f_worldSpacePosition = positionIn;
    v2f_viewSpacePosition = (modelToViewTransform * vec4(positionIn, 1.0)).xyz;
    v2f_viewSpaceNormal = modelToViewNormalTransform * normalIn;
    v2f_worldSpaceNormal = normalIn; //2.1 - Steep
    v2f_xyNormScale = xyNormScale; // 2.1 - Road
    v2f_xyOffset = xyOffset; // 2.1 - Road
    //2.2
    cameraPosition = vec3(worldToViewTransform[3][0],worldToViewTransform[3][1],worldToViewTransform[3][2]);
    cameraToPointVector = normalize(positionIn - cameraPosition);

    // gl_Position is a buit-in 'out'-variable that gets passed on to the clipping and rasterization stages (hardware fixed function).
    // it must be written by the vertex shader in order to produce any drawn geometry.
    // We transform the position using one matrix multiply from model to clip space. Note the added 1 at the end of the position to make the 3D
    // coordinate homogeneous.
    gl_Position = modelToClipTransform * vec4(positionIn, 1.0);
}
"""

    fragmentShader = """
// Input from the vertex shader, will contain the interpolated (i.e., area weighted average) vaule out put for each of the three vertex shaders that
// produced the vertex data for the triangle this fragmet is part of.
in VertexData
{
    float v2f_height;
    vec3 v2f_viewSpacePosition;
    vec3 v2f_viewSpaceNormal;
    vec3 v2f_worldSpacePosition;
    vec3 v2f_worldSpaceNormal; //2.1 - Steep
    vec2 v2f_xyNormScale; // 2.1 - Road
    vec2 v2f_xyOffset; // 2.1 - Road
    // 2.2
    vec3 cameraPosition;
    vec3 cameraToPointVector;
};

uniform float terrainHeightScale;
uniform float terrainTextureXyScale;

// 1.4
uniform sampler2D grassTexture;
// Olympics
uniform sampler2D wallTexture;
uniform sampler2D seatsTexture;
uniform sampler2D trackTexture;
uniform sampler2D mapTexture;
uniform sampler2D concreteTexture;

out vec4 fragmentColor;

void main()
{
    vec3 materialColour = vec3(v2f_height/terrainHeightScale); // Default colour
    vec3 concreteColour = texture(concreteTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
    materialColour = concreteColour;

    // 2.1
    float slope = dot(v2f_worldSpaceNormal, vec3(v2f_worldSpaceNormal.x, 0.0, v2f_worldSpaceNormal.z)); //Steep
    float blueChannel = texture(mapTexture, (v2f_worldSpacePosition.xy - v2f_xyOffset) * v2f_xyNormScale).z; //Road

    // Track Texture
    if (blueChannel >= 0.9)
    {
        vec3 trackColour = texture(trackTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
        materialColour = trackColour;
    // Grass texture
    } else if (v2f_height < 1) {
        vec3 grassColour = texture(grassTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
        materialColour = grassColour;
    // Wall/Banner texture
    } else if ((v2f_height < 11) && (slope < 0.2)) {
        vec3 wallColour = texture(wallTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
        materialColour = wallColour;
    // Seats
    } else if (slope < 0.2) {
        vec3 seatsColour = texture(seatsTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
        materialColour = seatsColour;
    }

    vec3 reflectedLight = computeShading(materialColour, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour);
    //fragmentColor = vec4(toSrgb(reflectedLight), 1.0); // before 2.2
    //fragmentColor = vec4(toSrgb(vec3(v2f_height/terrainHeightScale)), 1.0); //start??
    //2.2 - Fog
    //fragmentColor = vec4(toSrgb(applyFog(reflectedLight, -v2f_viewSpacePosition.z)), 1.0); // basic fog
    fragmentColor = vec4(toSrgb(applyFog(reflectedLight, -v2f_viewSpacePosition.z, cameraPosition, cameraToPointVector)), 1.0);
}
"""

    # Note how we provide lists of source code strings for the two shader stages.
    # This is basically the only standard way to 'include' or 'import' code into more than one shader. The variable renderingSystem.commonFragmentShaderCode
    # contains code that we wish to use in all the fragment shaders, for example code to transform the colour output to srgb.
    # It is also a nice place to put code to compute lighting and other effects that should be the same accross the terrain and racer for example.
    self.shader = lu.buildShader([vertexShader], [
        "#version 330\n", renderingSystem.commonFragmentShaderCode,
        fragmentShader
    ], {
        "positionIn": 0,
        "normalIn": 1
    })

    # TODO 1.4: Load texture and configure the sampler
    self.grassTexture = ObjModel.loadTexture("grass.png", "data", True)
    # Olympics
    self.wallTexture = ObjModel.loadTexture("banner.png", "data", True)
    self.seatsTexture = ObjModel.loadTexture("seats.png", "data", True)
    self.trackTexture = ObjModel.loadTexture("track.png", "data", True)
    self.mapTexture = ObjModel.loadTexture("map.png", "data", False)
    self.concreteTexture = ObjModel.loadTexture("concrete.jpg", "data", True)
def load(self, imageName, renderingSystem):
    """Load the terrain heightmap image, build the terrain mesh (vertices,
    normals, triangle indices), upload it, and compile the terrain shader.

    Per-pixel image encoding (as used by this code):
      - red channel: terrain height, 0..255 scaled by self.heightScale
      - green channel: markers — 255 = start location, 128 = tree, 64 = rock
    """
    with Image.open(imageName) as im:
        self.imageWidth = im.size[0]
        self.imageHeight = im.size[1]
        # Expand to 4 channels; the -1 stride flips rows vertically.
        self.imageData = im.tobytes("raw",
                                    "RGBX" if im.mode == 'RGB' else "RGBA", 0,
                                    -1)

    # Centre the terrain on the origin in the xy-plane.
    xyOffset = -vec2(float(self.imageWidth), float(
        self.imageHeight)) * self.xyScale / 2.0

    # Calculate vertex positions
    terrainVerts = []
    for j in range(self.imageHeight):
        for i in range(self.imageWidth):
            offset = (j * self.imageWidth + i) * 4
            # copy pixel 4 channels
            imagePixel = self.imageData[offset:offset + 4]
            # Normalize the red channel from [0,255] to [0.0, 1.0]
            red = float(imagePixel[0]) / 255.0
            xyPos = vec2(i, j) * self.xyScale + xyOffset
            # TODO 1.1: set the height
            zPos = self.heightScale * red
            pt = vec3(xyPos[0], xyPos[1], zPos)
            terrainVerts.append(pt)

            # Green channel marks special locations.
            green = imagePixel[1]
            if green == 255:
                self.startLocations.append(pt)
            if green == 128:
                self.treeLocations.append(pt)
            if green == 64:
                self.rockLocations.append(pt)

    # build vertex normals...
    # Border vertices keep the default up normal; interior normals average
    # two central-difference cross products (axis-aligned and diagonal).
    terrainNormals = [vec3(0.0, 0.0, 1.0)
                      ] * self.imageWidth * self.imageHeight
    for j in range(1, self.imageHeight - 1):
        for i in range(1, self.imageWidth - 1):
            v = terrainVerts[j * self.imageWidth + i]  # NOTE(review): unused
            vxP = terrainVerts[j * self.imageWidth + i - 1]
            vxN = terrainVerts[j * self.imageWidth + i + 1]
            dx = vxP - vxN
            vyP = terrainVerts[(j - 1) * self.imageWidth + i]
            vyN = terrainVerts[(j + 1) * self.imageWidth + i]
            dy = vyP - vyN
            nP = lu.normalize(lu.cross(dx, dy))
            vdxyP = terrainVerts[(j - 1) * self.imageWidth + i - 1]
            vdxyN = terrainVerts[(j + 1) * self.imageWidth + i + 1]
            dxy = vdxyP - vdxyN
            vdyxP = terrainVerts[(j - 1) * self.imageWidth + i + 1]
            vdyxN = terrainVerts[(j + 1) * self.imageWidth + i - 1]
            dyx = vdyxP - vdyxN
            nD = lu.normalize(lu.cross(dxy, dyx))
            terrainNormals[j * self.imageWidth + i] = lu.normalize(nP + nD)

    # join verts with quads that is: 2 triangles @ 3 vertices, with one less in each direction.
    terrainInds = [0] * 2 * 3 * (self.imageWidth - 1) * (self.imageHeight - 1)
    for j in range(0, self.imageHeight - 1):
        for i in range(0, self.imageWidth - 1):
            # Vertex indices to the four corners of the quad.
            qInds = [
                j * self.imageWidth + i,
                j * self.imageWidth + i + 1,
                (j + 1) * self.imageWidth + i,
                (j + 1) * self.imageWidth + i + 1,
            ]
            outOffset = 3 * 2 * (j * (self.imageWidth - 1) + i)
            points = [  # NOTE(review): unused
                terrainVerts[qInds[0]],
                terrainVerts[qInds[1]],
                terrainVerts[qInds[2]],
                terrainVerts[qInds[3]],
            ]
            # output first triangle:
            terrainInds[outOffset + 0] = qInds[0]
            terrainInds[outOffset + 1] = qInds[1]
            terrainInds[outOffset + 2] = qInds[2]
            # second triangle
            terrainInds[outOffset + 3] = qInds[2]
            terrainInds[outOffset + 4] = qInds[1]
            terrainInds[outOffset + 5] = qInds[3]
    self.terrainInds = terrainInds

    # Upload mesh data: positions bound to attribute 0, normals to 1.
    self.vertexArrayObject = lu.createVertexArrayObject()
    self.vertexDataBuffer = lu.createAndAddVertexArrayData(
        self.vertexArrayObject, terrainVerts, 0)
    self.normalDataBuffer = lu.createAndAddVertexArrayData(
        self.vertexArrayObject, terrainNormals, 1)
    self.indexDataBuffer = lu.createAndAddIndexArray(
        self.vertexArrayObject, terrainInds)
    #normalDataBuffer = createAndAddVertexArrayData<vec4>(g_particleVao, { vec4(0.0f) }, 1);

    vertexShader = """
#version 330
in vec3 positionIn;
in vec3 normalIn;

uniform mat4 modelToClipTransform;
uniform mat4 modelToViewTransform;
uniform mat3 modelToViewNormalTransform;
uniform float terrainHeightScale;
uniform float terrainTextureXyScale;
uniform vec2 xyNormScale;
uniform vec2 xyOffset;

// 'out' variables declared in a vertex shader can be accessed in the subsequent stages.
// For a fragment shader the variable is interpolated (the type of interpolation can be modified, try placing 'flat' in front here and in the fragment shader!).
out VertexData
{
    float v2f_height;
    vec3 v2f_viewSpacePosition;
    vec3 v2f_viewSpaceNormal;
    vec3 v2f_worldSpacePosition;
};

void main()
{
    // pass the world-space Z to the fragment shader, as it is used to compute the colour and other things
    v2f_height = positionIn.z;
    v2f_worldSpacePosition = positionIn;
    v2f_viewSpacePosition = (modelToViewTransform * vec4(positionIn, 1.0)).xyz;
    v2f_viewSpaceNormal = modelToViewNormalTransform * normalIn;

    // gl_Position is a buit-in 'out'-variable that gets passed on to the clipping and rasterization stages (hardware fixed function).
    // it must be written by the vertex shader in order to produce any drawn geometry.
    // We transform the position using one matrix multiply from model to clip space. Note the added 1 at the end of the position to make the 3D
    // coordinate homogeneous.
    gl_Position = modelToClipTransform * vec4(positionIn, 1.0);
}
"""

    fragmentShader = """
// Input from the vertex shader, will contain the interpolated (i.e., area weighted average) vaule out put for each of the three vertex shaders that
// produced the vertex data for the triangle this fragmet is part of.
in VertexData
{
    float v2f_height;
    vec3 v2f_viewSpacePosition;
    vec3 v2f_viewSpaceNormal;
    vec3 v2f_worldSpacePosition;
};

uniform float terrainHeightScale;
uniform float terrainTextureXyScale;

out vec4 fragmentColor;

void main()
{
    vec3 materialColour = vec3(v2f_height/terrainHeightScale);
    // TODO 1.4: Compute the texture coordinates and sample the texture for the grass and use as material colour.

    vec3 reflectedLight = computeShading(materialColour, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour);
    fragmentColor = vec4(toSrgb(reflectedLight), 1.0);
    //fragmentColor = vec4(toSrgb(vec3(v2f_height/terrainHeightScale)), 1.0);
}
"""

    # Note how we provide lists of source code strings for the two shader stages.
    # This is basically the only standard way to 'include' or 'import' code into more than one shader. The variable renderingSystem.commonFragmentShaderCode
    # contains code that we wish to use in all the fragment shaders, for example code to transform the colour output to srgb.
    # It is also a nice place to put code to compute lighting and other effects that should be the same accross the terrain and racer for example.
    self.shader = lu.buildShader([vertexShader], [
        "#version 330\n", renderingSystem.commonFragmentShaderCode,
        fragmentShader
    ], {
        "positionIn": 0,
        "normalIn": 1
    })

    # TODO 1.4: Load texture and configure the sampler
    # NOTE(review): the returned texture id is discarded, the third argument
    # is self.imageData where other call sites pass a bool, and glUniform1i is
    # called without glUseProgram binding this shader first — confirm this
    # texture setup actually takes effect.
    ObjModel.loadTexture('grass2.png', 'F:\COSC3000_GC\Project\mega_racer\data',
                         self.imageData)
    loc = glGetUniformLocation(self.shader, "someTexture")
    glUniform1i(loc, 0)
def load(self, imageName, renderingSystem): with Image.open(imageName) as im: self.imageWidth = im.size[0] self.imageHeight = im.size[1] self.imageData = im.tobytes("raw", "RGBX" if im.mode == 'RGB' else "RGBA", 0, -1) xyOffset = -vec2(float(self.imageWidth), float( self.imageHeight)) * self.xyScale / 2.0 # Calculate vertex positions terrainVerts = [] for j in range(self.imageHeight): for i in range(self.imageWidth): offset = (j * self.imageWidth + i) * 4 # copy pixel 4 channels imagePixel = self.imageData[offset:offset + 4] # Normalize the red channel from [0,255] to [0.0, 1.0] red = float(imagePixel[0]) / 255.0 xyPos = vec2(i, j) * self.xyScale + xyOffset # TODO 1.1: set the height zPos = self.heightScale * red pt = vec3(xyPos[0], xyPos[1], zPos) terrainVerts.append(pt) green = imagePixel[1] if green == 255: self.startLocations.append(pt) if green == 128: self.treeLocations.append(pt) if green == 64: self.rockLocations.append(pt) # build vertex normals... terrainNormals = [vec3(0.0, 0.0, 1.0) ] * self.imageWidth * self.imageHeight for j in range(1, self.imageHeight - 1): for i in range(1, self.imageWidth - 1): v = terrainVerts[j * self.imageWidth + i] vxP = terrainVerts[j * self.imageWidth + i - 1] vxN = terrainVerts[j * self.imageWidth + i + 1] dx = vxP - vxN vyP = terrainVerts[(j - 1) * self.imageWidth + i] vyN = terrainVerts[(j + 1) * self.imageWidth + i] dy = vyP - vyN nP = lu.normalize(lu.cross(dx, dy)) vdxyP = terrainVerts[(j - 1) * self.imageWidth + i - 1] vdxyN = terrainVerts[(j + 1) * self.imageWidth + i + 1] dxy = vdxyP - vdxyN vdyxP = terrainVerts[(j - 1) * self.imageWidth + i + 1] vdyxN = terrainVerts[(j + 1) * self.imageWidth + i - 1] dyx = vdyxP - vdyxN nD = lu.normalize(lu.cross(dxy, dyx)) terrainNormals[j * self.imageWidth + i] = lu.normalize(nP + nD) # join verts with quads that is: 2 triangles @ 3 vertices, with one less in each direction. 
terrainInds = [0] * 2 * 3 * (self.imageWidth - 1) * (self.imageHeight - 1) for j in range(0, self.imageHeight - 1): for i in range(0, self.imageWidth - 1): # Vertex indices to the four corners of the quad. qInds = [ j * self.imageWidth + i, j * self.imageWidth + i + 1, (j + 1) * self.imageWidth + i, (j + 1) * self.imageWidth + i + 1, ] outOffset = 3 * 2 * (j * (self.imageWidth - 1) + i) points = [ terrainVerts[qInds[0]], terrainVerts[qInds[1]], terrainVerts[qInds[2]], terrainVerts[qInds[3]], ] # output first triangle: terrainInds[outOffset + 0] = qInds[0] terrainInds[outOffset + 1] = qInds[1] terrainInds[outOffset + 2] = qInds[2] # second triangle terrainInds[outOffset + 3] = qInds[2] terrainInds[outOffset + 4] = qInds[1] terrainInds[outOffset + 5] = qInds[3] self.terrainInds = terrainInds self.vertexArrayObject = lu.createVertexArrayObject() self.vertexDataBuffer = lu.createAndAddVertexArrayData( self.vertexArrayObject, terrainVerts, 0) self.normalDataBuffer = lu.createAndAddVertexArrayData( self.vertexArrayObject, terrainNormals, 1) self.indexDataBuffer = lu.createAndAddIndexArray( self.vertexArrayObject, terrainInds) #normalDataBuffer = createAndAddVertexArrayData<vec4>(g_particleVao, { vec4(0.0f) }, 1); vertexShader = """ #version 330 in vec3 positionIn; in vec3 normalIn; uniform mat4 worldToViewTransform; uniform mat4 modelToClipTransform; uniform mat4 modelToViewTransform; uniform mat3 modelToViewNormalTransform; uniform mat4 lightPOVTransform; uniform sampler2D terrainDataSampler; uniform float terrainHeightScale; uniform float terrainTextureXyScale; uniform vec2 xyNormScale; uniform vec2 xyOffset; // 'out' variables declared in a vertex shader can be accessed in the subsequent stages. // For a fragment shader the variable is interpolated (the type of interpolation can be modified, try placing 'flat' in front here and in the fragment shader!). 
out VertexData { float v2f_height; vec3 v2f_viewSpacePosition; vec3 v2f_viewSpaceNormal; vec3 v2f_worldSpacePosition; vec2 normalizedXYcoords; float distance; vec3 viewToVertexPosition; vec3 worldSpaceNormal; vec4 fragPosLightSpace; vec3 cameraPosInWorldSpace; }; void main() { // pass the world-space Z to the fragment shader, as it is used to compute the colour and other things v2f_height = positionIn.z; v2f_worldSpacePosition = positionIn; v2f_viewSpacePosition = (modelToViewTransform * vec4(positionIn, 1.0)).xyz; v2f_viewSpaceNormal = modelToViewNormalTransform * normalIn; worldSpaceNormal = normalIn; normalizedXYcoords = positionIn.xy * xyNormScale + xyOffset; distance = -v2f_viewSpacePosition.z; //first use the worldToViewTransform to get the camera world space coords cameraPosInWorldSpace = vec3(worldToViewTransform[3][0],worldToViewTransform[3][1],worldToViewTransform[3][2]); viewToVertexPosition = normalize(positionIn - cameraPosInWorldSpace); // gl_Position is a buit-in 'out'-variable that gets passed on to the clipping and rasterization stages (hardware fixed function). // it must be written by the vertex shader in order to produce any drawn geometry. // We transform the position using one matrix multiply from model to clip space. Note the added 1 at the end of the position to make the 3D // coordinate homogeneous. fragPosLightSpace = lightPOVTransform * vec4(positionIn, 1.0); gl_Position = modelToClipTransform * vec4(positionIn, 1.0); } """ fragmentShader = """ // Input from the vertex shader, will contain the interpolated (i.e., area weighted average) vaule out put for each of the three vertex shaders that // produced the vertex data for the triangle this fragmet is part of. 
in VertexData
{
    float v2f_height;
    vec3 v2f_viewSpacePosition;
    vec3 v2f_viewSpaceNormal;
    vec3 v2f_worldSpacePosition;
    vec2 normalizedXYcoords;
    float distance; //camera to geometry distance
    vec3 viewToVertexPosition;
    vec3 worldSpaceNormal;
    vec4 fragPosLightSpace;
    vec3 cameraPosInWorldSpace;
};

uniform float terrainHeightScale;
uniform float terrainTextureXyScale;

// Diffuse textures for each terrain material.
uniform sampler2D terrainTexture;    // grass (the default material)
uniform sampler2D roadTexture;
uniform sampler2D highTexture;       // high-altitude material
uniform sampler2D steepTexture;
uniform sampler2D terrainDataSample; // per-texel material map; blue channel marks the road
//
// Matching specular maps, one per material.
uniform sampler2D specularGrassTexture;
uniform sampler2D specularHighTexture;
uniform sampler2D specularRoadTexture;
uniform sampler2D specularSteepTexture;
//

out vec4 fragmentColor;

void main()
{
    // trying height = 0.7 / steep 0.5
    //vec3 materialColour = vec3(v2f_height/terrainHeightScale);
    // TODO 1.4: Compute the texture coordinates and sample the texture for the grass and use as material colour.
    vec3 materialDiffuse;
    vec3 materialSpecular;
    float steepThreshold = 0.959931; //roughly 55 degrees rad
    // angle between the surface normal and world up; larger means a steeper slope
    float steepness = acos(dot(normalize(worldSpaceNormal), vec3(0,0,1)));
    // NOTE(review): exact equality against a sampled texel relies on the material map being
    // stored and sampled losslessly; a threshold (e.g. > 0.5) would be more robust - confirm.
    vec3 blueChannel = texture(terrainDataSample, normalizedXYcoords).xyz;
    float matSpecExp;
    vec3 reflectedLight;
    if(blueChannel.b == 1.0)
    {
        // Road material - diffuse-only shading.
        materialDiffuse = texture(roadTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
        materialSpecular = texture(specularRoadTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
        reflectedLight = computeShadingDiffuse(materialDiffuse, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, fragPosLightSpace);
    }
    else if(steepness > steepThreshold)
    {
        // Steep cliff faces - diffuse-only shading.
        materialDiffuse = texture(steepTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
        materialSpecular = texture(specularSteepTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
        reflectedLight = computeShadingDiffuse(materialDiffuse, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, fragPosLightSpace);
    }
    else if (v2f_height > 55)
    {
        // High terrain - add a specular term with a tighter highlight.
        materialDiffuse = texture(highTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
        materialSpecular = texture(specularHighTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
        matSpecExp = 50.0;
        reflectedLight = computeShadingSpecular(materialDiffuse, materialSpecular, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, matSpecExp, fragPosLightSpace);
    }
    else
    {
        // Default: grass, shaded with a specular term.
        materialDiffuse = texture(terrainTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
        materialSpecular = texture(specularGrassTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
        matSpecExp = 150.0;
        reflectedLight = computeShadingSpecular(materialDiffuse, materialSpecular, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, matSpecExp, fragPosLightSpace);
    }
    //float depthValue = texture(shadowMapTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).r;
    //fragmentColor = vec4(vec3(depthValue), 1.0);
    fragmentColor = vec4(toSrgb(applyFog(reflectedLight,distance, cameraPosInWorldSpace, viewToVertexPosition)), 1.0);
    //fragmentColor = vec4(toSrgb(vec3(v2f_height/terrainHeightScale)), 1.0);
}
"""
# Note how we provide lists of source code strings for the two shader stages.
# This is basically the only standard way to 'include' or 'import' code into more than one shader. The variable renderingSystem.commonFragmentShaderCode
# contains code that we wish to use in all the fragment shaders, for example code to transform the colour output to srgb.
# It is also a nice place to put code to compute lighting and other effects that should be the same across the terrain and racer for example.
self.shader = lu.buildShader([vertexShader], [ "#version 330\n", renderingSystem.commonFragmentShaderCode, fragmentShader ], { "positionIn": 0, "normalIn": 1 }) # TODO 1.4: Load texture and configure the sampler self.terrainTexId = ObjModel.loadTexture("data/grass2.png", "", True) self.highTexId = ObjModel.loadTexture("data/rock 2.png", "", True) self.roadTexId = ObjModel.loadTexture("data/paving 5.png", "", True) self.steepTexId = ObjModel.loadTexture("data/rock 5.png", "", True) self.specGrassTexId = ObjModel.loadTexture("data/grass_specular.png", "", True) self.specHighTexId = ObjModel.loadTexture("data/high_specular.png", "", True) self.specSteepTexId = ObjModel.loadTexture("data/steep_specular.png", "", True) self.specRoadTexId = ObjModel.loadTexture("data/road_specular.png", "", True) self.terrainDataSampleTexId = ObjModel.loadTexture( "data/track_01_128.png", "", False)
# --- Global configuration and state for the racer game. ---
g_backGroundColour = [0.1, 0.2, 0.1]
g_fov = 60.0           # vertical field of view, degrees
g_nearDistance = 0.2   # near clip plane distance
g_farDistance = 2000.0 # far clip plane distance

# Initial camera placement; world up is +Z.
g_viewPosition = [100.0, 100.0, 100.0]
g_viewTarget = [0.0, 0.0, 0.0]
g_viewUp = [0.0, 0.0, 1.0]

# Follow-camera offsets - presumably distance behind and look-ahead above the racer; confirm against the camera code.
g_followCamOffset = 40.0
g_followCamLookOffset = 10.0

# Sun/lighting state. g_sunPosition starts at the start position and is animated when g_updateSun is set.
g_sunStartPosition = [0.0, 0.0, 1000.0]
g_sunPosition = g_sunStartPosition
g_globalAmbientLight = vec3(0.045, 0.045, 0.045)
g_sunLightColour = [0.9, 0.8, 0.7]
g_updateSun = True
g_sunAngle = 0.0

# Scene objects, populated at load time.
g_terrain = None
g_racer = None
g_props = []  # scenery Prop instances

# Secondary light parameters.
g_lightPos = vec3(0, 0, 0)
g_lightAngle = 25
g_lightColourAndIntensity = lu.vec3(0.9, 0.9, 0.6)

#
# Key-frames for the sun light and ambient, picked by hand-waving to look ok. Note how most of this is nonsense from a physical point of view and
def render(self, view, renderingSystem):
    """Draw this object's model at its position, facing along its heading.

    The model-to-world transform aligns the model's Z axis with the heading
    and keeps the world +Z direction as up.
    """
    # TODO 1.3: This is a good place to draw the racer model instead of the sphere
    worldUp = vec3(0, 0, 1)
    modelXform = lu.make_mat4_from_zAxis(self.position, self.heading, worldUp)
    renderingSystem.drawObjModel(self.model, modelXform, view)
import magic # We import the 'lab_utils' module as 'lu' to save a bit of typing while still clearly marking where the code came from. import lab_utils as lu g_cameraDistance = 1.0 g_cameraYawDeg = 0.0 g_cameraPitchDeg = 0.0 g_yFovDeg = 45.0 g_lightYaw = 25.0 g_lightYawSpeed = 0.0 #145.0 g_lightPitch = -75.0 g_lightPitchSpeed = 0.0 #30.0 g_lightDistance = 250.0 g_lightColourAndIntensity = lu.vec3(0.9, 0.9, 0.6) g_ambientLightColourAndIntensity = lu.vec3(0.1) white = (1, 1, 1, 1) def draw_rectangle(vertices, transform, colour, lightPosition): magic.drawVertexDataAsTrianglesColour( [vertices[0], vertices[1], vertices[3]], transform, colour, lightPosition) magic.drawVertexDataAsTrianglesColour( [vertices[1], vertices[2], vertices[3]], transform, colour, lightPosition) def draw_base_court(transform, lightPosition):
def renderFrame(xOffset, width, height, time, textures, vao):
    """Render one frame of the solar-system scene into the given viewport.

    xOffset/width/height describe the viewport in pixels, time drives the
    orbital animation, and textures/vao are the shared GL resources consumed
    by Sphere.drawSphereWithTexture.
    """
    # g_model was previously declared global here but never used; an unused
    # moonPosition orbit computation was also removed.
    global g_camera
    global g_yFovDeg

    # Every sphere shares the same vertex shader; the planets share a fragment shader too.
    vertexShaderPath = "shaders/allVertexShader.glsl"
    planetFragPath = "shaders/planetFragmentShader.glsl"

    # Both the point light and the sun sit at the origin.
    lightPosition = lu.vec3(0, 0, 0)
    sunPosition = lu.vec3(0, 0, 0)

    # Orbital positions: getPosition(orbitRadius, angularSpeed, time).
    mercuryPosition = getPosition(80, 0.01, time)
    saturnPosition = getPosition(300, 0.05, time)
    experimentPosition = getPosition(150, 0.02, time)

    # This configures the fixed-function transformation from Normalized Device Coordinates (NDC)
    # to the screen (pixels - called 'window coordinates' in OpenGL documentation).
    # See: https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glViewport.xhtml
    glViewport(xOffset, 0, width, height)
    # Set the colour we want the frame buffer cleared to,
    glClearColor(0.0, 0.0, 0.0, 1.0)
    # Tell OpenGL to clear the render target to the clear values for both depth and colour buffers (depth uses the default)
    glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT)

    worldToViewTransform = g_camera.getWorldToViewMatrix([0, 1, 0])
    viewToClipTransform = lu.make_perspective(g_yFovDeg, width / height, 0.1, 1500.0)
    # The light position in view space is the same for every draw call, so compute it once.
    viewSpaceLightPosition = lu.transformPoint(worldToViewTransform, lightPosition)

    # Blending is needed for the translucent sun 'corona' shells drawn below.
    glEnable(GL_BLEND)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    # Planets.
    Sphere.drawSphereWithTexture(
        experimentPosition, 20.0, "planet1.png", viewToClipTransform,
        worldToViewTransform, textures, vao, viewSpaceLightPosition,
        vertexShaderPath, planetFragPath)
    Sphere.drawSphereWithTexture(
        mercuryPosition, 10.0, "planet2.png", viewToClipTransform,
        worldToViewTransform, textures, vao, viewSpaceLightPosition,
        vertexShaderPath, planetFragPath)
    Sphere.drawSphereWithTexture(
        saturnPosition, 10.0, "fire.png", viewToClipTransform,
        worldToViewTransform, textures, vao, viewSpaceLightPosition,
        vertexShaderPath, planetFragPath)

    # The sun: a solid core plus concentric shells whose radii jitter each
    # frame (one shared random number) to give a flicker effect.
    randomNo = random.random()
    Sphere.drawSphereWithTexture(
        sunPosition, 15.0, "sun.png", viewToClipTransform,
        worldToViewTransform, textures, vao, viewSpaceLightPosition,
        vertexShaderPath, "shaders/sunFragmentShader.glsl")
    Sphere.drawSphereWithTexture(
        sunPosition, 16.0 + 1 * randomNo, "sun.png", viewToClipTransform,
        worldToViewTransform, textures, vao, viewSpaceLightPosition,
        vertexShaderPath, "shaders/cheekyShader.glsl")
    Sphere.drawSphereWithTexture(
        sunPosition, 17.0 + 2 * randomNo, "sun.png", viewToClipTransform,
        worldToViewTransform, textures, vao, viewSpaceLightPosition,
        vertexShaderPath, "shaders/cheekyShader.glsl")
    Sphere.drawSphereWithTexture(
        sunPosition, 18.0 + 3 * randomNo, "sun.png", viewToClipTransform,
        worldToViewTransform, textures, vao, viewSpaceLightPosition,
        vertexShaderPath, "shaders/cheekyShader.1.glsl")
    Sphere.drawSphereWithTexture(
        sunPosition, 20.0 + 5 * randomNo, "sun.png", viewToClipTransform,
        worldToViewTransform, textures, vao, viewSpaceLightPosition,
        vertexShaderPath, "shaders/cheekyShader.1.glsl")
    Sphere.drawSphereWithTexture(
        sunPosition, 24.0 + 9 * randomNo, "sun.png", viewToClipTransform,
        worldToViewTransform, textures, vao, viewSpaceLightPosition,
        vertexShaderPath, "shaders/cheekyShader.2.glsl")

    # Sky sphere: a very large textured sphere enclosing the whole scene.
    Sphere.drawSphereWithTexture(
        sunPosition, 700, "stars3.png", viewToClipTransform,
        worldToViewTransform, textures, vao, viewSpaceLightPosition,
        vertexShaderPath, "shaders/sunFragmentShader.1.glsl")

    glDisable(GL_BLEND)
# --- Global configuration and state. ---
g_backGroundColour = [0.1, 0.2, 0.1 ]
g_fov = 60.0           # vertical field of view, degrees
g_nearDistance = 0.2   # near clip plane distance
g_farDistance = 2000.0 # far clip plane distance

# Initial camera placement; world up is +Z.
g_viewPosition = [ 100.0, 100.0, 100.0 ];
g_viewTarget = [ 0.0, 0.0, 0.0 ];
g_viewUp = [ 0.0, 0.0, 1.0 ];

# Follow-camera offsets - presumably distance behind and look-ahead above the racer; confirm against the camera code.
g_followCamOffset = 25.0
g_followCamLookOffset = 10.0

# Sun/lighting state. g_sunPosition starts at the start position and is animated when g_updateSun is set.
g_sunStartPosition = [0.0, 0.0, 1000.0]
g_sunPosition = g_sunStartPosition
g_globalAmbientLight = vec3(0.045,0.045,0.045)
g_sunLightColour = [0.9, 0.8, 0.7]
g_updateSun = True
g_sunAngle = 0.0

# Scene objects, populated at load time.
g_terrain = None
g_racer = None #why not Racer()

#
# Key-frames for the sun light and ambient, picked by hand-waving to look ok. Note how most of this is nonsense from a physical point of view and
# some of the reasoning is basically to compensate for the lack of exposure (or tone-mapping).
#
# Each entry is [sun angle key, light colour]; values below the horizon are black.
g_sunKeyFrames = [
    [ -1.0, vec3(0.0, 0.0, 0.0) ], # midnight - no direct light, but we'll ramp up the ambient to make things look ok - should _really_ be done using HDR
    [ 0.0, vec3(0.0, 0.0, 0.0) ],  # we want full dark past the horizon line (this fixes shadow artifacts also)