Example #1
def buildShader(vertexShaderSource, fragmentShaderSource):
    shader = lu.buildShader(vertexShaderSource, fragmentShaderSource,
                            ObjModel.getDefaultAttributeBindings())
    if shader:
        glUseProgram(shader)
        ObjModel.setDefaultUniformBindings(shader)
        glUseProgram(0)
    return shader
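
A minimal usage sketch for the wrapper above (not part of the original snippet; the file names and the draw call are placeholders): read the two GLSL sources, build the program, and bind it while drawing.

shader = buildShader(open("vertexShader.glsl").read(),
                     open("fragmentShader.glsl").read())
if shader:
    glUseProgram(shader)
    # ... draw ObjModel instances here ...
    glUseProgram(0)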
Example #2
    def __init__(self, fileName):
        self.defaultTextureOne = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D, self.defaultTextureOne)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_FLOAT,
                     [1.0, 1.0, 1.0, 1.0])

        self.defaultNormalTexture = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D, self.defaultNormalTexture)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, 1, 1, 0, GL_RGBA, GL_FLOAT,
                     [0.5, 0.5, 0.5, 1.0])
        glBindTexture(GL_TEXTURE_2D, 0)

        self.overrideDiffuseTextureWithDefault = False
        self.load(fileName)

        self.defaultShader = lu.buildShader(
            self.defaultVertexShader, self.defaultFragmentShader,
            ObjModel.getDefaultAttributeBindings())
        glUseProgram(self.defaultShader)
        ObjModel.setDefaultUniformBindings(self.defaultShader)
        glUseProgram(0)
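
A hedged sketch (not from the original source) of how the 1x1 fallback textures created above could be used: bind them to the texture units a material would otherwise fill, so shaders that sample a missing diffuse or normal map still receive sensible values. The unit numbers are assumptions.

    def bindDefaultTextures(self):
        glActiveTexture(GL_TEXTURE0)                             # assumed diffuse unit
        glBindTexture(GL_TEXTURE_2D, self.defaultTextureOne)     # plain white 1x1 fallback
        glActiveTexture(GL_TEXTURE1)                             # assumed normal-map unit
        glBindTexture(GL_TEXTURE_2D, self.defaultNormalTexture)  # neutral 1x1 fallback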
Example #3
def reload_shader():
    """ Tries to reload the shader from source """
    global g_vert_shader_source
    global g_frag_shader_source
    global g_shader
    vert_shader = ""
    frag_shader = ""
    with open("vertShader.glsl") as source:
        vert_shader = source.read()
    with open("fragShader.glsl") as source:
        frag_shader = source.read()
    if vert_shader != g_vert_shader_source or frag_shader != g_frag_shader_source:
        new_shader = lu.buildShader(vert_shader, frag_shader,
                                    {"ndcPositionAttr": 0})
        if new_shader:
            if g_shader:
                glDeleteProgram(g_shader)
            g_shader = new_shader
            print("Reloaded shader, ok!")
            g_frag_shader_source = frag_shader
            g_vert_shader_source = vert_shader
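
Because reload_shader() re-reads both files and only rebuilds the program when the source actually changed, it can simply be called once per frame for live shader editing. A minimal sketch of that wiring (the loop condition and the drawing are placeholders, not from the original snippet):

while not window_should_close():   # hypothetical loop condition
    reload_shader()                # rebuilds only if the files changed
    glUseProgram(g_shader)
    # ... draw with g_shader ...
    glUseProgram(0)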
Example #4
    def setupObjModelShader(self):
        self.objModelShader = lu.buildShader([
            """
                #version 330

                in vec3 positionAttribute;
                in vec3	normalAttribute;
                in vec2	texCoordAttribute;

                uniform mat4 modelToClipTransform;
                uniform mat4 modelToViewTransform;
                uniform mat3 modelToViewNormalTransform;

                // 'out' variables declared in a vertex shader can be accessed in the subsequent stages.
                // For a fragment shader the variable is interpolated (the type of interpolation can be modified; try placing 'flat' in front, and also in the fragment shader!).
                out VertexData
                {
	                vec3 v2f_viewSpaceNormal;
	                vec3 v2f_viewSpacePosition;
	                vec2 v2f_texCoord;
                };

                void main() 
                {
                // gl_Position is a built-in 'out' variable that gets passed on to the clipping and rasterization stages.
                // It must be written in order to produce any drawn geometry.
                // We transform the position with a single matrix multiply from model to clip space; note the added 1 at the end of the position to make it homogeneous.
	                gl_Position = modelToClipTransform * vec4(positionAttribute, 1.0);
	                // We transform the normal to view space using the normal transform (which is the inverse-transpose of the rotation part of the modelToViewTransform)
                  // Just using the rotation is only valid if the matrix contains only rotation and uniform scaling.
	                v2f_viewSpaceNormal = normalize(modelToViewNormalTransform * normalAttribute);
	                v2f_viewSpacePosition = (modelToViewTransform * vec4(positionAttribute, 1.0)).xyz;
	                // The texture coordinate is just passed through
	                v2f_texCoord = texCoordAttribute;
                }
                """
        ], [
            "#version 330\n", self.commonFragmentShaderCode, """
                // Input from the vertex shader; contains the interpolated (i.e., area-weighted average) value output by the three vertex shader invocations that
                // produced the vertices of the triangle this fragment is part of.
                in VertexData
                {
	                vec3 v2f_viewSpaceNormal;
	                vec3 v2f_viewSpacePosition;
	                vec2 v2f_texCoord;
                };

                // Material properties set by OBJModel.
                uniform vec3 material_diffuse_color; 
	            uniform float material_alpha;
                uniform vec3 material_specular_color; 
                uniform vec3 material_emissive_color; 
                uniform float material_specular_exponent;

                // Textures set by OBJModel 
                uniform sampler2D diffuse_texture;
                uniform sampler2D opacity_texture;
                uniform sampler2D specular_texture;
                uniform sampler2D normal_texture;

                out vec4 fragmentColor;

                void main() 
                {
                // Manual alpha test (note: the alpha test is no longer part of OpenGL 3.3).
	                if (texture(opacity_texture, v2f_texCoord).r < 0.5)
	                {
		                discard;
	                }

	                vec3 materialDiffuse = texture(diffuse_texture, v2f_texCoord).xyz * material_diffuse_color;
                    vec3 materialSpecular = texture(specular_texture, v2f_texCoord).xyz * material_specular_color;
                    vec3 reflectedLight = computeShading(materialDiffuse,v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour) + material_emissive_color;
	                fragmentColor = vec4(toSrgb(reflectedLight), material_alpha);
                }
            """
        ], ObjModel.getDefaultAttributeBindings())
        glUseProgram(self.objModelShader)
        ObjModel.setDefaultUniformBindings(self.objModelShader)
        glUseProgram(0)
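
The fragment shader above calls computeShading and toSrgb and reads viewSpaceLightPosition and sunLightColour; all of these come from self.commonFragmentShaderCode, which is not shown in the snippet. A minimal sketch of what that shared code could declare (the implementations are assumptions; only the names and call signatures are taken from the code above):

commonFragmentShaderCode = """
    uniform vec3 viewSpaceLightPosition;
    uniform vec3 sunLightColour;

    // Placeholder Lambertian shading matching the call signature used above.
    vec3 computeShading(vec3 materialColour, vec3 viewSpacePosition, vec3 viewSpaceNormal,
                        vec3 lightPosition, vec3 lightColour)
    {
        vec3 toLight = normalize(lightPosition - viewSpacePosition);
        return materialColour * lightColour * max(0.0, dot(normalize(viewSpaceNormal), toLight));
    }

    // Approximate linear-to-sRGB conversion.
    vec3 toSrgb(vec3 colour) { return pow(colour, vec3(1.0 / 2.2)); }
"""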
Example #5
    def load(self, imageName, renderingSystem):
        with Image.open(imageName) as im:
            self.imageWidth = im.size[0]
            self.imageHeight = im.size[1]
            self.imageData = im.tobytes("raw",
                                        "RGBX" if im.mode == 'RGB' else "RGBA",
                                        0, -1)

            xyOffset = -vec2(float(self.imageWidth), float(
                self.imageHeight)) * self.xyScale / 2.0

            # Calculate vertex positions
            terrainVerts = []
            for j in range(self.imageHeight):
                for i in range(self.imageWidth):
                    offset = (j * self.imageWidth + i) * 4
                    # copy pixel 4 channels
                    imagePixel = self.imageData[offset:offset + 4]
                    # Normalize the red channel from [0,255] to [0.0, 1.0]
                    red = float(imagePixel[0]) / 255.0

                    xyPos = vec2(i, j) * self.xyScale + xyOffset
                    # TODO 1.1: set the height
                    #zPos = 0.0
                    zPos = red * self.heightScale  # Change from zero for 1.1
                    pt = vec3(xyPos[0], xyPos[1], zPos)
                    terrainVerts.append(pt)

                    green = imagePixel[1]
                    if green == 255:
                        self.startLocations.append(pt)
                    if green == 128:
                        self.treeLocations.append(pt)
                    if green == 64:
                        self.rockLocations.append(pt)

            # build vertex normals...
            terrainNormals = [vec3(0.0, 0.0, 1.0)
                              ] * self.imageWidth * self.imageHeight
            for j in range(1, self.imageHeight - 1):
                for i in range(1, self.imageWidth - 1):
                    v = terrainVerts[j * self.imageWidth + i]
                    vxP = terrainVerts[j * self.imageWidth + i - 1]
                    vxN = terrainVerts[j * self.imageWidth + i + 1]
                    dx = vxP - vxN

                    vyP = terrainVerts[(j - 1) * self.imageWidth + i]
                    vyN = terrainVerts[(j + 1) * self.imageWidth + i]
                    dy = vyP - vyN

                    nP = lu.normalize(lu.cross(dx, dy))

                    vdxyP = terrainVerts[(j - 1) * self.imageWidth + i - 1]
                    vdxyN = terrainVerts[(j + 1) * self.imageWidth + i + 1]
                    dxy = vdxyP - vdxyN

                    vdyxP = terrainVerts[(j - 1) * self.imageWidth + i + 1]
                    vdyxN = terrainVerts[(j + 1) * self.imageWidth + i - 1]
                    dyx = vdyxP - vdyxN

                    nD = lu.normalize(lu.cross(dxy, dyx))

                    terrainNormals[j * self.imageWidth + i] = lu.normalize(nP +
                                                                           nD)

            # join the vertices into quads (2 triangles @ 3 vertices each); there is one fewer quad than vertices in each direction.
            terrainInds = [0] * 2 * 3 * (self.imageWidth -
                                         1) * (self.imageHeight - 1)
            for j in range(0, self.imageHeight - 1):
                for i in range(0, self.imageWidth - 1):
                    # Vertex indices to the four corners of the quad.
                    qInds = [
                        j * self.imageWidth + i,
                        j * self.imageWidth + i + 1,
                        (j + 1) * self.imageWidth + i,
                        (j + 1) * self.imageWidth + i + 1,
                    ]
                    outOffset = 3 * 2 * (j * (self.imageWidth - 1) + i)
                    points = [
                        terrainVerts[qInds[0]],
                        terrainVerts[qInds[1]],
                        terrainVerts[qInds[2]],
                        terrainVerts[qInds[3]],
                    ]
                    # output first triangle:
                    terrainInds[outOffset + 0] = qInds[0]
                    terrainInds[outOffset + 1] = qInds[1]
                    terrainInds[outOffset + 2] = qInds[2]
                    # second triangle
                    terrainInds[outOffset + 3] = qInds[2]
                    terrainInds[outOffset + 4] = qInds[1]
                    terrainInds[outOffset + 5] = qInds[3]

            self.terrainInds = terrainInds

            self.vertexArrayObject = lu.createVertexArrayObject()
            self.vertexDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainVerts, 0)
            self.normalDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainNormals, 1)
            self.indexDataBuffer = lu.createAndAddIndexArray(
                self.vertexArrayObject, terrainInds)

            #normalDataBuffer = createAndAddVertexArrayData<vec4>(g_particleVao, { vec4(0.0f) }, 1);

        vertexShader = """
            #version 330
            in vec3 positionIn;
            in vec3 normalIn;

            uniform mat4 modelToClipTransform;
            uniform mat4 modelToViewTransform;
            uniform mat3 modelToViewNormalTransform;

            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;
            uniform vec2 xyNormScale;
            uniform vec2 xyOffset;

            //2.2
            uniform mat4 worldToViewTransform;


            // 'out' variables declared in a vertex shader can be accessed in the subsequent stages.
            // For a fragment shader the variable is interpolated (the type of interpolation can be modified, try placing 'flat' in front here and in the fragment shader!).
            out VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;
                vec3 v2f_worldSpaceNormal; // 2.1 - Steep

                vec2 v2f_xyNormScale; // 2.1 - Road
                vec2 v2f_xyOffset; // 2.1 - Road

                // 2.2
                vec3 cameraPosition;
                vec3 cameraToPointVector;

            };

            void main() 
            {
                // pass the world-space Z to the fragment shader, as it is used to compute the colour and other things
	            v2f_height = positionIn.z;
                v2f_worldSpacePosition = positionIn;
                v2f_viewSpacePosition = (modelToViewTransform * vec4(positionIn, 1.0)).xyz;
                v2f_viewSpaceNormal = modelToViewNormalTransform * normalIn;

                v2f_worldSpaceNormal = normalIn; //2.1 - Steep
                v2f_xyNormScale = xyNormScale; // 2.1 - Road
                v2f_xyOffset = xyOffset; // 2.1 - Road

                //2.2
                cameraPosition = vec3(worldToViewTransform[3][0],worldToViewTransform[3][1],worldToViewTransform[3][2]);
                cameraToPointVector = normalize(positionIn - cameraPosition);

	            // gl_Position is a built-in 'out' variable that gets passed on to the clipping and rasterization stages (hardware fixed function).
                // it must be written by the vertex shader in order to produce any drawn geometry. 
                // We transform the position using one matrix multiply from model to clip space. Note the added 1 at the end of the position to make the 3D
                // coordinate homogeneous.
	            gl_Position = modelToClipTransform * vec4(positionIn, 1.0);
            }
"""

        fragmentShader = """
            // Input from the vertex shader; contains the interpolated (i.e., area-weighted average) value output by the three vertex shader invocations that
            // produced the vertices of the triangle this fragment is part of.
            in VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;

                vec3 v2f_worldSpaceNormal;  //2.1 - Steep
                vec2 v2f_xyNormScale; // 2.1 - Road
                vec2 v2f_xyOffset; // 2.1 - Road

                // 2.2
                vec3 cameraPosition;
                vec3 cameraToPointVector;
            };

            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;
            // 1.4
            uniform sampler2D grassTexture; 
            // Olympics
            uniform sampler2D wallTexture;
            uniform sampler2D seatsTexture;   
            uniform sampler2D trackTexture;
            uniform sampler2D mapTexture;
            uniform sampler2D concreteTexture;

            out vec4 fragmentColor;

            void main() 
            {
                vec3 materialColour = vec3(v2f_height/terrainHeightScale);
                // Default colour 
                vec3 concreteColour = texture(concreteTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
                materialColour = concreteColour;

                // 2.1                 
                float slope = dot(v2f_worldSpaceNormal, vec3(v2f_worldSpaceNormal.x, 0.0, v2f_worldSpaceNormal.z)); //Steep 
                float blueChannel = texture(mapTexture, (v2f_worldSpacePosition.xy - v2f_xyOffset) * v2f_xyNormScale).z; //Road

                // Track Texture
                if (blueChannel >= 0.9) {
                    vec3 trackColour = texture(trackTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
                    materialColour = trackColour;
                // Grass texture
                } else if (v2f_height < 1) {
                    vec3 grassColour = texture(grassTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
                    materialColour = grassColour; 
                // Wall/Banner texture
                } else if ((v2f_height < 11) && (slope < 0.2)) {
                    vec3 wallColour = texture(wallTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
                    materialColour = wallColour;                  
                // Seats
                } else if (slope < 0.2) {
                    vec3 seatsColour = texture(seatsTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
                    materialColour = seatsColour; 
                } 
                vec3 reflectedLight = computeShading(materialColour, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour);
	            //fragmentColor = vec4(toSrgb(reflectedLight), 1.0); // before 2.2
	            //fragmentColor = vec4(toSrgb(vec3(v2f_height/terrainHeightScale)), 1.0); //start??

                //2.2 - Fog       
                //fragmentColor = vec4(toSrgb(applyFog(reflectedLight, -v2f_viewSpacePosition.z)), 1.0); // basic fog
                fragmentColor = vec4(toSrgb(applyFog(reflectedLight, -v2f_viewSpacePosition.z, cameraPosition, cameraToPointVector)), 1.0);
            }
"""
        # Note how we provide lists of source code strings for the two shader stages.
        # This is basically the only standard way to 'include' or 'import' code into more than one shader. The variable renderingSystem.commonFragmentShaderCode
        # contains code that we wish to use in all the fragment shaders, for example code to transform the colour output to sRGB.
        # It is also a nice place to put code to compute lighting and other effects that should be the same across the terrain and the racer, for example.
        self.shader = lu.buildShader([vertexShader], [
            "#version 330\n", renderingSystem.commonFragmentShaderCode,
            fragmentShader
        ], {
            "positionIn": 0,
            "normalIn": 1
        })

        # TODO 1.4: Load texture and configure the sampler
        self.grassTexture = ObjModel.loadTexture("grass.png", "data", True)

        # Olympics
        self.wallTexture = ObjModel.loadTexture("banner.png", "data", True)
        self.seatsTexture = ObjModel.loadTexture("seats.png", "data", True)
        self.trackTexture = ObjModel.loadTexture("track.png", "data", True)
        self.mapTexture = ObjModel.loadTexture("map.png", "data", False)
        self.concreteTexture = ObjModel.loadTexture("concrete.jpg", "data",
                                                    True)
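
The fragment shader above declares six samplers, but the snippet does not show how the textures loaded at the end are attached to them. A hedged sketch (the helper name and unit numbers are assumptions) of binding each texture to a unit and pointing the matching sampler uniform at it:

    def bindTerrainTextures(self):
        glUseProgram(self.shader)
        textures = [("grassTexture", self.grassTexture),
                    ("wallTexture", self.wallTexture),
                    ("seatsTexture", self.seatsTexture),
                    ("trackTexture", self.trackTexture),
                    ("mapTexture", self.mapTexture),
                    ("concreteTexture", self.concreteTexture)]
        for unit, (name, tex) in enumerate(textures):
            glActiveTexture(GL_TEXTURE0 + unit)
            glBindTexture(GL_TEXTURE_2D, tex)
            glUniform1i(glGetUniformLocation(self.shader, name), unit)
        glUseProgram(0)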
Example #6
    def load(self, imageName, renderingSystem):

        with Image.open(imageName) as im:
            self.imageWidth = im.size[0]
            self.imageHeight = im.size[1]
            self.imageData = im.tobytes("raw",
                                        "RGBX" if im.mode == 'RGB' else "RGBA",
                                        0, -1)

            xyOffset = -vec2(float(self.imageWidth), float(
                self.imageHeight)) * self.xyScale / 2.0

            # Calculate vertex positions
            terrainVerts = []
            for j in range(self.imageHeight):
                for i in range(self.imageWidth):
                    offset = (j * self.imageWidth + i) * 4
                    # copy pixel 4 channels
                    imagePixel = self.imageData[offset:offset + 4]
                    # Normalize the red channel from [0,255] to [0.0, 1.0]
                    red = float(imagePixel[0]) / 255.0

                    xyPos = vec2(i, j) * self.xyScale + xyOffset
                    # TODO 1.1: set the height
                    zPos = self.heightScale * red
                    pt = vec3(xyPos[0], xyPos[1], zPos)
                    terrainVerts.append(pt)

                    green = imagePixel[1]
                    if green == 255:
                        self.startLocations.append(pt)
                    if green == 128:
                        self.treeLocations.append(pt)
                    if green == 64:
                        self.rockLocations.append(pt)

            # build vertex normals...
            terrainNormals = [vec3(0.0, 0.0, 1.0)
                              ] * self.imageWidth * self.imageHeight
            for j in range(1, self.imageHeight - 1):
                for i in range(1, self.imageWidth - 1):
                    v = terrainVerts[j * self.imageWidth + i]
                    vxP = terrainVerts[j * self.imageWidth + i - 1]
                    vxN = terrainVerts[j * self.imageWidth + i + 1]
                    dx = vxP - vxN

                    vyP = terrainVerts[(j - 1) * self.imageWidth + i]
                    vyN = terrainVerts[(j + 1) * self.imageWidth + i]
                    dy = vyP - vyN

                    nP = lu.normalize(lu.cross(dx, dy))

                    vdxyP = terrainVerts[(j - 1) * self.imageWidth + i - 1]
                    vdxyN = terrainVerts[(j + 1) * self.imageWidth + i + 1]
                    dxy = vdxyP - vdxyN

                    vdyxP = terrainVerts[(j - 1) * self.imageWidth + i + 1]
                    vdyxN = terrainVerts[(j + 1) * self.imageWidth + i - 1]
                    dyx = vdyxP - vdyxN

                    nD = lu.normalize(lu.cross(dxy, dyx))

                    terrainNormals[j * self.imageWidth + i] = lu.normalize(nP +
                                                                           nD)

            # join the vertices into quads (2 triangles @ 3 vertices each); there is one fewer quad than vertices in each direction.
            terrainInds = [0] * 2 * 3 * (self.imageWidth -
                                         1) * (self.imageHeight - 1)
            for j in range(0, self.imageHeight - 1):
                for i in range(0, self.imageWidth - 1):
                    # Vertex indices to the four corners of the quad.
                    qInds = [
                        j * self.imageWidth + i,
                        j * self.imageWidth + i + 1,
                        (j + 1) * self.imageWidth + i,
                        (j + 1) * self.imageWidth + i + 1,
                    ]
                    outOffset = 3 * 2 * (j * (self.imageWidth - 1) + i)
                    points = [
                        terrainVerts[qInds[0]],
                        terrainVerts[qInds[1]],
                        terrainVerts[qInds[2]],
                        terrainVerts[qInds[3]],
                    ]
                    # output first triangle:
                    terrainInds[outOffset + 0] = qInds[0]
                    terrainInds[outOffset + 1] = qInds[1]
                    terrainInds[outOffset + 2] = qInds[2]
                    # second triangle
                    terrainInds[outOffset + 3] = qInds[2]
                    terrainInds[outOffset + 4] = qInds[1]
                    terrainInds[outOffset + 5] = qInds[3]

            self.terrainInds = terrainInds

            self.vertexArrayObject = lu.createVertexArrayObject()
            self.vertexDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainVerts, 0)
            self.normalDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainNormals, 1)
            self.indexDataBuffer = lu.createAndAddIndexArray(
                self.vertexArrayObject, terrainInds)

            #normalDataBuffer = createAndAddVertexArrayData<vec4>(g_particleVao, { vec4(0.0f) }, 1);

        vertexShader = """
            #version 330
            in vec3 positionIn;
            in vec3 normalIn;

            uniform mat4 worldToViewTransform;
            uniform mat4 modelToClipTransform;
            uniform mat4 modelToViewTransform;
            uniform mat3 modelToViewNormalTransform;
            uniform mat4 lightPOVTransform;
            
            uniform sampler2D terrainDataSampler;
            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;
            uniform vec2 xyNormScale;
            uniform vec2 xyOffset;
            

            // 'out' variables declared in a vertex shader can be accessed in the subsequent stages.
            // For a fragment shader the variable is interpolated (the type of interpolation can be modified, try placing 'flat' in front here and in the fragment shader!).
            out VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;
                vec2 normalizedXYcoords;
                float distance;
                vec3 viewToVertexPosition;
                vec3 worldSpaceNormal;
                vec4 fragPosLightSpace;
                vec3 cameraPosInWorldSpace;
            };

            void main() 
            {
                // pass the world-space Z to the fragment shader, as it is used to compute the colour and other things
	            v2f_height = positionIn.z;
                v2f_worldSpacePosition = positionIn;
                v2f_viewSpacePosition = (modelToViewTransform * vec4(positionIn, 1.0)).xyz;
                v2f_viewSpaceNormal = modelToViewNormalTransform * normalIn;
                worldSpaceNormal = normalIn;
                normalizedXYcoords = positionIn.xy * xyNormScale + xyOffset;
                distance = -v2f_viewSpacePosition.z;
                //first use the worldToViewTransform to get the camera world space coords
                cameraPosInWorldSpace = vec3(worldToViewTransform[3][0],worldToViewTransform[3][1],worldToViewTransform[3][2]);
                viewToVertexPosition = normalize(positionIn - cameraPosInWorldSpace);
	            // gl_Position is a built-in 'out' variable that gets passed on to the clipping and rasterization stages (hardware fixed function).
                // it must be written by the vertex shader in order to produce any drawn geometry. 
                // We transform the position using one matrix multiply from model to clip space. Note the added 1 at the end of the position to make the 3D
                // coordinate homogeneous.
                fragPosLightSpace = lightPOVTransform * vec4(positionIn, 1.0);
	            gl_Position = modelToClipTransform * vec4(positionIn, 1.0);
            }
"""

        fragmentShader = """
            // Input from the vertex shader; contains the interpolated (i.e., area-weighted average) value output by the three vertex shader invocations that
            // produced the vertices of the triangle this fragment is part of.
            in VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;
                vec2 normalizedXYcoords;
                float distance; //camera to geometry distance
                vec3 viewToVertexPosition;
                vec3 worldSpaceNormal;
                vec4 fragPosLightSpace;
                vec3 cameraPosInWorldSpace;
            };

            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;
            uniform sampler2D terrainTexture;
            uniform sampler2D roadTexture;
            uniform sampler2D highTexture;
            uniform sampler2D steepTexture;
            uniform sampler2D terrainDataSample;
            //
            uniform sampler2D specularGrassTexture;
            uniform sampler2D specularHighTexture;
            uniform sampler2D specularRoadTexture;
            uniform sampler2D specularSteepTexture;
            //
            out vec4 fragmentColor;

            void main() 
            {
                // trying height = 0.7 / steep 0.5
                //vec3 materialColour = vec3(v2f_height/terrainHeightScale);
                // TODO 1.4: Compute the texture coordinates and sample the texture for the grass and use as material colour.
                vec3 materialDiffuse;
                vec3 materialSpecular;
                float steepThreshold = 0.959931; // roughly 55 degrees, in radians
                float steepness = acos(dot(normalize(worldSpaceNormal), vec3(0,0,1)));
                vec3 blueChannel = texture(terrainDataSample, normalizedXYcoords).xyz;
                float matSpecExp;
                vec3 reflectedLight;
                
                if(blueChannel.b == 1.0)
                {
                    materialDiffuse = texture(roadTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    materialSpecular = texture(specularRoadTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    reflectedLight = computeShadingDiffuse(materialDiffuse, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, fragPosLightSpace);
                }
                else if(steepness > steepThreshold)
                {
                    materialDiffuse = texture(steepTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    materialSpecular = texture(specularSteepTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    reflectedLight = computeShadingDiffuse(materialDiffuse, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, fragPosLightSpace);
                }
                else if (v2f_height > 55)
                {
                    materialDiffuse = texture(highTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    materialSpecular = texture(specularHighTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    matSpecExp = 50.0;
                    reflectedLight = computeShadingSpecular(materialDiffuse, materialSpecular, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, matSpecExp,  fragPosLightSpace);
                }
                else
                {
                    materialDiffuse = texture(terrainTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    materialSpecular = texture(specularGrassTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    matSpecExp = 150.0;
                    reflectedLight = computeShadingSpecular(materialDiffuse, materialSpecular, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, matSpecExp,  fragPosLightSpace);
                }
                
                //float depthValue = texture(shadowMapTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).r;
                //fragmentColor = vec4(vec3(depthValue), 1.0);
	            fragmentColor = vec4(toSrgb(applyFog(reflectedLight,distance, cameraPosInWorldSpace, viewToVertexPosition)), 1.0);
	            //fragmentColor = vec4(toSrgb(vec3(v2f_height/terrainHeightScale)), 1.0);

            }
"""
        # Note how we provide lists of source code strings for the two shader stages.
        # This is basically the only standard way to 'include' or 'import' code into more than one shader. The variable renderingSystem.commonFragmentShaderCode
        # contains code that we wish to use in all the fragment shaders, for example code to transform the colour output to sRGB.
        # It is also a nice place to put code to compute lighting and other effects that should be the same across the terrain and the racer, for example.
        self.shader = lu.buildShader([vertexShader], [
            "#version 330\n", renderingSystem.commonFragmentShaderCode,
            fragmentShader
        ], {
            "positionIn": 0,
            "normalIn": 1
        })
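
        # The shared fragment code stitched in above also supplies applyFog(colour, distance,
        # cameraPos, rayDir), which is called at the end of the fragment shader but not shown
        # here. A hedged sketch of one plausible height-dependent exponential implementation
        # matching that four-argument call; the constants below are assumptions:
        #
        #     vec3 applyFog(vec3 colour, float distance, vec3 cameraPos, vec3 rayDir)
        #     {
        #         const float fogDensity = 0.02;
        #         const float heightFalloff = 0.05;
        #         const vec3 fogColour = vec3(0.5, 0.6, 0.7);
        #         float fogAmount = (fogDensity / heightFalloff) * exp(-cameraPos.z * heightFalloff)
        #             * (1.0 - exp(-distance * rayDir.z * heightFalloff)) / rayDir.z;
        #         return mix(colour, fogColour, clamp(fogAmount, 0.0, 1.0));
        #     }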

        # TODO 1.4: Load texture and configure the sampler
        self.terrainTexId = ObjModel.loadTexture("data/grass2.png", "", True)
        self.highTexId = ObjModel.loadTexture("data/rock 2.png", "", True)
        self.roadTexId = ObjModel.loadTexture("data/paving 5.png", "", True)
        self.steepTexId = ObjModel.loadTexture("data/rock 5.png", "", True)
        self.specGrassTexId = ObjModel.loadTexture("data/grass_specular.png",
                                                   "", True)
        self.specHighTexId = ObjModel.loadTexture("data/high_specular.png", "",
                                                  True)
        self.specSteepTexId = ObjModel.loadTexture("data/steep_specular.png",
                                                   "", True)
        self.specRoadTexId = ObjModel.loadTexture("data/road_specular.png", "",
                                                  True)
        self.terrainDataSampleTexId = ObjModel.loadTexture(
            "data/track_01_128.png", "", False)
Example #7
    def load(self, imageName, renderingSystem):
        with Image.open(imageName) as im:
            self.imageWidth = im.size[0]
            self.imageHeight = im.size[1]
            self.imageData = im.tobytes("raw",
                                        "RGBX" if im.mode == 'RGB' else "RGBA",
                                        0, -1)

            xyOffset = -vec2(float(self.imageWidth), float(
                self.imageHeight)) * self.xyScale / 2.0

            # Calculate vertex positions
            terrainVerts = []
            for j in range(self.imageHeight):
                for i in range(self.imageWidth):
                    offset = (j * self.imageWidth + i) * 4
                    # copy pixel 4 channels
                    imagePixel = self.imageData[offset:offset + 4]
                    # Normalize the red channel from [0,255] to [0.0, 1.0]
                    red = float(imagePixel[0]) / 255.0

                    xyPos = vec2(i, j) * self.xyScale + xyOffset
                    # TODO 1.1: set the height
                    zPos = self.heightScale * red

                    pt = vec3(xyPos[0], xyPos[1], zPos)
                    terrainVerts.append(pt)

                    green = imagePixel[1]
                    if green == 255:
                        self.startLocations.append(pt)
                    if green == 128:
                        self.treeLocations.append(pt)
                    if green == 64:
                        self.rockLocations.append(pt)

            # build vertex normals...
            terrainNormals = [vec3(0.0, 0.0, 1.0)
                              ] * self.imageWidth * self.imageHeight
            for j in range(1, self.imageHeight - 1):
                for i in range(1, self.imageWidth - 1):
                    v = terrainVerts[j * self.imageWidth + i]
                    vxP = terrainVerts[j * self.imageWidth + i - 1]
                    vxN = terrainVerts[j * self.imageWidth + i + 1]
                    dx = vxP - vxN

                    vyP = terrainVerts[(j - 1) * self.imageWidth + i]
                    vyN = terrainVerts[(j + 1) * self.imageWidth + i]
                    dy = vyP - vyN

                    nP = lu.normalize(lu.cross(dx, dy))

                    vdxyP = terrainVerts[(j - 1) * self.imageWidth + i - 1]
                    vdxyN = terrainVerts[(j + 1) * self.imageWidth + i + 1]
                    dxy = vdxyP - vdxyN

                    vdyxP = terrainVerts[(j - 1) * self.imageWidth + i + 1]
                    vdyxN = terrainVerts[(j + 1) * self.imageWidth + i - 1]
                    dyx = vdyxP - vdyxN

                    nD = lu.normalize(lu.cross(dxy, dyx))

                    terrainNormals[j * self.imageWidth + i] = lu.normalize(nP +
                                                                           nD)

            # join the vertices into quads (2 triangles @ 3 vertices each); there is one fewer quad than vertices in each direction.
            terrainInds = [0] * 2 * 3 * (self.imageWidth -
                                         1) * (self.imageHeight - 1)
            for j in range(0, self.imageHeight - 1):
                for i in range(0, self.imageWidth - 1):
                    # Vertex indices to the four corners of the quad.
                    qInds = [
                        j * self.imageWidth + i,
                        j * self.imageWidth + i + 1,
                        (j + 1) * self.imageWidth + i,
                        (j + 1) * self.imageWidth + i + 1,
                    ]
                    outOffset = 3 * 2 * (j * (self.imageWidth - 1) + i)
                    points = [
                        terrainVerts[qInds[0]],
                        terrainVerts[qInds[1]],
                        terrainVerts[qInds[2]],
                        terrainVerts[qInds[3]],
                    ]
                    # output first triangle:
                    terrainInds[outOffset + 0] = qInds[0]
                    terrainInds[outOffset + 1] = qInds[1]
                    terrainInds[outOffset + 2] = qInds[2]
                    # second triangle
                    terrainInds[outOffset + 3] = qInds[2]
                    terrainInds[outOffset + 4] = qInds[1]
                    terrainInds[outOffset + 5] = qInds[3]

            self.terrainInds = terrainInds

            self.vertexArrayObject = lu.createVertexArrayObject()
            self.vertexDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainVerts, 0)
            self.normalDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainNormals, 1)
            self.indexDataBuffer = lu.createAndAddIndexArray(
                self.vertexArrayObject, terrainInds)

            #normalDataBuffer = createAndAddVertexArrayData<vec4>(g_particleVao, { vec4(0.0f) }, 1);

        vertexShader = """
            #version 330
            in vec3 positionIn;
            in vec3 normalIn;

            uniform mat4 modelToClipTransform;
            uniform mat4 modelToViewTransform;
            uniform mat3 modelToViewNormalTransform;

            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;
            uniform vec2 xyNormScale;
            uniform vec2 xyOffset;


            // 'out' variables declared in a vertex shader can be accessed in the subsequent stages.
            // For a fragment shader the variable is interpolated (the type of interpolation can be modified, try placing 'flat' in front here and in the fragment shader!).
            out VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;
            };

            void main() 
            {
                // pass the world-space Z to the fragment shader, as it is used to compute the colour and other things
	            v2f_height = positionIn.z;
                v2f_worldSpacePosition = positionIn;
                v2f_viewSpacePosition = (modelToViewTransform * vec4(positionIn, 1.0)).xyz;
                v2f_viewSpaceNormal = modelToViewNormalTransform * normalIn;

	            // gl_Position is a built-in 'out' variable that gets passed on to the clipping and rasterization stages (hardware fixed function).
                // it must be written by the vertex shader in order to produce any drawn geometry. 
                // We transform the position using one matrix multiply from model to clip space. Note the added 1 at the end of the position to make the 3D
                // coordinate homogeneous.
	            gl_Position = modelToClipTransform * vec4(positionIn, 1.0);
            }
"""

        fragmentShader = """
            // Input from the vertex shader; contains the interpolated (i.e., area-weighted average) value output by the three vertex shader invocations that
            // produced the vertices of the triangle this fragment is part of.
            in VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;
            };

            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;

            out vec4 fragmentColor;

            void main() 
            {
                vec3 materialColour = vec3(v2f_height/terrainHeightScale);
                // TODO 1.4: Compute the texture coordinates and sample the texture for the grass and use as material colour.

                vec3 reflectedLight = computeShading(materialColour, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour);
	            fragmentColor = vec4(toSrgb(reflectedLight), 1.0);
	            //fragmentColor = vec4(toSrgb(vec3(v2f_height/terrainHeightScale)), 1.0);

            }
"""
        # Note how we provide lists of source code strings for the two shader stages.
        # This is basically the only standard way to 'include' or 'import' code into more than one shader. The variable renderingSystem.commonFragmentShaderCode
        # contains code that we wish to use in all the fragment shaders, for example code to transform the colour output to sRGB.
        # It is also a nice place to put code to compute lighting and other effects that should be the same across the terrain and the racer, for example.
        self.shader = lu.buildShader([vertexShader], [
            "#version 330\n", renderingSystem.commonFragmentShaderCode,
            fragmentShader
        ], {
            "positionIn": 0,
            "normalIn": 1
        })

        # TODO 1.4: Load texture and configure the sampler
        # Use a raw string for the Windows path so the backslashes are not treated as escapes,
        # store the returned texture id, and pass the sRGB flag that loadTexture expects
        # rather than the raw image data.
        self.grassTexture = ObjModel.loadTexture(
            'grass2.png', r'F:\COSC3000_GC\Project\mega_racer\data', True)
        glUseProgram(self.shader)  # sampler uniforms can only be set while the program is bound
        loc = glGetUniformLocation(self.shader, "someTexture")
        glUniform1i(loc, 0)
        glUseProgram(0)
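
A hedged sketch of how the TODO 1.4 left in the fragment shader above might be completed, reusing the "someTexture" sampler bound to unit 0: declare the sampler and derive the texture coordinates from the world-space XY position (the scale factor usage follows the other examples on this page, not this snippet).

    uniform sampler2D someTexture;   // matches the sampler uniform set to unit 0 above

    // inside main(), replacing the grey-scale height colour:
    vec3 materialColour = texture(someTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;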
Example #8
def buildShadowShader():
    return lu.buildShader(shadowVertShader(), shadowFragShader(),
                          ObjModel.getDefaultAttributeBindings())
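
The shadowVertShader() and shadowFragShader() helpers referenced here are not shown. A minimal depth-only pair they could plausibly return (an assumption; only positionAttribute and the transform name follow the conventions of the other examples):

def shadowVertShader():
    return """
        #version 330
        in vec3 positionAttribute;
        uniform mat4 modelToClipTransform;
        void main()
        {
            // Only the depth produced by rasterization matters for a shadow map.
            gl_Position = modelToClipTransform * vec4(positionAttribute, 1.0);
        }
    """

def shadowFragShader():
    return """
        #version 330
        out vec4 fragmentColor;
        void main()
        {
            // The colour output is irrelevant when rendering the shadow map.
            fragmentColor = vec4(1.0);
        }
    """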