Example No. 1
 def __init__(self, terrain):
     # Load each type of prop and store it in self.trees and self.rocks
     self.terrain = terrain
     for f in self.typeToFileNameList["rock"]:
         self.rocks.append((ObjModel(f[0]), f[1]))
     for f in self.typeToFileNameList["tree"]:
         self.trees.append((ObjModel(f[0]), f[1]))
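
For context, a minimal sketch of the shape that typeToFileNameList is assumed to have here (a dict from prop type to a list of (OBJ path, extra value) pairs); the real mapping is defined elsewhere in the class and the file names below are illustrative only:

typeToFileNameList = {
    "rock": [("data/rocks/rock_01.obj", 10), ("data/rocks/rock_02.obj", 5)],
    "tree": [("data/trees/birch_01_d.obj", 25)],
}
# With this assumed shape, f[0] is the OBJ path passed to ObjModel and f[1] is
# the extra value (here an instance count) stored alongside the loaded model.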
Example No. 2
def buildShader(vertexShaderSource, fragmentShaderSource):
    shader = lu.buildShader(vertexShaderSource, fragmentShaderSource,
                            ObjModel.getDefaultAttributeBindings())
    if shader:
        glUseProgram(shader)
        ObjModel.setDefaultUniformBindings(shader)
        glUseProgram(0)
    return shader
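
A hedged usage sketch for this helper, assuming lu is the lab utility module used throughout these examples and that both arguments are complete GLSL source strings (the variable names below are placeholders, not taken from the original):

vertexShaderSource = "..."    # complete GLSL vertex shader source goes here
fragmentShaderSource = "..."  # complete GLSL fragment shader source goes here

shader = buildShader(vertexShaderSource, fragmentShaderSource)
if not shader:
    raise RuntimeError("shader failed to compile or link")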
Example No. 3
 def loadAllProps(self, terrain):
     # Load trees
     self.treeModel = ObjModel("data/trees/birch_01_d.obj")
     self.treeList = self.loadPropList(self.treeModel, self.treeMax,
                                       terrain.treeLocations)
     # Load rocks
     self.rockModel = ObjModel("data/rocks/rock_01.obj")
     self.rockList = self.loadPropList(self.rockModel, self.rockMax,
                                       terrain.rockLocations)
     # Create one large list
     self.allProps = self.treeList + self.rockList
Example No. 4
def loadModel(modelName):
    global g_model, g_lightDistance
    g_model = ObjModel("data/" + modelName)

    g_camera.target = g_model.centre
    g_camera.distance = lu.length(g_model.centre - g_model.aabbMin) * 3.1
    g_lightDistance = lu.length(g_model.centre - g_model.aabbMin) * 1.3
Example No. 5
 def loadProp(self, propType):
     propModel = ObjModel(
         "data/{propName}/{propName}.obj".format(propName=propType[0]))
     i = 0
     while i < propType[1]:
         prop = Prop()
         prop.load(propModel, propType[2])
         i += 1
         self.allProps.append(prop)
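
The propType argument is indexed but never defined in this snippet; a plausible shape, assumed purely for illustration, is (prop name, instance count, candidate spawn locations):

# Hypothetical input, shown only to illustrate the assumed tuple layout.
rockLocations = [(0.0, 0.0, 0.0), (5.0, 2.0, 0.0)]  # assumed spawn points
rockType = ("rock", 20, rockLocations)               # (name, instance count, locations)
# self.loadProp(rockType) would then build "data/rock/rock.obj" twenty times
# and append the resulting Prop instances to self.allProps.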
Example No. 6
    def setupObjModelShader(self):
        self.objModelShader = lu.buildShader([
            """
                #version 330

                in vec3 positionAttribute;
                in vec3	normalAttribute;
                in vec2	texCoordAttribute;

                uniform mat4 modelToClipTransform;
                uniform mat4 modelToViewTransform;
                uniform mat3 modelToViewNormalTransform;

                // 'out' variables declared in a vertex shader can be accessed in the subsequent stages.
                // For a fragment shader the variable is interpolated (the type of interpolation can be modified; try placing 'flat' in front here and in the fragment shader!).
                out VertexData
                {
	                vec3 v2f_viewSpaceNormal;
	                vec3 v2f_viewSpacePosition;
	                vec2 v2f_texCoord;
                };

                void main() 
                {
	                // gl_Position is a built-in 'out' variable that gets passed on to the clipping and rasterization stages.
                  // It must be written in order to produce any drawn geometry.
                  // We transform the position with one matrix multiply from model to clip space; note the added 1 at the end of the position.
	                gl_Position = modelToClipTransform * vec4(positionAttribute, 1.0);
	                // We transform the normal to view space using the normal transform (which is the inverse-transpose of the rotation part of the modelToViewTransform)
                  // Just using the rotation is only valid if the matrix contains only rotation and uniform scaling.
	                v2f_viewSpaceNormal = normalize(modelToViewNormalTransform * normalAttribute);
	                v2f_viewSpacePosition = (modelToViewTransform * vec4(positionAttribute, 1.0)).xyz;
	                // The texture coordinate is just passed through
	                v2f_texCoord = texCoordAttribute;
                }
                """
        ], [
            "#version 330\n", self.commonFragmentShaderCode, """
                // Input from the vertex shader; it contains the interpolated (i.e., area-weighted average) value output by each of the three vertex shader
                // invocations that produced the vertex data for the triangle this fragment is part of.
                in VertexData
                {
	                vec3 v2f_viewSpaceNormal;
	                vec3 v2f_viewSpacePosition;
	                vec2 v2f_texCoord;
                };

                // Material properties set by OBJModel.
                uniform vec3 material_diffuse_color; 
	            uniform float material_alpha;
                uniform vec3 material_specular_color; 
                uniform vec3 material_emissive_color; 
                uniform float material_specular_exponent;

                // Textures set by OBJModel 
                uniform sampler2D diffuse_texture;
                uniform sampler2D opacity_texture;
                uniform sampler2D specular_texture;
                uniform sampler2D normal_texture;

                out vec4 fragmentColor;

                void main() 
                {
	                // Manual alpha test (note: the fixed-function alpha test is no longer part of OpenGL 3.3).
	                if (texture(opacity_texture, v2f_texCoord).r < 0.5)
	                {
		                discard;
	                }

	                vec3 materialDiffuse = texture(diffuse_texture, v2f_texCoord).xyz * material_diffuse_color;
                    vec3 materialSpecular = texture(specular_texture, v2f_texCoord).xyz * material_specular_color;
                    vec3 reflectedLight = computeShading(materialDiffuse,v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour) + material_emissive_color;
	                fragmentColor = vec4(toSrgb(reflectedLight), material_alpha);
                }
            """
        ], ObjModel.getDefaultAttributeBindings())
        glUseProgram(self.objModelShader)
        ObjModel.setDefaultUniformBindings(self.objModelShader)
        glUseProgram(0)
Example No. 7
    def load(self, imageName, renderingSystem):
        with Image.open(imageName) as im:
            self.imageWidth = im.size[0]
            self.imageHeight = im.size[1]
            self.imageData = im.tobytes("raw",
                                        "RGBX" if im.mode == 'RGB' else "RGBA",
                                        0, -1)

            xyOffset = -vec2(float(self.imageWidth), float(
                self.imageHeight)) * self.xyScale / 2.0

            # Calculate vertex positions
            terrainVerts = []
            for j in range(self.imageHeight):
                for i in range(self.imageWidth):
                    offset = (j * self.imageWidth + i) * 4
                    # Copy the pixel's 4 channels
                    imagePixel = self.imageData[offset:offset + 4]
                    # Normalize the red channel from [0,255] to [0.0, 1.0]
                    red = float(imagePixel[0]) / 255.0

                    xyPos = vec2(i, j) * self.xyScale + xyOffset
                    # TODO 1.1: set the height
                    #zPos = 0.0
                    zPos = red * self.heightScale  # Change from zero for 1.1
                    pt = vec3(xyPos[0], xyPos[1], zPos)
                    terrainVerts.append(pt)

                    green = imagePixel[1]
                    if green == 255:
                        self.startLocations.append(pt)
                    if green == 128:
                        self.treeLocations.append(pt)
                    if green == 64:
                        self.rockLocations.append(pt)

            # build vertex normals...
            terrainNormals = [vec3(0.0, 0.0, 1.0)
                              ] * self.imageWidth * self.imageHeight
            for j in range(1, self.imageHeight - 1):
                for i in range(1, self.imageWidth - 1):
                    v = terrainVerts[j * self.imageWidth + i]
                    vxP = terrainVerts[j * self.imageWidth + i - 1]
                    vxN = terrainVerts[j * self.imageWidth + i + 1]
                    dx = vxP - vxN

                    vyP = terrainVerts[(j - 1) * self.imageWidth + i]
                    vyN = terrainVerts[(j + 1) * self.imageWidth + i]
                    dy = vyP - vyN

                    nP = lu.normalize(lu.cross(dx, dy))

                    vdxyP = terrainVerts[(j - 1) * self.imageWidth + i - 1]
                    vdxyN = terrainVerts[(j + 1) * self.imageWidth + i + 1]
                    dxy = vdxyP - vdxyN

                    vdyxP = terrainVerts[(j - 1) * self.imageWidth + i + 1]
                    vdyxN = terrainVerts[(j + 1) * self.imageWidth + i - 1]
                    dyx = vdyxP - vdyxN

                    nD = lu.normalize(lu.cross(dxy, dyx))

                    terrainNormals[j * self.imageWidth + i] = lu.normalize(nP +
                                                                           nD)

            # Join the vertices into quads (2 triangles of 3 vertices each), with one less quad than vertices in each direction.
            terrainInds = [0] * 2 * 3 * (self.imageWidth -
                                         1) * (self.imageHeight - 1)
            for j in range(0, self.imageHeight - 1):
                for i in range(0, self.imageWidth - 1):
                    # Vertex indices to the four corners of the quad.
                    qInds = [
                        j * self.imageWidth + i,
                        j * self.imageWidth + i + 1,
                        (j + 1) * self.imageWidth + i,
                        (j + 1) * self.imageWidth + i + 1,
                    ]
                    outOffset = 3 * 2 * (j * (self.imageWidth - 1) + i)
                    points = [
                        terrainVerts[qInds[0]],
                        terrainVerts[qInds[1]],
                        terrainVerts[qInds[2]],
                        terrainVerts[qInds[3]],
                    ]
                    # output first triangle:
                    terrainInds[outOffset + 0] = qInds[0]
                    terrainInds[outOffset + 1] = qInds[1]
                    terrainInds[outOffset + 2] = qInds[2]
                    # second triangle
                    terrainInds[outOffset + 3] = qInds[2]
                    terrainInds[outOffset + 4] = qInds[1]
                    terrainInds[outOffset + 5] = qInds[3]

            self.terrainInds = terrainInds

            self.vertexArrayObject = lu.createVertexArrayObject()
            self.vertexDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainVerts, 0)
            self.normalDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainNormals, 1)
            self.indexDataBuffer = lu.createAndAddIndexArray(
                self.vertexArrayObject, terrainInds)

            #normalDataBuffer = createAndAddVertexArrayData<vec4>(g_particleVao, { vec4(0.0f) }, 1);

        vertexShader = """
            #version 330
            in vec3 positionIn;
            in vec3 normalIn;

            uniform mat4 modelToClipTransform;
            uniform mat4 modelToViewTransform;
            uniform mat3 modelToViewNormalTransform;

            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;
            uniform vec2 xyNormScale;
            uniform vec2 xyOffset;

            //2.2
            uniform mat4 worldToViewTransform;


            // 'out' variables declared in a vertex shader can be accessed in the subsequent stages.
            // For a fragment shader the variable is interpolated (the type of interpolation can be modified, try placing 'flat' in front here and in the fragment shader!).
            out VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;
                vec3 v2f_worldSpaceNormal; // 2.1 - Steep

                vec2 v2f_xyNormScale; // 2.1 - Road
                vec2 v2f_xyOffset; // 2.1 - Road

                // 2.2
                vec3 cameraPosition;
                vec3 cameraToPointVector;

            };

            void main() 
            {
                // pass the world-space Z to the fragment shader, as it is used to compute the colour and other things
	            v2f_height = positionIn.z;
                v2f_worldSpacePosition = positionIn;
                v2f_viewSpacePosition = (modelToViewTransform * vec4(positionIn, 1.0)).xyz;
                v2f_viewSpaceNormal = modelToViewNormalTransform * normalIn;

                v2f_worldSpaceNormal = normalIn; //2.1 - Steep
                v2f_xyNormScale = xyNormScale; // 2.1 - Road
                v2f_xyOffset = xyOffset; // 2.1 - Road

                //2.2
                cameraPosition = vec3(worldToViewTransform[3][0],worldToViewTransform[3][1],worldToViewTransform[3][2]);
                cameraToPointVector = normalize(positionIn - cameraPosition);

	            // gl_Position is a built-in 'out' variable that gets passed on to the clipping and rasterization stages (hardware fixed function).
                // it must be written by the vertex shader in order to produce any drawn geometry. 
                // We transform the position using one matrix multiply from model to clip space. Note the added 1 at the end of the position to make the 3D
                // coordinate homogeneous.
	            gl_Position = modelToClipTransform * vec4(positionIn, 1.0);
            }
"""

        fragmentShader = """
            // Input from the vertex shader; it contains the interpolated (i.e., area-weighted average) value output by each of the three vertex shader
            // invocations that produced the vertex data for the triangle this fragment is part of.
            in VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;

                vec3 v2f_worldSpaceNormal;  //2.1 - Steep
                vec2 v2f_xyNormScale; // 2.1 - Road
                vec2 v2f_xyOffset; // 2.1 - Road

                // 2.2
                vec3 cameraPosition;
                vec3 cameraToPointVector;
            };

            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;
            // 1.4
            uniform sampler2D grassTexture; 
            // Olympics
            uniform sampler2D wallTexture;
            uniform sampler2D seatsTexture;   
            uniform sampler2D trackTexture;
            uniform sampler2D mapTexture;
            uniform sampler2D concreteTexture;

            out vec4 fragmentColor;

            void main() 
            {
                vec3 materialColour = vec3(v2f_height/terrainHeightScale);
                // Default colour 
                vec3 concreteColour = texture(concreteTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
                materialColour = concreteColour;

                // 2.1                 
                float slope = dot(v2f_worldSpaceNormal, vec3(v2f_worldSpaceNormal.x, 0.0, v2f_worldSpaceNormal.z)); //Steep 
                float blueChannel = texture(mapTexture, (v2f_worldSpacePosition.xy - v2f_xyOffset) * v2f_xyNormScale).z; //Road

                // Track Texture
                if (blueChannel >= 0.9) {
                    vec3 trackColour = texture(trackTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
                    materialColour = trackColour;
                // Grass texture
                } else if (v2f_height < 1) {
                    vec3 grassColour = texture(grassTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
                    materialColour = grassColour; 
                // Wall/Banner texture
                } else if ((v2f_height < 11) && (slope < 0.2)) {
                    vec3 wallColour = texture(wallTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
                    materialColour = wallColour;                  
                // Seats
                } else if (slope < 0.2) {
                    vec3 seatsColour = texture(seatsTexture, v2f_worldSpacePosition.xy * terrainTextureXyScale).xyz;
                    materialColour = seatsColour; 
                } 
                vec3 reflectedLight = computeShading(materialColour, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour);
	            //fragmentColor = vec4(toSrgb(reflectedLight), 1.0); // before 2.2
	            //fragmentColor = vec4(toSrgb(vec3(v2f_height/terrainHeightScale)), 1.0); //start??

                //2.2 - Fog       
                //fragmentColor = vec4(toSrgb(applyFog(reflectedLight, -v2f_viewSpacePosition.z)), 1.0); // basic fog
                fragmentColor = vec4(toSrgb(applyFog(reflectedLight, -v2f_viewSpacePosition.z, cameraPosition, cameraToPointVector)), 1.0);
            }
"""
        # Note how we provide lists of source-code strings for the two shader stages.
        # This is basically the only standard way to 'include' or 'import' code into more than one shader. The variable renderingSystem.commonFragmentShaderCode
        # contains code that we wish to use in all the fragment shaders, for example code to transform the colour output to sRGB.
        # It is also a nice place to put code to compute lighting and other effects that should be the same across the terrain and the racer, for example.
        self.shader = lu.buildShader([vertexShader], [
            "#version 330\n", renderingSystem.commonFragmentShaderCode,
            fragmentShader
        ], {
            "positionIn": 0,
            "normalIn": 1
        })

        # TODO 1.4: Load texture and configure the sampler
        self.grassTexture = ObjModel.loadTexture("grass.png", "data", True)

        # Olympics
        self.wallTexture = ObjModel.loadTexture("banner.png", "data", True)
        self.seatsTexture = ObjModel.loadTexture("seats.png", "data", True)
        self.trackTexture = ObjModel.loadTexture("track.png", "data", True)
        self.mapTexture = ObjModel.loadTexture("map.png", "data", False)
        self.concreteTexture = ObjModel.loadTexture("concrete.jpg", "data",
                                                    True)
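
The per-vertex normals in the loop above come from central differences along the two grid axes (and the two diagonals), crossed to get surface normals. A small self-contained sketch of the axis-aligned part of that idea, using plain tuples in place of the lab's vec3/lu helpers (which are assumed to behave like the functions below):

def sub(a, b):
    return (a[0] - b[0], a[1] - b[1], a[2] - b[2])

def cross(a, b):
    return (a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0])

def normalize(v):
    length = (v[0] ** 2 + v[1] ** 2 + v[2] ** 2) ** 0.5
    return (v[0] / length, v[1] / length, v[2] / length)

def vertex_normal(verts, width, i, j):
    # Central differences along x and y give two tangent vectors at the
    # interior vertex (i, j); their cross product estimates the normal.
    dx = sub(verts[j * width + i - 1], verts[j * width + i + 1])
    dy = sub(verts[(j - 1) * width + i], verts[(j + 1) * width + i])
    return normalize(cross(dx, dy))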
Example No. 8
 def load(self, objModelName, terrain, renderingSystem):
     self.terrain = terrain
     self.position = terrain.startLocations[0]
     # TODO 1.3: This is a good place to create and load the racer model
     self.model = ObjModel(objModelName)
Example No. 9
    def __init__(self):
        QtGui.QMainWindow.__init__(self)

        self.authdialog = authdialog(self)
        self.utimer = QtCore.QTimer()

        param.params.ui = Ui_MainWindow()
        ui = param.params.ui

        ui.setupUi(self)

        # Not sure how to do this in designer, so we put it randomly and move it now.
        ui.statusbar.addWidget(ui.userLabel)
        self.setUser(param.params.myuid)

        self.setWindowTitle("Parameter Manager for %s (%s)" %
                            (param.params.hutch.upper(), param.params.table))

        ui.objectTable.verticalHeader().hide()
        ui.objectTable.setCornerButtonEnabled(False)
        ui.objectTable.horizontalHeader().setMovable(True)

        ui.configTable.verticalHeader().hide()
        ui.configTable.setCornerButtonEnabled(False)
        ui.configTable.horizontalHeader().setMovable(True)

        ui.groupTable.verticalHeader().hide()
        ui.groupTable.setCornerButtonEnabled(False)
        ui.groupTable.horizontalHeader().setMovable(False)

        ui.groupWidget.close()

        param.params.db = db()

        ui.menuView.addAction(ui.configWidget.toggleViewAction())
        ui.configWidget.setWindowTitle(param.params.table + " configurations")
        param.params.cfgmodel = CfgModel()
        ui.configTable.init(param.params.cfgmodel, 0, 2)
        ui.configTable.setShowGrid(True)
        ui.configTable.resizeColumnsToContents()
        ui.configTable.setItemDelegate(MyDelegate(self))

        ui.menuView.addAction(ui.objectWidget.toggleViewAction())
        ui.objectWidget.setWindowTitle(param.params.table + " objects")
        param.params.objmodel = ObjModel()
        ui.objectTable.init(param.params.objmodel, 0, 2)
        ui.objectTable.setShowGrid(True)
        ui.objectTable.resizeColumnsToContents()
        ui.objectTable.setSortingEnabled(True)
        ui.objectTable.sortByColumn(param.params.objmodel.namecol,
                                    QtCore.Qt.AscendingOrder)
        ui.objectTable.setItemDelegate(MyDelegate(self))

        ui.menuView.addAction(ui.groupWidget.toggleViewAction())
        ui.groupWidget.setWindowTitle(param.params.table +
                                      " configuration groups")
        param.params.grpmodel = GrpModel()
        ui.groupTable.init(param.params.grpmodel, 0, 3)
        ui.groupTable.setShowGrid(True)
        ui.groupTable.resizeColumnsToContents()
        ui.groupTable.setSortingEnabled(False)
        ui.groupTable.setItemDelegate(MyDelegate(self))

        param.params.objmodel.setupContextMenus(ui.objectTable)
        param.params.cfgmodel.setupContextMenus(ui.configTable)
        param.params.grpmodel.setupContextMenus(ui.groupTable)

        param.params.cfgdialog = dialogs.cfgdialog(param.params.cfgmodel, self)
        param.params.colsavedialog = dialogs.colsavedialog(self)
        param.params.colusedialog = dialogs.colusedialog(self)
        param.params.deriveddialog = dialogs.deriveddialog(self)
        param.params.confirmdialog = dialogs.confirmdialog(self)

        param.params.db.objchange.connect(param.params.objmodel.objchange)
        param.params.db.cfgchange.connect(param.params.objmodel.cfgchange)
        param.params.db.cfgchange.connect(param.params.cfgmodel.cfgchange)
        param.params.db.cfgchange.connect(param.params.grpmodel.cfgchange)
        param.params.db.grpchange.connect(param.params.grpmodel.grpchange)

        param.params.cfgmodel.newname.connect(
            param.params.cfgmodel.haveNewName)
        param.params.cfgmodel.newname.connect(
            param.params.objmodel.haveNewName)
        param.params.cfgmodel.cfgChanged.connect(param.params.objmodel.cfgEdit)

        settings = QtCore.QSettings(param.params.settings[0],
                                    param.params.settings[1])
        settings.beginGroup(param.params.table)
        self.restoreGeometry(settings.value("geometry").toByteArray())
        self.restoreState(settings.value("windowState").toByteArray())
        ui.configTable.restoreHeaderState(
            settings.value("cfgcol/default").toByteArray())
        ui.objectTable.restoreHeaderState(
            settings.value("objcol/default").toByteArray())
        ui.groupTable.restoreHeaderState(
            settings.value("grpcol/default").toByteArray())
        param.params.objmodel.setObjSel(
            str(settings.value("objsel").toByteArray()))

        # MCB - This is so if we have too many rows/columns in the save file,
        # we get rid of them.  Is this just a problem as we develop the group model
        # though?
        param.params.grpmodel.grpchange()

        # MCB - Sigh.  I don't know why this is needed, but it is, otherwise the FreezeTable breaks.
        h = ui.configTable.horizontalHeader()
        h.resizeSection(1, h.sectionSize(1) + 1)
        h.resizeSection(1, h.sectionSize(1) - 1)
        h = ui.objectTable.horizontalHeader()
        h.resizeSection(1, h.sectionSize(1) + 1)
        h.resizeSection(1, h.sectionSize(1) - 1)
        h = ui.groupTable.horizontalHeader()
        h.resizeSection(1, h.sectionSize(1) + 1)
        h.resizeSection(1, h.sectionSize(1) - 1)

        ui.configTable.colmgr = "%s/cfgcol" % param.params.table
        ui.objectTable.colmgr = "%s/objcol" % param.params.table
        ui.groupTable.colmgr = "%s/grpcol" % param.params.table

        if param.params.debug:
            self.connect(ui.debugButton, QtCore.SIGNAL("clicked()"),
                         param.params.grpmodel.doDebug)
        else:
            ui.debugButton.hide()
        self.connect(ui.saveButton, QtCore.SIGNAL("clicked()"),
                     param.params.objmodel.commitall)
        self.connect(ui.revertButton, QtCore.SIGNAL("clicked()"),
                     param.params.objmodel.revertall)
        if param.params.applyOK:
            self.connect(ui.applyButton, QtCore.SIGNAL("clicked()"),
                         param.params.objmodel.applyall)
        else:
            self.connect(
                ui.applyButton, QtCore.SIGNAL("clicked()"),
                lambda: QtGui.QMessageBox.critical(
                    None, "Error",
                    "Apply disabled.  Restart with --applyenable.", QtGui.
                    QMessageBox.Ok))
        self.connect(ui.actionAuto, QtCore.SIGNAL("triggered()"),
                     param.params.objmodel.doShow)
        self.connect(ui.actionProtected, QtCore.SIGNAL("triggered()"),
                     param.params.objmodel.doShow)
        self.connect(ui.actionManual, QtCore.SIGNAL("triggered()"),
                     param.params.objmodel.doShow)
        self.connect(ui.actionTrack, QtCore.SIGNAL("triggered()"),
                     param.params.objmodel.doTrack)
        self.connect(ui.actionAuth, QtCore.SIGNAL("triggered()"),
                     self.doAuthenticate)
        self.connect(ui.actionExit, QtCore.SIGNAL("triggered()"), self.doExit)
        self.connect(self.utimer, QtCore.SIGNAL("timeout()"),
                     self.unauthenticate)
        self.connect(
            ui.objectTable.selectionModel(),
            QtCore.SIGNAL("selectionChanged(QItemSelection,QItemSelection)"),
            param.params.objmodel.selectionChanged)
        # MCB - Sigh. I should just make FreezeTableView actually work.
        self.connect(
            ui.objectTable.cTV.selectionModel(),
            QtCore.SIGNAL("selectionChanged(QItemSelection,QItemSelection)"),
            param.params.objmodel.selectionChanged)
Example No. 10
 def load(self, objModelName, terrain, renderingSystem):
     self.terrain = terrain
     self.position = terrain.startLocations[0]
     self.model = ObjModel(objModelName)
Example No. 11
    def load(self, imageName, renderingSystem):

        with Image.open(imageName) as im:
            self.imageWidth = im.size[0]
            self.imageHeight = im.size[1]
            self.imageData = im.tobytes("raw",
                                        "RGBX" if im.mode == 'RGB' else "RGBA",
                                        0, -1)

            xyOffset = -vec2(float(self.imageWidth), float(
                self.imageHeight)) * self.xyScale / 2.0

            # Calculate vertex positions
            terrainVerts = []
            for j in range(self.imageHeight):
                for i in range(self.imageWidth):
                    offset = (j * self.imageWidth + i) * 4
                    # Copy the pixel's 4 channels
                    imagePixel = self.imageData[offset:offset + 4]
                    # Normalize the red channel from [0,255] to [0.0, 1.0]
                    red = float(imagePixel[0]) / 255.0

                    xyPos = vec2(i, j) * self.xyScale + xyOffset
                    # TODO 1.1: set the height
                    zPos = self.heightScale * red
                    pt = vec3(xyPos[0], xyPos[1], zPos)
                    terrainVerts.append(pt)

                    green = imagePixel[1]
                    if green == 255:
                        self.startLocations.append(pt)
                    if green == 128:
                        self.treeLocations.append(pt)
                    if green == 64:
                        self.rockLocations.append(pt)

            # build vertex normals...
            terrainNormals = [vec3(0.0, 0.0, 1.0)
                              ] * self.imageWidth * self.imageHeight
            for j in range(1, self.imageHeight - 1):
                for i in range(1, self.imageWidth - 1):
                    v = terrainVerts[j * self.imageWidth + i]
                    vxP = terrainVerts[j * self.imageWidth + i - 1]
                    vxN = terrainVerts[j * self.imageWidth + i + 1]
                    dx = vxP - vxN

                    vyP = terrainVerts[(j - 1) * self.imageWidth + i]
                    vyN = terrainVerts[(j + 1) * self.imageWidth + i]
                    dy = vyP - vyN

                    nP = lu.normalize(lu.cross(dx, dy))

                    vdxyP = terrainVerts[(j - 1) * self.imageWidth + i - 1]
                    vdxyN = terrainVerts[(j + 1) * self.imageWidth + i + 1]
                    dxy = vdxyP - vdxyN

                    vdyxP = terrainVerts[(j - 1) * self.imageWidth + i + 1]
                    vdyxN = terrainVerts[(j + 1) * self.imageWidth + i - 1]
                    dyx = vdyxP - vdyxN

                    nD = lu.normalize(lu.cross(dxy, dyx))

                    terrainNormals[j * self.imageWidth + i] = lu.normalize(nP +
                                                                           nD)

            # Join the vertices into quads (2 triangles of 3 vertices each), with one less quad than vertices in each direction.
            terrainInds = [0] * 2 * 3 * (self.imageWidth -
                                         1) * (self.imageHeight - 1)
            for j in range(0, self.imageHeight - 1):
                for i in range(0, self.imageWidth - 1):
                    # Vertex indices to the four corners of the quad.
                    qInds = [
                        j * self.imageWidth + i,
                        j * self.imageWidth + i + 1,
                        (j + 1) * self.imageWidth + i,
                        (j + 1) * self.imageWidth + i + 1,
                    ]
                    outOffset = 3 * 2 * (j * (self.imageWidth - 1) + i)
                    points = [
                        terrainVerts[qInds[0]],
                        terrainVerts[qInds[1]],
                        terrainVerts[qInds[2]],
                        terrainVerts[qInds[3]],
                    ]
                    # output first triangle:
                    terrainInds[outOffset + 0] = qInds[0]
                    terrainInds[outOffset + 1] = qInds[1]
                    terrainInds[outOffset + 2] = qInds[2]
                    # second triangle
                    terrainInds[outOffset + 3] = qInds[2]
                    terrainInds[outOffset + 4] = qInds[1]
                    terrainInds[outOffset + 5] = qInds[3]

            self.terrainInds = terrainInds

            self.vertexArrayObject = lu.createVertexArrayObject()
            self.vertexDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainVerts, 0)
            self.normalDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainNormals, 1)
            self.indexDataBuffer = lu.createAndAddIndexArray(
                self.vertexArrayObject, terrainInds)

            #normalDataBuffer = createAndAddVertexArrayData<vec4>(g_particleVao, { vec4(0.0f) }, 1);

        vertexShader = """
            #version 330
            in vec3 positionIn;
            in vec3 normalIn;

            uniform mat4 worldToViewTransform;
            uniform mat4 modelToClipTransform;
            uniform mat4 modelToViewTransform;
            uniform mat3 modelToViewNormalTransform;
            uniform mat4 lightPOVTransform;
            
            uniform sampler2D terrainDataSampler;
            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;
            uniform vec2 xyNormScale;
            uniform vec2 xyOffset;
            

            // 'out' variables declared in a vertex shader can be accessed in the subsequent stages.
            // For a fragment shader the variable is interpolated (the type of interpolation can be modified, try placing 'flat' in front here and in the fragment shader!).
            out VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;
                vec2 normalizedXYcoords;
                float distance;
                vec3 viewToVertexPosition;
                vec3 worldSpaceNormal;
                vec4 fragPosLightSpace;
                vec3 cameraPosInWorldSpace;
            };

            void main() 
            {
                // pass the world-space Z to the fragment shader, as it is used to compute the colour and other things
	            v2f_height = positionIn.z;
                v2f_worldSpacePosition = positionIn;
                v2f_viewSpacePosition = (modelToViewTransform * vec4(positionIn, 1.0)).xyz;
                v2f_viewSpaceNormal = modelToViewNormalTransform * normalIn;
                worldSpaceNormal = normalIn;
                normalizedXYcoords = positionIn.xy * xyNormScale + xyOffset;
                distance = -v2f_viewSpacePosition.z;
                //first use the worldToViewTransform to get the camera world space coords
                cameraPosInWorldSpace = vec3(worldToViewTransform[3][0],worldToViewTransform[3][1],worldToViewTransform[3][2]);
                viewToVertexPosition = normalize(positionIn - cameraPosInWorldSpace);
	            // gl_Position is a built-in 'out' variable that gets passed on to the clipping and rasterization stages (hardware fixed function).
                // it must be written by the vertex shader in order to produce any drawn geometry. 
                // We transform the position using one matrix multiply from model to clip space. Note the added 1 at the end of the position to make the 3D
                // coordinate homogeneous.
                fragPosLightSpace = lightPOVTransform * vec4(positionIn, 1.0);
	            gl_Position = modelToClipTransform * vec4(positionIn, 1.0);
            }
"""

        fragmentShader = """
            // Input from the vertex shader; it contains the interpolated (i.e., area-weighted average) value output by each of the three vertex shader
            // invocations that produced the vertex data for the triangle this fragment is part of.
            in VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;
                vec2 normalizedXYcoords;
                float distance; //camera to geometry distance
                vec3 viewToVertexPosition;
                vec3 worldSpaceNormal;
                vec4 fragPosLightSpace;
                vec3 cameraPosInWorldSpace;
            };

            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;
            uniform sampler2D terrainTexture;
            uniform sampler2D roadTexture;
            uniform sampler2D highTexture;
            uniform sampler2D steepTexture;
            uniform sampler2D terrainDataSample;
            //
            uniform sampler2D specularGrassTexture;
            uniform sampler2D specularHighTexture;
            uniform sampler2D specularRoadTexture;
            uniform sampler2D specularSteepTexture;
            //
            out vec4 fragmentColor;

            void main() 
            {
                // trying height = 0.7 / steep 0.5
                //vec3 materialColour = vec3(v2f_height/terrainHeightScale);
                // TODO 1.4: Compute the texture coordinates and sample the texture for the grass and use as material colour.
                vec3 materialDiffuse;
                vec3 materialSpecular;
                float steepThreshold = 0.959931; // roughly 55 degrees, in radians
                float steepness = acos(dot(normalize(worldSpaceNormal), vec3(0,0,1)));
                vec3 blueChannel = texture(terrainDataSample, normalizedXYcoords).xyz;
                float matSpecExp;
                vec3 reflectedLight;
                
                if(blueChannel.b == 1.0)
                {
                    materialDiffuse = texture(roadTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    materialSpecular = texture(specularRoadTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    reflectedLight = computeShadingDiffuse(materialDiffuse, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, fragPosLightSpace);
                }
                else if(steepness > steepThreshold)
                {
                    materialDiffuse = texture(steepTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    materialSpecular = texture(specularSteepTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    reflectedLight = computeShadingDiffuse(materialDiffuse, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, fragPosLightSpace);
                }
                else if (v2f_height > 55)
                {
                    materialDiffuse = texture(highTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    materialSpecular = texture(specularHighTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    matSpecExp = 50.0;
                    reflectedLight = computeShadingSpecular(materialDiffuse, materialSpecular, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, matSpecExp,  fragPosLightSpace);
                }
                else
                {
                    materialDiffuse = texture(terrainTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    materialSpecular = texture(specularGrassTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).xyz;
                    matSpecExp = 150.0;
                    reflectedLight = computeShadingSpecular(materialDiffuse, materialSpecular, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour, matSpecExp,  fragPosLightSpace);
                }
                
                //float depthValue = texture(shadowMapTexture, vec2(v2f_worldSpacePosition.x,v2f_worldSpacePosition.y) * terrainTextureXyScale).r;
                //fragmentColor = vec4(vec3(depthValue), 1.0);
	            fragmentColor = vec4(toSrgb(applyFog(reflectedLight,distance, cameraPosInWorldSpace, viewToVertexPosition)), 1.0);
	            //fragmentColor = vec4(toSrgb(vec3(v2f_height/terrainHeightScale)), 1.0);

            }
"""
        # Note how we provide lists of source-code strings for the two shader stages.
        # This is basically the only standard way to 'include' or 'import' code into more than one shader. The variable renderingSystem.commonFragmentShaderCode
        # contains code that we wish to use in all the fragment shaders, for example code to transform the colour output to sRGB.
        # It is also a nice place to put code to compute lighting and other effects that should be the same across the terrain and the racer, for example.
        self.shader = lu.buildShader([vertexShader], [
            "#version 330\n", renderingSystem.commonFragmentShaderCode,
            fragmentShader
        ], {
            "positionIn": 0,
            "normalIn": 1
        })

        # TODO 1.4: Load texture and configure the sampler
        self.terrainTexId = ObjModel.loadTexture("data/grass2.png", "", True)
        self.highTexId = ObjModel.loadTexture("data/rock 2.png", "", True)
        self.roadTexId = ObjModel.loadTexture("data/paving 5.png", "", True)
        self.steepTexId = ObjModel.loadTexture("data/rock 5.png", "", True)
        self.specGrassTexId = ObjModel.loadTexture("data/grass_specular.png",
                                                   "", True)
        self.specHighTexId = ObjModel.loadTexture("data/high_specular.png", "",
                                                  True)
        self.specSteepTexId = ObjModel.loadTexture("data/steep_specular.png",
                                                   "", True)
        self.specRoadTexId = ObjModel.loadTexture("data/road_specular.png", "",
                                                  True)
        self.terrainDataSampleTexId = ObjModel.loadTexture(
            "data/track_01_128.png", "", False)
Example No. 12
    def load(self, imageName, renderingSystem):
        with Image.open(imageName) as im:
            self.imageWidth = im.size[0]
            self.imageHeight = im.size[1]
            self.imageData = im.tobytes("raw",
                                        "RGBX" if im.mode == 'RGB' else "RGBA",
                                        0, -1)

            xyOffset = -vec2(float(self.imageWidth), float(
                self.imageHeight)) * self.xyScale / 2.0

            # Calculate vertex positions
            terrainVerts = []
            for j in range(self.imageHeight):
                for i in range(self.imageWidth):
                    offset = (j * self.imageWidth + i) * 4
                    # Copy the pixel's 4 channels
                    imagePixel = self.imageData[offset:offset + 4]
                    # Normalize the red channel from [0,255] to [0.0, 1.0]
                    red = float(imagePixel[0]) / 255.0

                    xyPos = vec2(i, j) * self.xyScale + xyOffset
                    # TODO 1.1: set the height
                    zPos = self.heightScale * red

                    pt = vec3(xyPos[0], xyPos[1], zPos)
                    terrainVerts.append(pt)

                    green = imagePixel[1]
                    if green == 255:
                        self.startLocations.append(pt)
                    if green == 128:
                        self.treeLocations.append(pt)
                    if green == 64:
                        self.rockLocations.append(pt)

            # build vertex normals...
            terrainNormals = [vec3(0.0, 0.0, 1.0)
                              ] * self.imageWidth * self.imageHeight
            for j in range(1, self.imageHeight - 1):
                for i in range(1, self.imageWidth - 1):
                    v = terrainVerts[j * self.imageWidth + i]
                    vxP = terrainVerts[j * self.imageWidth + i - 1]
                    vxN = terrainVerts[j * self.imageWidth + i + 1]
                    dx = vxP - vxN

                    vyP = terrainVerts[(j - 1) * self.imageWidth + i]
                    vyN = terrainVerts[(j + 1) * self.imageWidth + i]
                    dy = vyP - vyN

                    nP = lu.normalize(lu.cross(dx, dy))

                    vdxyP = terrainVerts[(j - 1) * self.imageWidth + i - 1]
                    vdxyN = terrainVerts[(j + 1) * self.imageWidth + i + 1]
                    dxy = vdxyP - vdxyN

                    vdyxP = terrainVerts[(j - 1) * self.imageWidth + i + 1]
                    vdyxN = terrainVerts[(j + 1) * self.imageWidth + i - 1]
                    dyx = vdyxP - vdyxN

                    nD = lu.normalize(lu.cross(dxy, dyx))

                    terrainNormals[j * self.imageWidth + i] = lu.normalize(nP +
                                                                           nD)

            # Join the vertices into quads (2 triangles of 3 vertices each), with one less quad than vertices in each direction.
            terrainInds = [0] * 2 * 3 * (self.imageWidth -
                                         1) * (self.imageHeight - 1)
            for j in range(0, self.imageHeight - 1):
                for i in range(0, self.imageWidth - 1):
                    # Vertex indices to the four corners of the quad.
                    qInds = [
                        j * self.imageWidth + i,
                        j * self.imageWidth + i + 1,
                        (j + 1) * self.imageWidth + i,
                        (j + 1) * self.imageWidth + i + 1,
                    ]
                    outOffset = 3 * 2 * (j * (self.imageWidth - 1) + i)
                    points = [
                        terrainVerts[qInds[0]],
                        terrainVerts[qInds[1]],
                        terrainVerts[qInds[2]],
                        terrainVerts[qInds[3]],
                    ]
                    # output first triangle:
                    terrainInds[outOffset + 0] = qInds[0]
                    terrainInds[outOffset + 1] = qInds[1]
                    terrainInds[outOffset + 2] = qInds[2]
                    # second triangle
                    terrainInds[outOffset + 3] = qInds[2]
                    terrainInds[outOffset + 4] = qInds[1]
                    terrainInds[outOffset + 5] = qInds[3]

            self.terrainInds = terrainInds

            self.vertexArrayObject = lu.createVertexArrayObject()
            self.vertexDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainVerts, 0)
            self.normalDataBuffer = lu.createAndAddVertexArrayData(
                self.vertexArrayObject, terrainNormals, 1)
            self.indexDataBuffer = lu.createAndAddIndexArray(
                self.vertexArrayObject, terrainInds)

            #normalDataBuffer = createAndAddVertexArrayData<vec4>(g_particleVao, { vec4(0.0f) }, 1);

        vertexShader = """
            #version 330
            in vec3 positionIn;
            in vec3 normalIn;

            uniform mat4 modelToClipTransform;
            uniform mat4 modelToViewTransform;
            uniform mat3 modelToViewNormalTransform;

            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;
            uniform vec2 xyNormScale;
            uniform vec2 xyOffset;


            // 'out' variables declared in a vertex shader can be accessed in the subsequent stages.
            // For a fragment shader the variable is interpolated (the type of interpolation can be modified, try placing 'flat' in front here and in the fragment shader!).
            out VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;
            };

            void main() 
            {
                // pass the world-space Z to the fragment shader, as it is used to compute the colour and other things
	            v2f_height = positionIn.z;
                v2f_worldSpacePosition = positionIn;
                v2f_viewSpacePosition = (modelToViewTransform * vec4(positionIn, 1.0)).xyz;
                v2f_viewSpaceNormal = modelToViewNormalTransform * normalIn;

	            // gl_Position is a built-in 'out' variable that gets passed on to the clipping and rasterization stages (hardware fixed function).
                // it must be written by the vertex shader in order to produce any drawn geometry. 
                // We transform the position using one matrix multiply from model to clip space. Note the added 1 at the end of the position to make the 3D
                // coordinate homogeneous.
	            gl_Position = modelToClipTransform * vec4(positionIn, 1.0);
            }
"""

        fragmentShader = """
            // Input from the vertex shader; it contains the interpolated (i.e., area-weighted average) value output by each of the three vertex shader
            // invocations that produced the vertex data for the triangle this fragment is part of.
            in VertexData
            {
	            float v2f_height;
                vec3 v2f_viewSpacePosition;
                vec3 v2f_viewSpaceNormal;
                vec3 v2f_worldSpacePosition;
            };

            uniform float terrainHeightScale;
            uniform float terrainTextureXyScale;

            out vec4 fragmentColor;

            void main() 
            {
                vec3 materialColour = vec3(v2f_height/terrainHeightScale);
                // TODO 1.4: Compute the texture coordinates and sample the texture for the grass and use as material colour.

                vec3 reflectedLight = computeShading(materialColour, v2f_viewSpacePosition, v2f_viewSpaceNormal, viewSpaceLightPosition, sunLightColour);
	            fragmentColor = vec4(toSrgb(reflectedLight), 1.0);
	            //fragmentColor = vec4(toSrgb(vec3(v2f_height/terrainHeightScale)), 1.0);

            }
"""
        # Note how we provide lists of source-code strings for the two shader stages.
        # This is basically the only standard way to 'include' or 'import' code into more than one shader. The variable renderingSystem.commonFragmentShaderCode
        # contains code that we wish to use in all the fragment shaders, for example code to transform the colour output to sRGB.
        # It is also a nice place to put code to compute lighting and other effects that should be the same across the terrain and the racer, for example.
        self.shader = lu.buildShader([vertexShader], [
            "#version 330\n", renderingSystem.commonFragmentShaderCode,
            fragmentShader
        ], {
            "positionIn": 0,
            "normalIn": 1
        })

        # TODO 1.4: Load texture and configure the sampler
        ObjModel.loadTexture('grass2.png',
                             r'F:\COSC3000_GC\Project\mega_racer\data',
                             self.imageData)
        loc = glGetUniformLocation(self.shader, "someTexture")
        glUseProgram(self.shader)  # glUniform* applies to the currently bound program
        glUniform1i(loc, 0)        # point the sampler at texture unit 0
        glUseProgram(0)
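
Setting the sampler uniform to 0 only tells the shader which texture unit to read; the texture itself still has to be bound to that unit before drawing. A hedged sketch of what that binding typically looks like in PyOpenGL (shader and grassTexId are assumed names for the program built above and the id returned by ObjModel.loadTexture):

from OpenGL.GL import (GL_TEXTURE0, GL_TEXTURE_2D, glActiveTexture,
                       glBindTexture, glUseProgram)

glUseProgram(shader)                       # the terrain shader built above
glActiveTexture(GL_TEXTURE0)               # select texture unit 0 ...
glBindTexture(GL_TEXTURE_2D, grassTexId)   # ... and bind the grass texture to it
# draw the terrain here; the sampler pointed at unit 0 now reads this texture
glUseProgram(0)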
Example No. 13
def buildShadowShader():
    return lu.buildShader(shadowVertShader(), shadowFragShader(),
                          ObjModel.getDefaultAttributeBindings())
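
The shadowVertShader() and shadowFragShader() helpers are not shown here; a minimal, hedged sketch of what a depth-only shadow pass pair could look like under that assumption (attribute and uniform names follow the other examples, but this is illustrative, not the project's actual helpers):

def shadowVertShader():
    return ["""
        #version 330
        in vec3 positionAttribute;
        uniform mat4 modelToClipTransform;
        void main()
        {
            // Only depth is needed for the shadow map, so just transform the position.
            gl_Position = modelToClipTransform * vec4(positionAttribute, 1.0);
        }
    """]

def shadowFragShader():
    return ["""
        #version 330
        void main()
        {
            // No colour output; the depth value is written by the fixed-function stage.
        }
    """]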
Example No. 14
 def load(self, objModelName, terrain, position, renderingSystem):
     self.terrain = terrain
     self.model = ObjModel(objModelName)
     self.position = position
     self.randRot = random.uniform(0, 6.28)  # 0 to ~2*pi