Example #1
0
# Coloring the objects to make them easier to identify in the output image
cubo1.color = (46, 119, 187)
cubo2.color = (29, 131, 195)
cubo3.color = (39, 174, 227)

# One pixel per plate hole (square grid); presumably numero_furos_chapa equals
# chapa.number_of_holes used below -- TODO confirm against the full script.
imagem = Image.new('RGB', (numero_furos_chapa, numero_furos_chapa))

for l in range(chapa.number_of_holes):
    for c in range(chapa.number_of_holes):
        colisoes = []  # all ray/object collisions found for this pixel
        cor_primeira_colisao = (255, 255, 255)  # default: white (no hit)
        menor_t = 0  # t of the closest collision found so far

        # Set the ray direction d from p0 towards the plate point (l, c)
        raio.d = Coordinate.given_two_points(p0, chapa.point(l, c))

        # Collect the collisions of the ray against every scene object
        colisoes += cilindro1.verify_colision(raio)
        colisoes += cone1.verify_colision(raio)
        colisoes += cilindro2.verify_colision(raio)
        colisoes += cone2.verify_colision(raio)
        colisoes += cubo1.verify_colision(raio)
        colisoes += cubo2.verify_colision(raio)
        colisoes += cubo3.verify_colision(raio)

        # Determine which object was hit first
        # NOTE(review): only colisoes[0] is inspected here, with no comparison
        # of t values; the fragment appears cut off at this point -- the search
        # for the smallest t presumably continues below. Confirm against the
        # complete script before relying on this.
        if len(colisoes) > 0:
            cor_primeira_colisao = colisoes[0]["color"]
            menor_t = colisoes[0]["t"]
    def BRDFSampling(self, intersPoint, ray, scene, intersectionNormal):
        """Estimate the light arriving at *intersPoint* via BRDF importance sampling.

        Tabulates per-light CDFs of the BRDF over theta and phi, then Monte
        Carlo integrates over the hemisphere oriented along the intersection
        normal, shooting ``MISIntegrator.sampleCount`` light-sense rays.

        Parameters:
            intersPoint: intersection point (3-vector) the rays originate from.
            ray: the camera ray that produced the intersection (only ``ray.o``
                is read, to derive the viewing angles).
            scene: provides ``lights`` (triangles with v1/v2/v3) and
                ``intersectLights(ray)``.
            intersectionNormal: surface normal at the intersection. NOTE: it is
                mutated in place — each component is nudged by 0.001 so the
                cross product with the default hemisphere normal cannot
                degenerate to the zero vector.

        Returns:
            (combinedLightColor, intensity) where combinedLightColor is an RGB
            3-vector and intensity is the sample-averaged light sum divided by
            the hemisphere area (2*pi).
        """
        # Sum of all light hitting the intersection point; normalized by the
        # sample count before being returned.
        aquiredLightSum = 0

        # Per-sample light intensity and light color [R, G, B].
        aquiredLightsIntensity = np.zeros(MISIntegrator.sampleCount)
        aquiredLightsColor = np.zeros((MISIntegrator.sampleCount, 3))

        # Number of filled-in entries in the two arrays above.
        aquiredLightsCount = 0

        # Nudge the normal so the cross product below cannot be zero when the
        # normal coincides with the default hemisphere normal.
        intersectionNormal[0] += 0.001
        intersectionNormal[1] += 0.001
        intersectionNormal[2] += 0.001

        # Matrix rotating from the default hemisphere normal onto the
        # intersection normal.
        # NOTE(review): the rotation angle is dot(a, b) * pi, but the geometric
        # angle between unit vectors is arccos(dot(a, b)) — confirm intent
        # before changing; left as-is to preserve rendering output.
        sampleRoatationMatrix = self.rotation_matrix(
            np.cross(MISIntegrator.defaultHemisphereNormal,
                     intersectionNormal),
            np.dot(MISIntegrator.defaultHemisphereNormal, intersectionNormal) *
            np.pi)

        # Angles under which the camera looks at the intersection point.
        camera_theta, camera_phi = self.VectorToAngles(ray.o - intersPoint)

        # Precalculate theta/phi CDFs for all light sources.
        angleSteps = 20
        thetaCDFs = np.zeros((len(scene.lights), angleSteps))
        phiCDFs = np.zeros((len(scene.lights), angleSteps))

        for lightSourceNr in range(len(scene.lights)):
            # Approximate light position: (near-)centroid of the light triangle.
            pointOnLight = scene.lights[lightSourceNr].v1 * 0.33 + \
                           scene.lights[lightSourceNr].v2 * 0.33 + \
                           scene.lights[lightSourceNr].v3 * 0.33
            lightTheta, lightPhi = self.VectorToAngles(intersPoint -
                                                       pointOnLight)

            # Tabulate the BRDF over theta spanning -pi .. pi.
            for currAngleStep in range(angleSteps):
                # BUGFIX: was (2 * np.pi) / angleSteps - 1 — a precedence
                # error that yielded a negative per-step increment (~-0.686)
                # instead of stepping across the advertised -pi..pi range.
                theta = -np.pi + (
                    (2 * np.pi) / (angleSteps - 1)) * currAngleStep + 0.01

                thetaCDFs[lightSourceNr][currAngleStep] = \
                    self.brdf.lookupValue(theta, lightPhi, camera_theta, camera_phi)[0]

            # Tabulate the BRDF over phi spanning 0 .. 2*pi.
            for currAngleStep in range(angleSteps):
                # BUGFIX: same precedence error as for theta above.
                phi = ((2 * np.pi) / (angleSteps - 1)) * currAngleStep

                phiCDFs[lightSourceNr][currAngleStep] = \
                    self.brdf.lookupValue(lightTheta, phi, camera_theta, camera_phi)[0]

            thetaCDFs[lightSourceNr] = self.UnnormalizedFuncToCummulatedFunc(
                thetaCDFs[lightSourceNr])
            phiCDFs[lightSourceNr] = self.UnnormalizedFuncToCummulatedFunc(
                phiCDFs[lightSourceNr])

        # Integrate over the hemisphere using Monte Carlo.
        for sampleNr in range(MISIntegrator.sampleCount):
            lightSenseRay = Ray(intersPoint)

            # Select a random light to decide which CDF pair to sample from.
            selectedLightIndex = int(
                np.round(np.random.random() * (len(scene.lights) - 1)))

            # Generate theta from the tabulated CDF. arccos converts the
            # [0, 1) probability to an angle so samples do not cluster at the
            # top of the hemisphere.
            probabilityTheta = self.sampleOnCDF(thetaCDFs[selectedLightIndex])
            theta = np.arccos(probabilityTheta)

            # Phi is drawn uniformly from [-pi, pi).
            phi = (np.random.random() * 2 - 1) * np.pi

            # Map the angles onto the unit sphere oriented along the positive
            # x axis...
            lightSenseRaySecondPoint = self.twoAnglesTo3DPoint(theta, phi)

            # ...then rotate the point so the sphere is oriented along the
            # intersection normal.
            lightSenseRaySecondPoint = np.dot(sampleRoatationMatrix,
                                              lightSenseRaySecondPoint)

            # Direction of the light-sense ray shot away from the hemisphere.
            lightSenseRay.d = lightSenseRaySecondPoint

            # Send the ray on its way; record intensity and color on a hit.
            if scene.intersectLights(lightSenseRay):
                aquiredLight = lightSenseRay.firstHitShape.lightIntensity

                aquiredLightSum += aquiredLight

                aquiredLightsIntensity[aquiredLightsCount] = aquiredLight
                aquiredLightsColor[
                    aquiredLightsCount] = lightSenseRay.firstHitShape.lightColor
                aquiredLightsCount += 1

        ############################################################## Calculate Light
        combinedLightColor = np.zeros(3)

        # Avoid division by zero when no light was acquired.
        if aquiredLightSum > 0:
            # Color of the light reaching the shape, weighted by per-sample
            # intensity.
            combinedLightColor = \
                self.weighColorByIntensity(aquiredLightsIntensity, aquiredLightsColor, aquiredLightSum)

            # Normalize the accumulated intensity by the sample count.
            aquiredLightSum /= MISIntegrator.sampleCount

        # The light value was integrated over an area, so divide by the area
        # of the hemisphere (2*pi). Dynamic-range compression (e.g. log) can
        # be applied by the caller to show sun-vs-candle differences.
        return combinedLightColor, aquiredLightSum / (2 * np.pi)