Code example #1
    def processGradients(self, grads, fields):
        """Normalise raw energy gradients before a descent step.

        Args:
            grads: two-element list of gradient tensors — index 0 is the
                scalar-field gradient, index 1 the gauge-field gradient.
            fields: matching two-element list of field tensors; fields[1]
                is the SU(2) gauge field the projection is taken against.

        Returns:
            A new two-element list of processed gradients; the input list
            is left untouched.
        """
        # Shallow-copy so the caller's list is not mutated in place — the
        # original aliased `grads`, silently overwriting the input.
        processedGrads = list(grads)

        # Scale the scalar-field gradient by the lattice metric, reshaped
        # with two trailing singleton axes so it broadcasts over the
        # gradient's matrix indices.
        processedGrads[0] = grads[0] / (2 * np.pi * tf.cast(
            tf.reshape(self.metric, tf.concat([self.latShape, [1, 1]], 0)),
            tf.complex128))

        # Project the gauge-field gradient into the tangent space of SU(2),
        # then apply the same metric scaling (three trailing singleton axes
        # here — presumably a direction index plus the matrix indices;
        # TODO confirm against the gauge-field tensor layout).
        processedGrads[1] = FieldTools.projectSu2Gradients(grads[1], fields[1])
        processedGrads[1] = processedGrads[1] / (2 * np.pi * tf.cast(
            tf.reshape(self.metric, tf.concat([self.latShape, [1, 1, 1]], 0)),
            tf.complex128))

        return processedGrads
Code example #2
File: generatePair.py — Project: davidho95/tfmonopoles
minSteps = 1000  # Gets stuck in an unwanted saddle point without these
printIncrement = 10

# Gradient-descent loop: always run at least minSteps iterations, then keep
# going until the RMS gradient falls below tol or maxNumSteps is reached.
# NOTE(review): nothing in this excerpt increments numSteps — confirm it is
# advanced later in the loop body (outside this span) or the loop never ends.
while numSteps < minSteps or (rmsGrad > tol and numSteps < maxNumSteps):
    # Compute the field energy, with tf watching the variables
    with tf.GradientTape() as tape:
        energy = lossFn()

    # Renamed from `vars`, which shadowed the builtin of the same name.
    trainVars = [scalarFieldVar, gaugeFieldVar]

    # Compute the gradients using automatic differentiation
    grads = tape.gradient(energy, trainVars)

    # Postprocess the gauge field gradients so they point in the tangent space
    # to SU(2)
    grads[1] = FieldTools.projectSu2Gradients(grads[1], gaugeFieldVar)

    # Accumulate the squared gradient norm for the stopping criterion
    gradSq = FieldTools.innerProduct(grads[0], grads[0], tr=True)
    gradSq += FieldTools.innerProduct(grads[1], grads[1], tr=True, adj=True)

    rmsGrad = tf.sqrt(gradSq)

    if numSteps % printIncrement == 0:
        print(f"Energy after {numSteps} iterations:       {energy.numpy()}")
        # Label fixed: the quantity printed is the RMS gradient (rmsGrad),
        # not "RSS" as the original message claimed.
        print(f"RMS gradient after {numSteps} iterations: {rmsGrad.numpy()}")

    # Perform the gradient descent step
    opt.apply_gradients(zip(grads, trainVars))
Code example #3
    def processGradients(self, grads, fields):
        """Project the gauge-field gradient into the SU(2) tangent space.

        Args:
            grads: two-element list of gradient tensors; index 1 is the
                gauge-field gradient (index 0 is passed through unchanged).
            fields: matching list of field tensors; fields[1] is the gauge
                field the projection is taken relative to.

        Returns:
            A new list with grads[1] replaced by its projected form; the
            caller's list is not modified.
        """
        # Shallow-copy so the caller's list is not mutated in place — the
        # original aliased `grads`, overwriting the input as a side effect.
        processedGrads = list(grads)
        processedGrads[1] = FieldTools.projectSu2Gradients(grads[1], fields[1])

        return processedGrads