Example #1
0
def preTrain(model, device, params, preOptimizer, preScheduler, fun):
    """Pre-train `model` to fit the target function `fun` on a disk.

    Minimizes the Monte-Carlo estimate of the squared L2 distance between
    `model` and `fun` over a disk of radius params["radius"], logging the
    error every params["writeStep"] steps to "lossData.txt".

    Args:
        model: the network being trained (left in train mode).
        device: torch device the sampled data is moved to.
        params: hyper-parameter dict; uses "preStep", "radius", "bodyBatch",
            "writeStep".
        preOptimizer: optimizer stepped once per iteration.
        preScheduler: accepted for interface symmetry with `train`;
            not stepped here.
        fun: callable (radius, data) -> target values the model should fit.
    """
    model.train()
    # `with` guarantees the log file is flushed and closed even if an
    # iteration raises (the original leaked the handle opened with "w").
    with open("lossData.txt", "w") as file:
        for step in range(params["preStep"]):
            # Fresh Monte-Carlo sample from the disk interior each step.
            data = torch.from_numpy(
                generateData.sampleFromDisk(
                    params["radius"], params["bodyBatch"])).float().to(device)

            output = model(data)
            target = fun(params["radius"], data)

            # Mean squared error scaled by the disk area (pi * r^2), i.e. a
            # Monte-Carlo approximation of the volume integral of (u - f)^2.
            loss = output - target
            loss = torch.mean(loss * loss) * math.pi * params["radius"]**2

            if step % params["writeStep"] == params["writeStep"] - 1:
                with torch.no_grad():
                    ref = exact(params["radius"], data)
                    error = errorFun(output, ref, params)
                    print("Error at Step %s is %s." % (step + 1, error))
                file.write(str(step + 1) + " " + str(error) + "\n")

            model.zero_grad()
            loss.backward()

            # Update the weights.
            preOptimizer.step()
Example #2
0
def train(model,device,params,optimizer,scheduler):
    """Train `model` with the deep-Ritz loss using autograd derivatives.

    Loss = volume term (0.5*|grad u|^2 - f*u over the disk) + penalty
    boundary term ((u - g)^2 over the circle). Samples are redrawn every
    params["sampleStep"] steps; errors are appended to "lossData.txt"
    every params["writeStep"] steps.

    Args:
        model: network being trained (left in train mode).
        device: torch device for sampled data.
        params: hyper-parameter dict; uses "radius", "bodyBatch",
            "bdryBatch", "trainStep", "preStep", "writeStep",
            "sampleStep", "penalty".
        optimizer, scheduler: stepped once per iteration.
    """
    model.train()

    data1 = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
    # Interior points need gradients so autograd can compute du/dx below.
    data1.requires_grad = True
    data2 = torch.from_numpy(generateData.sampleFromSurface(params["radius"],params["bdryBatch"])).float().to(device)

    for step in range(params["trainStep"]-params["preStep"]):
        output1 = model(data1)

        model.zero_grad()

        # Spatial gradient of the network output w.r.t. the interior points;
        # create_graph=True so the loss stays differentiable w.r.t. weights.
        dfdx = torch.autograd.grad(output1,data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0]
        # Loss function 1: Ritz energy over the disk, scaled by its area.
        fTerm = ffun(data1).to(device)
        loss1 = torch.mean(0.5*torch.sum(dfdx*dfdx,1).unsqueeze(1)-fTerm*output1) * math.pi*params["radius"]**2

        # Loss function 2: boundary penalty, scaled by the circumference.
        output2 = model(data2)
        target2 = exact(params["radius"],data2)
        loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] * 2*math.pi*params["radius"])
        loss = loss1+loss2

        if step%params["writeStep"] == params["writeStep"]-1:
            with torch.no_grad():
                target = exact(params["radius"],data1)
                error = errorFun(output1,target,params)
                print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
            # `with` closes the handle each time (the original opened a new
            # file object every writeStep iterations and never closed any).
            with open("lossData.txt","a") as file:
                file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")

        # Periodically resample interior and boundary points.
        if step%params["sampleStep"] == params["sampleStep"]-1:
            data1 = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
            data1.requires_grad = True
            data2 = torch.from_numpy(generateData.sampleFromSurface(params["radius"],params["bdryBatch"])).float().to(device)

        # Progress report at every 10% of the full training schedule.
        if 10*(step+1)%params["trainStep"] == 0:
            print("%s%% finished..."%(100*(step+1)//params["trainStep"]))

        loss.backward()

        optimizer.step()
        scheduler.step()
Example #3
0
def test(model,device,params):
    """Return the relative L2 error of `model` against `exact` on the disk.

    Monte-Carlo quadrature with params["numQuad"] points; the returned
    value is ||u - u_exact||_L2 / ||u_exact||_L2.
    """
    numQuad = params["numQuad"]

    # Evaluation only: no_grad avoids building an autograd graph
    # (identical numerical result, less memory).
    with torch.no_grad():
        # NOTE(review): points are drawn from a radius-1 disk but the area
        # factor below uses params["radius"] — confirm these are meant to
        # agree (cancels in the ratio, but the absolute `error` differs).
        data = torch.from_numpy(generateData.sampleFromDisk(1,numQuad)).float().to(device)
        output = model(data)
        target = exact(params["radius"],data).to(device)

        diff = output-target
        error = math.sqrt(torch.mean(diff*diff)*math.pi*params["radius"]**2)
        # Calculate the L2 norm of the reference solution for normalization.
        ref = math.sqrt(torch.mean(target*target)*math.pi*params["radius"]**2)
    return error/ref
Example #4
0
def train(model, device, params, optimizer, scheduler):
    """Train `model` with the deep-Ritz loss using finite differences.

    Same objective as the autograd variant, but du/dx and du/dy are
    approximated by forward differences with step params["diff"], so the
    interior sample does not need requires_grad. Samples are redrawn every
    params["sampleStep"] steps; errors are appended to "lossData.txt"
    every params["writeStep"] steps.

    Args:
        model: network being trained (left in train mode).
        device: torch device for sampled data.
        params: hyper-parameter dict; uses "radius", "bodyBatch",
            "bdryBatch", "diff", "trainStep", "preStep", "writeStep",
            "sampleStep", "penalty".
        optimizer, scheduler: stepped once per iteration.
    """
    model.train()

    data1 = torch.from_numpy(
        generateData.sampleFromDisk(params["radius"],
                                    params["bodyBatch"])).float().to(device)
    data2 = torch.from_numpy(
        generateData.sampleFromSurface(params["radius"],
                                       params["bdryBatch"])).float().to(device)

    # Constant offsets for the forward-difference stencils in x and y.
    x_shift = torch.from_numpy(np.array([params["diff"],
                                         0.0])).float().to(device)
    y_shift = torch.from_numpy(np.array([0.0,
                                         params["diff"]])).float().to(device)
    data1_x_shift = data1 + x_shift
    data1_y_shift = data1 + y_shift

    for step in range(params["trainStep"] - params["preStep"]):
        output1 = model(data1)
        output1_x_shift = model(data1_x_shift)
        output1_y_shift = model(data1_y_shift)

        # Use forward differences to approximate the spatial derivatives.
        dfdx = (output1_x_shift - output1) / params["diff"]
        dfdy = (output1_y_shift - output1) / params["diff"]

        model.zero_grad()

        # Loss function 1: Ritz energy over the disk, scaled by its area.
        fTerm = ffun(data1).to(device)
        loss1 = torch.mean(0.5 * (dfdx * dfdx + dfdy * dfdy) -
                           fTerm * output1) * math.pi * params["radius"]**2

        # Loss function 2: boundary penalty, scaled by the circumference.
        output2 = model(data2)
        target2 = exact(params["radius"], data2)
        loss2 = torch.mean((output2 - target2) * (output2 - target2) *
                           params["penalty"] * 2 * math.pi * params["radius"])
        loss = loss1 + loss2

        if step % params["writeStep"] == params["writeStep"] - 1:
            with torch.no_grad():
                target = exact(params["radius"], data1)
                error = errorFun(output1, target, params)
                print("Error at Step %s is %s." %
                      (step + params["preStep"] + 1, error))
            # `with` closes the handle each time (the original opened a new
            # file object every writeStep iterations and never closed any).
            with open("lossData.txt", "a") as file:
                file.write(
                    str(step + params["preStep"] + 1) + " " + str(error) + "\n")

        # Periodically resample points and rebuild the shifted stencils.
        if step % params["sampleStep"] == params["sampleStep"] - 1:
            data1 = torch.from_numpy(
                generateData.sampleFromDisk(
                    params["radius"], params["bodyBatch"])).float().to(device)
            data2 = torch.from_numpy(
                generateData.sampleFromSurface(
                    params["radius"], params["bdryBatch"])).float().to(device)

            data1_x_shift = data1 + x_shift
            data1_y_shift = data1 + y_shift

        # Progress report at every 10% of the full training schedule.
        if 10 * (step + 1) % params["trainStep"] == 0:
            print("%s%% finished..." % (100 *
                                        (step + 1) // params["trainStep"]))

        loss.backward()

        optimizer.step()
        scheduler.step()