Example #1
import math

import torch

# `generateData` (point sampler) and `exact` (exact solution) are assumed to
# be defined elsewhere in the source module.


def test(model, device, params):
    numQuad = params["numQuad"]

    data = torch.from_numpy(
        generateData.sampleFromDomain(numQuad)).float().to(device)
    output = model(data)
    target = exact(data).to(device)

    # Relative L2 error: ||output - target|| / ||target||.
    error = output - target
    error = math.sqrt(torch.mean(error * error))
    ref = math.sqrt(torch.mean(target * target))
    return error / ref
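
A minimal sketch of how test might be driven, assuming a small fully
connected network; the architecture, device selection, and numQuad value are
illustrative assumptions, not taken from the original source.

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hypothetical network: 2 inputs (x, y) -> 1 output u(x, y).
model = torch.nn.Sequential(
    torch.nn.Linear(2, 32), torch.nn.Tanh(),
    torch.nn.Linear(32, 32), torch.nn.Tanh(),
    torch.nn.Linear(32, 1)).to(device)

params = {"numQuad": 10000}  # number of evaluation points (placeholder)
rel_error = test(model, device, params)
print("Relative L2 error: %s" % rel_error)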
Example #2
import math

import numpy as np
import torch

# `generateData`, `exact`, `ffun`, and `errorFun` are assumed to be defined
# elsewhere in the source module.


def train(model, device, params, optimizer, scheduler):
    # Boundary-length to domain-area ratio; the constants match a side-2
    # square with a disk of radius 0.3 removed.
    ratio = (4 * 2.0 + 2 * math.pi * 0.3) / (2.0 * 2.0 - math.pi * 0.3**2)
    model.train()

    data1 = torch.from_numpy(generateData.sampleFromDomain(
        params["bodyBatch"])).float().to(device)
    data2 = torch.from_numpy(
        generateData.sampleFromBoundary(
            params["bdryBatch"])).float().to(device)
    x_shift = torch.from_numpy(np.array([params["diff"],
                                         0.0])).float().to(device)
    y_shift = torch.from_numpy(np.array([0.0,
                                         params["diff"]])).float().to(device)
    data1_x_shift = data1 + x_shift
    data1_y_shift = data1 + y_shift

    for step in range(params["trainStep"] - params["preStep"]):
        output1 = model(data1)
        output1_x_shift = model(data1_x_shift)
        output1_y_shift = model(data1_y_shift)

        # Forward differences approximate the first derivatives.
        dfdx = (output1_x_shift - output1) / params["diff"]
        dfdy = (output1_y_shift - output1) / params["diff"]

        model.zero_grad()

        # Loss function 1: the variational (Ritz) energy for the Poisson
        # problem, mean of 0.5*|grad u|^2 - f*u over interior samples.
        fTerm = ffun(data1).to(device)
        loss1 = torch.mean(0.5 * (dfdx * dfdx + dfdy * dfdy) - fTerm * output1)

        # Loss function 2: penalized squared mismatch with the exact solution
        # on the boundary, scaled by the boundary-to-area ratio.
        output2 = model(data2)
        target2 = exact(data2)
        loss2 = torch.mean((output2 - target2) * (output2 - target2) *
                           params["penalty"] * ratio)
        loss = loss1 + loss2

        if step % params["writeStep"] == params["writeStep"] - 1:
            with torch.no_grad():
                target = exact(data1)
                error = errorFun(output1, target, params)
                # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
                print("Error at Step %s is %s." %
                      (step + params["preStep"] + 1, error))
            file = open("lossData.txt", "a")
            file.write(
                str(step + params["preStep"] + 1) + " " + str(error) + "\n")

        if step % params["sampleStep"] == params["sampleStep"] - 1:
            data1 = torch.from_numpy(
                generateData.sampleFromDomain(
                    params["bodyBatch"])).float().to(device)
            data2 = torch.from_numpy(
                generateData.sampleFromBoundary(
                    params["bdryBatch"])).float().to(device)

            data1_x_shift = data1 + x_shift
            data1_y_shift = data1 + y_shift

        if 10 * (step + 1) % params["trainStep"] == 0:
            print("%s%% finished..." % (100 *
                                        (step + 1) // params["trainStep"]))

        loss.backward()

        optimizer.step()
        scheduler.step()
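
A hedged sketch of the driver loop the two train variants on this page appear
to expect. The optimizer, scheduler, and every value in params are
illustrative placeholders; only the keys actually read by the snippets are
listed.

import torch

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(
    optimizer, step_size=5000, gamma=0.5)

params = {
    "bodyBatch": 1024,   # interior points per batch
    "bdryBatch": 1024,   # boundary points per batch
    "diff": 1e-4,        # finite-difference step size
    "penalty": 500.0,    # boundary penalty weight
    "preStep": 0,        # steps taken before this call
    "trainStep": 50000,  # total training steps
    "writeStep": 50,     # logging interval
    "sampleStep": 10,    # resampling interval
    "numQuad": 10000,    # evaluation points for test()
}

train(model, device, params, optimizer, scheduler)
print("Final relative L2 error: %s" % test(model, device, params))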
Example #3
import math

import torch

# `generateData`, `exact`, `ffun`, and `errorFun` are assumed to be defined
# elsewhere in the source module.


def train(model, device, params, optimizer, scheduler):
    # Boundary-length to domain-area ratio, as in Example #2.
    ratio = (4 * 2.0 + 2 * math.pi * 0.3) / (2.0 * 2.0 - math.pi * 0.3**2)
    model.train()

    data1 = torch.from_numpy(generateData.sampleFromDomain(
        params["bodyBatch"])).float().to(device)
    data1.requires_grad = True
    data2 = torch.from_numpy(
        generateData.sampleFromBoundary(
            params["bdryBatch"])).float().to(device)

    for step in range(params["trainStep"] - params["preStep"]):
        output1 = model(data1)

        model.zero_grad()

        # First derivatives (du/dx, du/dy) via autograd; create_graph=True
        # keeps the graph so second derivatives can be taken below.
        dfdx = torch.autograd.grad(output1,
                                   data1,
                                   grad_outputs=torch.ones_like(output1),
                                   retain_graph=True,
                                   create_graph=True,
                                   only_inputs=True)[0]
        # Second derivatives d2u/dx2 and d2u/dy2 (diagonal of the Hessian).
        dfdxx = torch.autograd.grad(dfdx[:, 0].unsqueeze(1),
                                    data1,
                                    grad_outputs=torch.ones_like(output1),
                                    retain_graph=True,
                                    create_graph=True,
                                    only_inputs=True)[0][:, 0].unsqueeze(1)
        dfdyy = torch.autograd.grad(dfdx[:, 1].unsqueeze(1),
                                    data1,
                                    grad_outputs=torch.ones_like(output1),
                                    retain_graph=True,
                                    create_graph=True,
                                    only_inputs=True)[0][:, 1].unsqueeze(1)
        # Loss function 1: mean squared strong-form residual of the Poisson
        # equation, (u_xx + u_yy + f)^2, over interior samples.
        fTerm = ffun(data1).to(device)
        loss1 = torch.mean((dfdxx + dfdyy + fTerm) * (dfdxx + dfdyy + fTerm))

        # Loss function 2: penalized boundary mismatch, as in Example #2.
        output2 = model(data2)
        target2 = exact(data2)
        loss2 = torch.mean((output2 - target2) * (output2 - target2) *
                           params["penalty"] * ratio)
        loss = loss1 + loss2

        if step % params["writeStep"] == params["writeStep"] - 1:
            with torch.no_grad():
                target = exact(data1)
                error = errorFun(output1, target, params)
                # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
                print("Error at Step %s is %s." %
                      (step + params["preStep"] + 1, error))
            file = open("lossData.txt", "a")
            file.write(
                str(step + params["preStep"] + 1) + " " + str(error) + "\n")

        if step % params["sampleStep"] == params["sampleStep"] - 1:
            data1 = torch.from_numpy(
                generateData.sampleFromDomain(
                    params["bodyBatch"])).float().to(device)
            data1.requires_grad = True
            data2 = torch.from_numpy(
                generateData.sampleFromBoundary(
                    params["bdryBatch"])).float().to(device)

        if 10 * (step + 1) % params["trainStep"] == 0:
            print("%s%% finished..." % (100 *
                                        (step + 1) // params["trainStep"]))

        loss.backward()

        optimizer.step()
        scheduler.step()
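
Both train variants rely on a generateData module that is not shown on this
page. Judging by the ratio constant above, the domain appears to be a side-2
square, say [-1, 1]^2, with a disk of radius 0.3 removed. A hypothetical
rejection-sampling implementation of that sampler interface might look like
this; it is a sketch under those assumptions, not the original module.

import numpy as np

R = 0.3  # disk radius inferred from the ratio constant above


def sampleFromDomain(n):
    # Rejection sampling: uniform points in [-1, 1]^2, kept if outside the disk.
    pts = np.empty((0, 2))
    while pts.shape[0] < n:
        cand = np.random.uniform(-1.0, 1.0, (2 * n, 2))
        cand = cand[np.sum(cand * cand, axis=1) > R * R]
        pts = np.concatenate([pts, cand], axis=0)
    return pts[:n]


def sampleFromBoundary(n):
    # Split points between the square's perimeter and the circle in
    # proportion to their lengths.
    square_len, circle_len = 4 * 2.0, 2 * np.pi * R
    n_circle = int(round(n * circle_len / (square_len + circle_len)))
    n_square = n - n_circle
    theta = np.random.uniform(0.0, 2 * np.pi, n_circle)
    circle = R * np.stack([np.cos(theta), np.sin(theta)], axis=1)
    t = np.random.uniform(-1.0, 1.0, n_square)
    side = np.random.randint(0, 4, n_square)  # 0: bottom, 1: top, 2: left, 3: right
    square = np.empty((n_square, 2))
    square[:, 0] = np.where(side < 2, t, np.where(side == 2, -1.0, 1.0))
    square[:, 1] = np.where(side < 2, np.where(side == 0, -1.0, 1.0), t)
    return np.concatenate([square, circle], axis=0)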