Code example #1
0
def test_inverse_dual():
    """Evaluate the dual-indenter analytic inverse model on FEM data.

    For both targets (E* and sigma_y) the model is run on every FEM
    sample and the mean absolute percentage error (MAPE) against the
    FEM ground truth is printed.
    """
    poisson = 0.3
    depth = 0.2e-6  # maximum indentation depth [m]

    def _mape(yname, component):
        # Predict one output component for every FEM sample, then
        # compare against the dataset labels (both in GPa units).
        dataset = FEMData(yname, [70, 60])
        preds = []
        for x in dataset.X:
            res = inverse_model_dual(
                x[0] * 1e9, x[-1] * 1e9, 60, x[2], x[1], poisson, depth
            )
            preds.append(res[component] / 1e9)
        y_pred = np.array(preds)[:, None]
        return np.mean(np.abs(y_pred - dataset.y) / dataset.y) * 100

    # E* is element 1 of the model output; sigma_y is element 3.
    print("E* MAPE:", _mape("E*", 1))
    print("sigma_y MAPE:", _mape("sigma_y", 3))
Code example #2
0
def validation_FEM(yname, angles, train_size):
    """Cross-validate a single-fidelity neural network on FEM data.

    Args:
        yname: Name of the target quantity (e.g. "E*", "sigma_y", "n").
        angles: Indenter half-angles forwarded to ``FEMData``.
        train_size: Number of training samples. 80 selects repeated
            5-fold CV, 90 selects 10-fold CV, anything else uses a
            random shuffle split with ``train_size`` training points.

    Prints the per-fold metric list plus its mean and standard deviation.
    """
    datafem = FEMData(yname, angles)

    # Choose the cross-validation scheme from the training-set size.
    if train_size == 80:
        kf = RepeatedKFold(n_splits=5, n_repeats=2, random_state=0)
    elif train_size == 90:
        kf = KFold(n_splits=10, shuffle=True, random_state=0)
    else:
        kf = ShuffleSplit(
            n_splits=10,
            test_size=len(datafem.X) - train_size,
            random_state=0,
        )

    mape = []
    # enumerate(..., 1) replaces a manual counter that shadowed the
    # builtin `iter`.
    for iteration, (train_index, test_index) in enumerate(kf.split(datafem.X), 1):
        print("\nCross-validation iteration: {}".format(iteration))

        X_train, X_test = datafem.X[train_index], datafem.X[test_index]
        y_train, y_test = datafem.y[train_index], datafem.y[test_index]

        data = dde.data.DataSet(
            X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test
        )
        mape.append(dde.apply(nn, (data, )))

    print(mape)
    print(yname, train_size, np.mean(mape), np.std(mape))
Code example #3
0
def validation_exp_cross3(yname):
    """Multi-fidelity cross-material validation.

    Trains on low-fidelity FEM data plus Berkovich and Al6061
    experiments, tests on Al7075 experiments; repeats 10 times and
    prints mean/std of the APE, saving predictions to "y.dat".
    """
    data_lo = FEMData(yname, [70])
    data_berk = BerkovichData(yname)
    exp_train = ExpData("../data/Al6061.csv", yname)
    exp_test = ExpData("../data/Al7075.csv", yname)

    ape, y = [], []
    for _ in range(10):
        print("\nIteration: {}".format(len(ape)))
        # High-fidelity training set = Berkovich + Al6061 experiments.
        hi_X = np.vstack((data_berk.X, exp_train.X))
        hi_y = np.vstack((data_berk.y, exp_train.y))
        data = dde.data.MfDataSet(
            X_lo_train=data_lo.X,
            X_hi_train=hi_X,
            y_lo_train=data_lo.y,
            y_hi_train=hi_y,
            X_hi_test=exp_test.X,
            y_hi_test=exp_test.y,
        )
        res = dde.apply(mfnn, (data, ))
        ape.append(res[:2])
        y.append(res[2])

    print(yname)
    print(np.mean(ape, axis=0), np.std(ape, axis=0))
    np.savetxt("y.dat", np.hstack(y))
Code example #4
0
def validation_exp_cross2(yname, train_size):
    """Multi-fidelity cross-batch validation.

    Trains on FEM + Berkovich data plus ``train_size`` B3067
    experimental samples, tests on the B3090 experiments over 10
    shuffle splits; prints mean/std APE and saves predictions.
    """
    data_lo = FEMData(yname, [70])
    data_berk = BerkovichData(yname)
    exp_train = ExpData("../data/B3067.csv", yname)
    exp_test = ExpData("../data/B3090.csv", yname)

    ape, y = [], []

    splitter = ShuffleSplit(n_splits=10, train_size=train_size, random_state=0)
    for train_index, _ in splitter.split(exp_train.X):
        print("\nIteration: {}".format(len(ape)))
        print(train_index)
        data = dde.data.MfDataSet(
            X_lo_train=data_lo.X,
            X_hi_train=np.vstack((data_berk.X, exp_train.X[train_index])),
            y_lo_train=data_lo.y,
            y_hi_train=np.vstack((data_berk.y, exp_train.y[train_index])),
            X_hi_test=exp_test.X,
            y_hi_test=exp_test.y,
        )
        res = dde.apply(mfnn, (data, ))
        ape.append(res[:2])
        y.append(res[2])

    print(yname, train_size)
    print(np.mean(ape, axis=0), np.std(ape, axis=0))
    np.savetxt(yname + ".dat", np.hstack(y).T)
Code example #5
0
def validation_exp_cross(yname):
    """Multi-fidelity validation within one experimental batch.

    Splits the B3067 experiments into 10 train subsets of size 10;
    each split trains on FEM + Berkovich + the experimental train
    subset and tests on the held-out experiments. Prints mean/std APE
    and saves the predictions to ``<yname>.dat``.
    """
    data_lo = FEMData(yname, [70])
    data_berk = BerkovichData(yname)
    data_exp = ExpData("../data/B3067.csv", yname)
    n_train = 10

    ape, y = [], []

    splitter = ShuffleSplit(
        n_splits=10, test_size=len(data_exp.X) - n_train, random_state=0
    )
    for train_index, test_index in splitter.split(data_exp.X):
        print("\nIteration: {}".format(len(ape)))
        print(train_index, "==>", test_index)
        data = dde.data.MfDataSet(
            X_lo_train=data_lo.X,
            X_hi_train=np.vstack((data_berk.X, data_exp.X[train_index])),
            y_lo_train=data_lo.y,
            y_hi_train=np.vstack((data_berk.y, data_exp.y[train_index])),
            X_hi_test=data_exp.X[test_index],
            y_hi_test=data_exp.y[test_index],
        )
        res = dde.apply(mfnn, (data, ))
        ape.append(res[:2])
        y.append(res[2])

    print(yname)
    print(np.mean(ape, axis=0), np.std(ape, axis=0))
    np.savetxt(yname + ".dat", np.hstack(y).T)
Code example #6
0
def validation_mf(yname, train_size):
    """Cross-validate the multi-fidelity NN on Berkovich data.

    Args:
        yname: Name of the target quantity.
        train_size: Number of high-fidelity (Berkovich) training
            samples per split; the rest are held out for testing.

    Trains on all low-fidelity FEM data plus a shuffled subset of the
    Berkovich data over 10 splits; prints per-split MAPE plus mean/std.
    """
    datalow = FEMData(yname, [70])
    datahigh = BerkovichData(yname)

    kf = ShuffleSplit(
        n_splits=10,
        test_size=len(datahigh.X) - train_size,
        random_state=0,
    )

    mape = []
    # enumerate(..., 1) replaces a manual counter that shadowed the
    # builtin `iter`.
    for iteration, (train_index, test_index) in enumerate(kf.split(datahigh.X), 1):
        print("\nCross-validation iteration: {}".format(iteration), flush=True)

        data = dde.data.MfDataSet(
            X_lo_train=datalow.X,
            X_hi_train=datahigh.X[train_index],
            y_lo_train=datalow.y,
            y_hi_train=datahigh.y[train_index],
            X_hi_test=datahigh.X[test_index],
            y_hi_test=datahigh.y[test_index],
        )
        mape.append(dde.apply(mfnn, (data, ))[0])

    print(mape)
    print(yname, train_size, np.mean(mape), np.std(mape))
Code example #7
0
def test_inverse():
    """Evaluate the single-indenter analytic inverse model.

    Computes APE statistics for E* and sigma_y on FEM data and for the
    hardening exponent n on Berkovich data, printing the results and
    saving (truth, prediction) pairs to .dat files.
    """
    poisson = 0.3
    depth = 0.2e-6  # maximum indentation depth [m]

    def _predict(dataset, component, scale):
        # Run the inverse model on every sample and pull one component,
        # rescaled into the dataset's units, as an (n, 1) column.
        rows = []
        for x in dataset.X:
            res = inverse_model(x[0] * 1e9, x[2], x[1], poisson, depth)
            rows.append(res[component] / scale)
        return np.array(rows)[:, None]

    # E* (GPa): component 1 of the model output.
    d = FEMData("E*", [70])
    y_pred = _predict(d, 1, 1e9)
    ape = np.abs(y_pred - d.y) / d.y * 100
    print("E* APE:", np.mean(ape), np.std(ape))
    np.savetxt("E.dat", np.hstack((d.y, y_pred)))

    # sigma_y (GPa): component 3 of the model output.
    d = FEMData("sigma_y", [70])
    y_pred = _predict(d, 3, 1e9)
    ape = np.abs(y_pred - d.y) / d.y * 100
    print("sigma_y APE:", np.mean(ape), np.std(ape))
    np.savetxt("sy.dat", np.hstack((d.y, y_pred)))

    # n (dimensionless, so no GPa rescaling): component 2.
    d = BerkovichData("n")
    y_pred = _predict(d, 2, 1)
    print(d.y)
    print(y_pred)
    ape = np.abs(y_pred - d.y) / d.y * 100
    print("n APE:", np.mean(ape), np.std(ape))
    np.savetxt("n.dat", np.hstack((d.y, y_pred)))
Code example #8
0
def validation_scaling(yname):
    """Train a NN on FEM data and test it on scaled Berkovich data.

    Args:
        yname: Name of the target quantity.

    Repeats training 10 times (the network init is stochastic) and
    prints the mean and standard deviation of the MAPE.
    """
    datafem = FEMData(yname, [70])
    dataexp = BerkovichData(yname, scale_c=True)

    mape = []
    # `iteration` instead of `iter`, which shadowed the builtin.
    for iteration in range(10):
        print("\nIteration: {}".format(iteration))
        data = dde.data.DataSet(
            X_train=datafem.X,
            y_train=datafem.y,
            X_test=dataexp.X,
            y_test=dataexp.y,
        )
        mape.append(nn(data))

    print(yname)
    print(np.mean(mape), np.std(mape))
Code example #9
0
def validation_model(yname, train_size):
    """Train a NN on synthetic model data and test it on FEM data.

    Args:
        yname: Name of the target quantity.
        train_size: Number of synthetic training samples generated by
            ``ModelData`` per iteration.

    Repeats 10 times (fresh synthetic data each time) and prints the
    mean and standard deviation of the MAPE.
    """
    datafem = FEMData(yname, [70])

    mape = []
    # `iteration` instead of `iter`, which shadowed the builtin.
    for iteration in range(10):
        print("\nIteration: {}".format(iteration))

        # Fresh synthetic training set every repeat; FEM data is the
        # fixed test set.
        datamodel = ModelData(yname, train_size, "forward")
        X_train, X_test = datamodel.X, datafem.X
        y_train, y_test = datamodel.y, datafem.y

        data = dde.data.DataSet(
            X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test
        )
        mape.append(nn(data))

    print(yname, train_size)
    print(np.mean(mape), np.std(mape))
Code example #10
0
def validation_exp(yname):
    """Multi-fidelity validation against the B3067 experiments.

    Args:
        yname: Name of the target quantity.

    Trains on low-fidelity FEM data plus Berkovich data and tests on
    the B3067 experiments, 10 times; prints mean/std APE and saves the
    predictions to ``<yname>.dat``.
    """
    datalow = FEMData(yname, [70])
    dataBerkovich = BerkovichData(yname)
    dataexp = ExpData("../data/B3067.csv", yname)

    ape = []
    y = []
    # `iteration` instead of `iter`, which shadowed the builtin.
    for iteration in range(10):
        print("\nIteration: {}".format(iteration))
        data = dde.data.MfDataSet(
            X_lo_train=datalow.X,
            X_hi_train=dataBerkovich.X,
            y_lo_train=datalow.y,
            y_hi_train=dataBerkovich.y,
            X_hi_test=dataexp.X,
            y_hi_test=dataexp.y,
        )
        res = dde.apply(mfnn, (data, ))
        ape.append(res[:2])
        y.append(res[2])

    print(yname)
    print(np.mean(ape, axis=0), np.std(ape, axis=0))
    np.savetxt(yname + ".dat", np.hstack(y).T)