# Example 1
def cal_material_parameter2D(F_macro, type):
    """Evaluate a trained NN surrogate at the macroscopic deformation state.

    Parameters
    ----------
    F_macro : array-like
        Macroscopic deformation measure; flattened to a vector and
        normalised to the network's training range before evaluation.
    type : str
        Selects the network parameter file: '1Dlinear', '1Dnonlinear'
        or '2DLaminate'.  NOTE(review): shadows the builtin ``type``;
        kept for backward compatibility with existing callers.

    Returns
    -------
    tuple
        ``(energy_aver, P, C_effective_unscaled)`` — averaged energy,
        stress vector and effective tangent matrix, all mapped back to
        the physical (unnormalised) scale.
    """
    if type == '1Dlinear':
        filename = 'Mechanics1D_1_NNs.dat'
    elif type == '1Dnonlinear':
        filename = 'Mechanics1D_Nonlinear_1_NNs.dat'
    elif type == '2DLaminate':
        filename = 'yvonnetFunction3d_1com_3_NNs.dat'
    dimD, dimd, L, N, activation_func_type, min_input, max_input, min_max_output, \
    A, w, c, b, d, d0 = fp.get_NN_parameters(filename)
    # Normalise the input to the range the network was trained on.
    F_macro = tramnnmx(F_macro.reshape(-1), min_input, max_input)
    sigma_macro = np.zeros(6)
    C_effective = np.zeros([6, 6])
    energy = 0
    for i in range(0, L):  # loop over network components
        y = dot(A[:, :, i], F_macro) + b[:, :, i].reshape(-1)
        for n in range(0, N):  # loop over neurons
            z = dot(w[n, :, i], y) + d[n, :, i]
            energy += c[n, :, i] * tansig(z)
            # First derivative of the activation -> stress contribution.
            sigma_macro += c[n, :, i] * dot(
                w[n, :, i], A[:, :, i]) * derivative_activation(z)
            # Second derivative of tansig (written out explicitly)
            # -> tangent (Hessian) contribution.
            C_effective += c[n, :, i] * dot(dot(w[n, :, i], A[:, :, i]).reshape(6, 1), dot(w[n, :, i], A[:, :, i]).reshape(1, 6)) \
                  * 4 * ((-2 * exp(-2*z) / (1 + exp(-2*z)) ** 2) + (4 * exp(-4*z) / (1 + exp(-2*z)) ** 3))
        energy += d0[:, :, i].reshape(-1)
    # Map the normalised outputs back to the physical scale.
    energy_aver = postmnmx(energy, min_max_output[0], min_max_output[1])
    P = return_to_rescale_for_stress(sigma_macro, min_input, max_input,
                                     min_max_output[0], min_max_output[1])
    print(sigma_macro)
    C_effective_unscaled = unnormalize_for_hessian(C_effective, min_input,
                                                   max_input,
                                                   min_max_output[0],
                                                   min_max_output[1])
    return energy_aver, P, C_effective_unscaled
def cal_material_parameter2D(F_macro, type):
    """Evaluate a trained NN surrogate at the macroscopic deformation
    gradient and return stress, tangent and averaged energy.

    Parameters
    ----------
    F_macro : array-like
        Macroscopic deformation gradient; flattened to a vector of
        length ``dimD`` and normalised before evaluation.
    type : str
        Selects the network parameter file (see the if/elif chain).
        NOTE(review): shadows the builtin ``type``; kept for backward
        compatibility with existing callers.

    Returns
    -------
    tuple
        ``(P, C_effective_unscaled, energy_aver)`` — stress reshaped to
        a ``dimd x dimd`` matrix, effective tangent matrix, and the
        averaged energy, all rescaled to physical units.
    """
    if type == '1Dlinear':
        filename = 'Mechanics1D_1_NNs.dat'
    elif type == '1Dnonlinear':
        # filename = 'Mechanics1D_Nonlinear_1_NNs.dat'
        filename = 'Mechanics1D_Nonlinear_1d_2com_5N_1000M_1_NNs.dat'
    elif type == '2DLaminate':
        filename = 'Laminate2_4_NNs.dat'
    elif type == 'SaintVenant':
        filename = './machinelearning/training_results/SaintVenant_4d_10com_10N_30000M_4_NNs.dat'
    elif type == 'NeoHookean':
        filename = './machinelearning/training_results/NeoHookean_4d_10com_10N_30000M_4_NNs.dat'
    elif type == 'Inclusion-NeoHookean2':
        filename = './machinelearning/training_results/Inclusion_50x50_NeoHookean2/Inclusion_NeoHookean2_4d_15com_20N_80epoch_30000M_4_NNs.dat'
    dimD, dimd, L, N, activation_func_type, min_input, max_input, min_max_output, \
    A, w, c, b, d, d0 = fp.get_NN_parameters(filename)
    # fix: 'size' and 'dim' were undefined (NameError).  They are the
    # network metadata: input-vector length and spatial dimension.
    size = dimD   # length of the flattened deformation gradient
    dim = dimd    # spatial dimension (size == dim * dim for 2D)
    # Normalise the input to the range the network was trained on.
    F_macro = tramnnmx(F_macro.reshape(-1), min_input, max_input)
    sigma_macro = np.zeros(size)
    C_effective = np.zeros([size, size])
    energy = 0
    for i in range(0, L):  # loop over network components
        y = dot(A[:, :, i], F_macro) + b[:, :, i].reshape(-1)
        for n in range(0, N):  # loop over neurons
            z = dot(w[n, :, i], y) + d[n, :, i]
            energy += c[n, :, i] * tansig(z)
            # First derivative of the activation -> stress contribution.
            sigma_macro += c[n, :, i] * dot(
                w[n, :, i], A[:, :, i]) * derivative_activation(z)
            # Second derivative of tansig -> tangent contribution.
            C_effective += c[n, :, i] * dot(dot(w[n, :, i], A[:, :, i]).reshape(size, 1), dot(w[n, :, i], A[:, :, i]).reshape(1, size)) \
                  * 4 * ((-2 * exp(-2*z) / (1 + exp(-2*z)) ** 2) + (4 * exp(-4*z) / (1 + exp(-2*z)) ** 3))
        energy += d0[:, :, i].reshape(-1)
    # Map the normalised outputs back to the physical scale.
    energy_aver = postmnmx(energy, min_max_output[0], min_max_output[1])
    P = return_to_rescale_for_stress(sigma_macro, min_input, max_input,
                                     min_max_output[0], min_max_output[1])
    P = P.reshape(dim, dim)
    C_effective_unscaled = unnormalize_for_hessian(C_effective, min_input,
                                                   max_input,
                                                   min_max_output[0],
                                                   min_max_output[1])
    return P, C_effective_unscaled, energy_aver
# Example 3
def cal_material_parameter2D(F_macro, type):
    """Evaluate a trained NN surrogate at the macroscopic deformation state.

    Parameters
    ----------
    F_macro : array-like
        Macroscopic deformation measure; flattened to a vector of
        length ``dimD`` and normalised before evaluation.
    type : str
        Selects the network parameter file (see the if/elif chain).
        NOTE(review): shadows the builtin ``type``; kept for backward
        compatibility with existing callers.

    Returns
    -------
    tuple
        ``(energy_aver, P, C_effective_unscaled)`` — averaged energy,
        stress vector and effective tangent matrix, all rescaled to
        physical units.
    """
    if type == '1Dlinear':
        filename = 'Mechanics1D_1_NNs.dat'
    elif type == '1Dnonlinear':
        filename = 'Mechanics1D_Nonlinear_1_NNs.dat'
    elif type == 'Yvonnet_quadratic_function_2d':
        filename = 'yvonnetFunction3d_1com_3_NNs.dat'
    elif type == 'NeoHooke-Laminate':
        filename = 'Laminate_NeoHookean_4d_15com_20N_80epoch_50000M_4_NNs.dat'
    elif type == 'NeoHookean2-Inclusion':
        filename = 'Inclusion_NeoHookean2_4d_15com_20N_80epoch_30000M_4_NNs.dat'
    dimD, dimd, L, N, activation_func_type, min_input, max_input, min_max_output, \
    A, w, c, b, d, d0 = fp.get_NN_parameters(filename)
    # fix: 'Ddim' was undefined (NameError); it is the flattened
    # input-vector length reported by the network metadata.
    Ddim = dimD
    # Normalise the input to the range the network was trained on.
    F_macro = tramnnmx(F_macro.reshape(-1), min_input, max_input)
    sigma_macro = np.zeros(Ddim)
    C_effective = np.zeros([Ddim, Ddim])
    energy = 0
    for i in range(0, L):  # loop over network components
        y = dot(A[:, :, i], F_macro) + b[:, :, i].reshape(-1)
        for n in range(0, N):  # loop over neurons
            z = dot(w[n, :, i], y) + d[n, :, i]
            energy += c[n, :, i] * tansig(z)
            # First derivative of the activation -> stress contribution.
            sigma_macro += c[n, :, i] * dot(
                w[n, :, i], A[:, :, i]) * derivative_activation(z)
            # Second derivative of tansig -> tangent contribution.
            C_effective += c[n, :, i] * dot(dot(w[n, :, i], A[:, :, i]).reshape(Ddim, 1), dot(w[n, :, i], A[:, :, i]).reshape(1, Ddim)) \
                  * 4 * ((-2 * exp(-2*z) / (1 + exp(-2*z)) ** 2) + (4 * exp(-4*z) / (1 + exp(-2*z)) ** 3))
        energy += d0[:, :, i].reshape(-1)
    # Map the normalised outputs back to the physical scale.
    energy_aver = postmnmx(energy, min_max_output[0], min_max_output[1])
    P = return_to_rescale_for_stress(sigma_macro, min_input, max_input,
                                     min_max_output[0], min_max_output[1])
    print(sigma_macro)
    C_effective_unscaled = unnormalize_for_hessian(C_effective, min_input,
                                                   max_input,
                                                   min_max_output[0],
                                                   min_max_output[1])
    return energy_aver, P, C_effective_unscaled
#!/usr/bin/env python2
import util.FileProcessing as fp

# Convert an Abaqus job file into the solver's mesh-data format.
# fix: 'input'/'output' renamed so they no longer shadow the builtins.
inputpath = './Job-1.inp'
outputpath = './cantileverbeam.dat'
fp.create_meshdata_from_abaqusfile(inputpath, outputpath)
# Example 5
 # Parse an FFT result file into 2-D grids of energy (W), first
 # Piola-Kirchhoff stress (P) and tangent (C) components.
 # NOTE(review): truncated fragment -- 'rows', 'columns', 'content',
 # 'eof', 'F12grid_FFT' and 'F21grid_FFT' are defined outside this
 # excerpt, and the loop body is cut off after Cgrid_FFT[0, 1].
 Wgrid_FFT = np.zeros([rows, columns])
 P11grid_FFT = np.zeros([rows, columns])
 Pgrid_FFT = np.zeros([2, 2, rows, columns])
 C1111grid_FFT = np.zeros([rows, columns])
 Cgrid_FFT = np.zeros([4, 4, rows, columns])
 i = -1
 while i < eof - 1:
     i += 1
     line = content[i]
     # Skip header/comment lines (anything containing letters).
     if re.search('#*[a-zA-Z]+', line):
         print(line)
         continue
     else:
         try:
             # Each data line holds semicolon-separated values.
             segments = fp.get_values_wsplit(line, ';')
         except ValueError as e:
             print(e)
             print(line)
             continue
         # NOTE(review): 'i / rows' relies on Python 2 integer
         # division (float index under Python 3 -- use i // rows).
         # Also mixing 'i / rows' with 'i % columns' only maps the
         # flat index correctly when rows == columns -- TODO confirm.
         F12grid_FFT[i / rows, i % columns] = segments[1]
         F21grid_FFT[i / rows, i % columns] = segments[2]
         Wgrid_FFT[i / rows, i % columns] = segments[4]
         P11grid_FFT[i / rows, i % columns] = segments[5]
         # Full 2x2 first Piola-Kirchhoff stress tensor per grid point.
         Pgrid_FFT[0, 0, i / rows, i % columns] = segments[5]
         Pgrid_FFT[0, 1, i / rows, i % columns] = segments[6]
         Pgrid_FFT[1, 0, i / rows, i % columns] = segments[7]
         Pgrid_FFT[1, 1, i / rows, i % columns] = segments[8]
         C1111grid_FFT[i / rows, i % columns] = segments[9]
         # 4x4 tangent components (excerpt ends mid-assignment block).
         Cgrid_FFT[0, 0, i / rows, i % columns] = segments[9]
         Cgrid_FFT[0, 1, i / rows, i % columns] = segments[10]
# Example 6
#!/usr/bin/env python2
import util.FileProcessing as fp
import numpy as np

# Convert the Cook-membrane Q4 Abaqus job into the solver's mesh-data
# format, then append the node-constraint section.
inputpath = './Cook-Q4-Job-2.inp'
outputpath = './Cook_fe_fft_Q4.dat'
fp.create_meshdata_from_abaqusfile(inputpath, outputpath)

# Check that the Abaqus input file is readable before appending output.
try:
    inputfile = open(inputpath, 'r')
except IOError as e:
    print("Can't open file :" + e.filename)
    exit()
inputfile.close()  # fix: handle was leaked (never closed)

# Open the output file in append mode to add the constraint block.
outputfile = open(outputpath, 'a+')
displacementNode = np.array(
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])

# NOTE(review): tractionNode is never written below -- kept for parity
# with the companion script; confirm whether a traction section is missing.
tractionNode = np.array([
    540, 539, 538, 537, 536, 535, 534, 533, 532, 531, 530, 529, 528, 527, 526,
    525, 524, 523
])

outputfile.write('<NodeConstraints>\r')

# Fix both displacement components of each clamped node; node numbering
# in the .dat format is 0-based, hence i - 1.
for i in displacementNode:
    strg1 = 'u[' + str(i - 1) + '] = 0;\r'
    outputfile.write(strg1)
    strg2 = 'v[' + str(i - 1) + '] = 0;\r'
    outputfile.write(strg2)

# fix: close the section tag and the file -- the companion script
# (same <NodeConstraints> format) does both; here they were missing.
outputfile.write('</NodeConstraints>\r')
outputfile.close()
#!/usr/bin/env python2
import util.FileProcessing as fp
import numpy as np

# Convert the Timoshenko-beam T3 Abaqus mesh into the solver's
# full-scale mesh-data format.
# NOTE(review): the trailing arguments (1, 2048, 1, 2048) presumably
# select element ranges -- confirm against FileProcessing.
# fix: removed a large block of dead commented-out code (node-constraint
# writing) that duplicated the companion inclusion script.
inputpath = './TimoBeam_mesh3_T3.inp'
outputpath = './TimoBeam_mesh3_T3.dat'
fp.create_meshdata_fullscale_from_abaqusfile(inputpath, outputpath, 1, 2048, 1,
                                             2048)
import util.FileProcessing as fp

# Post-process raw inclusion training data into the NN training format.
# fix: 'input'/'output' renamed so they no longer shadow the builtins.
inputpath = './training_data_inclusion.dat'
outputpath = './out_training_data_inclusion.dat'
fp.create_data_training_file(inputpath, outputpath)
#!/usr/bin/env python2
import util.FileProcessing as fp
import numpy as np

# Convert the 100-inclusion Q4 Abaqus job into the solver's full-scale
# mesh-data format (material-1 elements 1-400, material-2 elements
# 401-3024 -- presumably element ranges; confirm against FileProcessing),
# then append the node-constraint section.
inputpath = './Job-7.inp'
outputpath = './100_inclusions_job-1-Q4.dat'
# fp.create_meshdata_from_abaqusfile(inputpath, outputpath)
fp.create_meshdata_fullscale_from_abaqusfile(inputpath, outputpath, 1, 400,
                                             401, 3024)

# Check that the Abaqus input file is readable before appending output.
try:
    inputfile = open(inputpath, 'r')
except IOError as e:
    print("Can't open file :" + e.filename)
    exit()
inputfile.close()  # fix: handle was leaked (never closed)

# Open the output file in append mode to add the constraint block.
outputfile = open(outputpath, 'a+')
displacementNode = np.array([101, 102, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, \
 819, 820, 821, 822, 823, 824, 825, 826, 827, 828])

# NOTE(review): tractionNode is never written below -- confirm whether a
# traction section should follow the displacement constraints.
tractionNode = np.array([103, 104, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, \
 942, 943, 944, 945, 946, 947, 948, 949, 950, 951])

outputfile.write('<NodeConstraints>\r')

# Fix both displacement components of each clamped node; node numbering
# in the .dat format is 0-based, hence i - 1.
for i in displacementNode:
    strg1 = 'u[' + str(i - 1) + '] = 0;\r'
    outputfile.write(strg1)
    strg2 = 'v[' + str(i - 1) + '] = 0;\r'
    outputfile.write(strg2)

outputfile.write('</NodeConstraints>\r')
outputfile.close()  # fix: output file was never closed (buffered writes may be lost)