Code example #1
File: homework3.py Project: tshidhore/solver-codes
def diffusion_CN(filename, phi_cold, phi_hot, t_final, dt, alpha, flag=0):

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(
        mshfile_fullpath, node_reordering=True)

    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa)  # Converting partofa to an array
    faono1 = np.array(faono)  # Converting faono to an array
    cold_bc = np.where(partofa1 == 'COLD')  # Vectorized approach to find face belonging to part 'COLD'
    hot_bc = np.where(partofa1 == 'HOT')  # Vectorized approach to find face belonging to part 'HOT'
    solid = np.where(partofa1 == 'SOLID')  # Vectorized approach to find face belonging to part 'SOLID'

    # Find part to which each node belongs to
    partono = []
    for j in np.arange(nno):
        part_name = partofa1[faono1[j]]
        hot_count = np.array(np.where(part_name == 'HOT')).size
        cold_count = np.array(np.where(part_name == 'COLD')).size
        if cold_count != 0:
            partono.append('COLD')
        elif hot_count != 0:
            partono.append('HOT')
        else:
            partono.append('SOLID')

    partono1 = np.array(partono)

    phi = np.zeros(nno)  #Full phi

    AREA = np.zeros(ncv)

    #Pre-processing and finding normals over all CVs
    for i in np.arange(ncv):
        nocv = noofa[faocv[i]]  # Nodal pairs for each face of the CV
        # x- and y-components of vectors parallel to the faces. Convention: 2nd point - 1st point in nocv
        par_x = xy_no[nocv[:, 1], 0] - xy_no[nocv[:, 0], 0]
        par_y = xy_no[nocv[:, 1], 1] - xy_no[nocv[:, 0], 1]

        #Calculating areas of CV assuming rectangles or triangles
        if np.size(faocv[i]) == 3:
            area_cv = np.abs(0.5 * ((par_x[0] * par_y[1]) -
                                    (par_x[1] * par_y[0])))
            AREA[i] = area_cv

        if np.size(faocv[i]) == 4:
            area_cv = max(
                np.abs((par_x[0] * par_y[1]) - (par_x[1] * par_y[0])),
                np.abs((par_x[0] * par_y[2]) - (par_x[2] * par_y[0])))
            AREA[i] = area_cv

    # Temperature for internal nodes
    nno_int = nno - np.unique(noofa[cold_bc]).size - np.unique(noofa[hot_bc]).size  # No. of internal nodes
    phi[:nno_int] = 400. * np.ones(nno_int)  # Define initial value on internal nodes

    # Defining boundary values in phi
    phi[np.unique(noofa[cold_bc])] = phi_cold
    phi[np.unique(noofa[hot_bc])] = phi_hot

    Gx = scysparse.csr_matrix(
        (nno, nno))  # Matrix to calculate X-gradient at CVs
    Gy = scysparse.csr_matrix(
        (nno, nno))  # Matrix to calculate Y-gradient at CVs
    I = scysparse.identity(nno)

    for i in np.arange(nno):
        neigh_no = np.unique(noofa[
            faono[i]])  # Gives neighbouring nodes including the central nodes
        neigh_no = np.delete(
            neigh_no, np.where(neigh_no == i)
        )  # Find index of central node and delete that entry from neighbouring node array
        neigh_no = np.delete(
            neigh_no, np.where(neigh_no == -1)
        )  # Find index of boundary node and delete the -1 entry from neighbouring node array
        dx_ik = (xy_no[neigh_no, 0] - xy_no[i, 0]
                 )  # Stores dx for all neighbouring nodes
        dy_ik = (xy_no[neigh_no, 1] - xy_no[i, 1]
                 )  # Stores dy for all neighbouring nodes
        w_ik = 1. / np.sqrt(
            (dx_ik**2) + (dy_ik**2))  # Array of weights for least-squared fit
        a_ik = sum((w_ik * dx_ik)**2)
        b_ik = sum(
            ((w_ik)**2) * dx_ik * dy_ik
        )  #Co-efficients a_ik, b_ik, c_ik from least-squared fitting algorithm.
        c_ik = sum((w_ik * dy_ik)**2)

        det = (a_ik * c_ik) - (b_ik**2)

        # Filling out weights for collocation point
        Gx[i, i] -= sum(((c_ik * ((w_ik)**2) * dx_ik) -
                         (b_ik * ((w_ik)**2) * dy_ik))) / det
        Gy[i, i] -= sum(((a_ik * ((w_ik)**2) * dy_ik) -
                         (b_ik * ((w_ik)**2) * dx_ik))) / det

        for j, n in enumerate(neigh_no):
            Gx[i, n] += ((c_ik * ((w_ik[j])**2) * dx_ik[j]) -
                         (b_ik * ((w_ik[j])**2) * dy_ik[j])) / det
            Gy[i, n] += ((a_ik * ((w_ik[j])**2) * dy_ik[j]) -
                         (b_ik * ((w_ik[j])**2) * dx_ik[j])) / det

    # Max. CFL based on diffusion
    CFL_max = 4 * alpha * dt / np.min(AREA)
    print "CFL_max = %f" % (CFL_max)
    time_t = 0.
    count = 1

    Divgrad = (Gx * Gx) + (Gy * Gy)
    Divgrad[nno_int:, :] = 0.

    A = scysparse.identity(nno) - (0.5 * dt * Divgrad)
    B = scysparse.identity(nno) + (0.5 * dt * Divgrad)

    # Time loop
    while time_t <= t_final:

        print "Iteration %d" % (count)
        print "Time %f" % (time_t)
        phi = splinalg.spsolve(A, B * phi)
        count += 1
        time_t += dt
        if flag == 1:
            if count % 10 == 0:
                plot_data.plot_data(
                    xy_no[:, 0], xy_no[:, 1], phi,
                    "Pure Diffusion: Crank Nicolson, Solution at t=%f, CFL = %f.pdf"
                    % (time_t, CFL_max))
    plot_data.plot_data(
        xy_no[:, 0], xy_no[:, 1], phi,
        "Pure Diffusion: Crank Nicolson, Final Solution at t=%f at CFL_max = %f.pdf"
        % (t_final, CFL_max))
    return (phi, CFL_max)
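
The following is a minimal standalone sketch (not part of the project) of the Crank-Nicolson update that diffusion_CN applies above, shown on a 1-D Laplacian instead of the least-squares Divgrad operator: phi is advanced by solving (I - 0.5*dt*L) phi_new = (I + 0.5*dt*L) phi, with the operator rows zeroed on Dirichlet nodes so the boundary values stay frozen, mirroring Divgrad[nno_int:, :] = 0. The grid size, dt and boundary values are illustrative.

import numpy as np
import scipy.sparse as scysparse
import scipy.sparse.linalg as splinalg

n, alpha = 50, 1.0
dx, dt = 1.0 / (n - 1), 1.0e-4
L = scysparse.lil_matrix(scysparse.diags([1., -2., 1.], [-1, 0, 1], shape=(n, n)) * (alpha / dx**2))
L[0, :] = 0.        # zero the rows of the two Dirichlet nodes
L[n - 1, :] = 0.
L = L.tocsr()

A = (scysparse.identity(n) - 0.5 * dt * L).tocsc()   # implicit half
B = (scysparse.identity(n) + 0.5 * dt * L).tocsr()   # explicit half

phi = 400. * np.ones(n)
phi[0], phi[n - 1] = 500., 300.                      # hot / cold boundary values
for _ in range(100):
    phi = splinalg.spsolve(A, B * phi)               # one Crank-Nicolson step
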
Code example #2
File: homework3.py Project: tshidhore/solver-codes
figure_folder = "../report/figures/"
icemcfd_project_folder = './mesh/'
filename = "Mesh_3.msh"
filename1 = "Mesh_2.msh"

figwidth, figheight = 14, 12
lineWidth = 3
fontSize = 25
gcafontSize = 21

if p1a:

    print "Module for problem 1a."
    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(
        mshfile_fullpath, node_reordering=True)

    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa)  # Converting partofa to an array
    faono1 = np.array(faono)  # Converting faono to an array
    #    cold_bc = np.where(partofa1=='COLD')   # Vectorized approach to find face belonging to part 'COLD'
    #    hot_bc = np.where(partofa1=='HOT')   # Vectorized approach to find face belonging to part 'HOT'
    #    solid = np.where(partofa1=='SOLID')   # Vectorized approach to find face belonging to part 'SOLID'

    CFL_max = 0.85

    cx = 3.0 * xy_no[:, 0]
    cy = 4.0 * xy_no[:, 1]
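
Since this snippet only fixes a CFL target and the convection velocity, here is a hedged sketch (illustrative values, not the project's code) of how a matching time step could be backed out using the convective CFL definition that appears in convection_SL below; area_min stands in for np.min(AREA) computed from the mesh, and the random node coordinates are placeholders for xy_no read from the mesh file.

import numpy as np

CFL_max = 0.85
area_min = 1.0e-3                          # placeholder for np.min(AREA) from the mesh
xy_no = np.random.rand(200, 2)             # placeholder node coordinates
cx = 3.0 * xy_no[:, 0]
cy = 4.0 * xy_no[:, 1]
c_max = np.max(np.sqrt(cx**2 + cy**2))
dt = CFL_max * np.sqrt(area_min) / c_max   # inverts CFL = |c|_max * dt / sqrt(A_min)
print(dt)
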
Code example #3
File: homework3.py Project: tshidhore/solver-codes
def convection_SL(filename, phi_cold, phi_hot, t_final, dt):

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(
        mshfile_fullpath, node_reordering=True)

    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa)  # Converting partofa to an array
    faono1 = np.array(faono)  # Converting faono to an array
    cold_bc = np.where(partofa1 == 'COLD')  # Vectorized approach to find face belonging to part 'COLD'
    hot_bc = np.where(partofa1 == 'HOT')  # Vectorized approach to find face belonging to part 'HOT'
    solid = np.where(partofa1 == 'SOLID')  # Vectorized approach to find face belonging to part 'SOLID'

    cx = 3.0 * xy_no[:, 0]
    cy = 4.0 * xy_no[:, 1]
    # Find part to which each node belongs to
    partono = []
    for j in np.arange(nno):
        part_name = partofa1[faono1[j]]
        hot_count = np.array(np.where(part_name == 'HOT')).size
        cold_count = np.array(np.where(part_name == 'COLD')).size
        if cold_count != 0:
            partono.append('COLD')
        elif hot_count != 0:
            partono.append('HOT')
        else:
            partono.append('SOLID')

    partono1 = np.array(partono)

    phi = np.zeros(nno)  #Full phi

    AREA = np.zeros(ncv)

    #Pre-processing and finding normals over all CVs
    for i in np.arange(ncv):
        nocv = noofa[faocv[i]]  # Nodal pairs for each face of the CV
        # x- and y-components of vectors parallel to the faces. Convention: 2nd point - 1st point in nocv
        par_x = xy_no[nocv[:, 1], 0] - xy_no[nocv[:, 0], 0]
        par_y = xy_no[nocv[:, 1], 1] - xy_no[nocv[:, 0], 1]

        #Calculating areas of CV assuming rectangles or triangles
        if np.size(faocv[i]) == 3:
            area_cv = np.abs(0.5 * ((par_x[0] * par_y[1]) -
                                    (par_x[1] * par_y[0])))
            AREA[i] = area_cv

        if np.size(faocv[i]) == 4:
            area_cv = max(
                np.abs((par_x[0] * par_y[1]) - (par_x[1] * par_y[0])),
                np.abs((par_x[0] * par_y[2]) - (par_x[2] * par_y[0])))
            AREA[i] = area_cv

    # Temperature for internal nodes
    nno_int = nno - np.unique(noofa[cold_bc]).size - np.unique(noofa[hot_bc]).size  # No. of internal nodes
    phi[:nno_int] = 400. * np.ones(nno_int)  # Define initial value on internal nodes

    # Defining boundary values in phi
    phi[np.unique(noofa[cold_bc])] = phi_cold
    phi[np.unique(noofa[hot_bc])] = phi_hot

    # Defining the operator
    x_past = np.zeros(nno_int)
    y_past = np.zeros(nno_int)
    phi_past = np.zeros(nno_int)

    # Filling weights in L
    for i in np.arange(nno_int):

        x_past[i] = xy_no[i, 0] - cx[i] * dt
        y_past[i] = xy_no[i, 1] - cy[i] * dt

    # Max. CFL based on convection
    CFL_max = np.max(np.sqrt((cx)**2 + (cy)**2)) * dt / np.sqrt(np.min(AREA))
    print "CFL_max = %f" % (CFL_max)
    time_t = 0.
    count = 1

    # Time loop
    while time_t <= t_final:

        print "Iteration %d" % (count)
        print "Time %f" % (time_t)
        phi_past = griddata(np.vstack(
            (xy_no[:, 0].flatten(), xy_no[:, 1].flatten())).T,
                            np.vstack(phi.flatten()), (x_past, y_past),
                            method="cubic")
        phi[:nno_int] = phi_past.reshape(phi[:nno_int].shape)
        count += 1
        time_t += dt


#        if count%20:
#            plot_data.plot_data(xy_no[:,0],xy_no[:,1],phi,"Solution at t=%f.pdf" %(time_t))
#    plot_data.plot_data(xy_no[:,0],xy_no[:,1],phi,"Final Solution at t=%f.pdf" %(t_final))
    print "CFL_max = %f" % (CFL_max)
    plot_data.plot_data(
        xy_no[:, 0], xy_no[:, 1], phi,
        "Final Solution: Convection at t=%f for Semi-Lagrange approach.pdf" %
        (time_t))
    return (phi, CFL_max)
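
A minimal standalone sketch of the semi-Lagrangian step taken inside the time loop above: each point is traced back along the local velocity by one time step and the field is interpolated at the departure point with scipy.interpolate.griddata, as in convection_SL. The scattered points and initial field below are placeholders, not the mesh data.

import numpy as np
from scipy.interpolate import griddata

np.random.seed(1)
xy = np.random.rand(400, 2)                  # placeholder scattered node coordinates
phi = np.sin(2. * np.pi * xy[:, 0])          # placeholder initial field
cx, cy = 3.0 * xy[:, 0], 4.0 * xy[:, 1]      # velocity field used in these examples
dt = 1.0e-3

x_past = xy[:, 0] - cx * dt                  # departure points, one step back along c
y_past = xy[:, 1] - cy * dt
phi_new = griddata(xy, phi, (x_past, y_past), method="cubic")
outside = np.isnan(phi_new)                  # departure points that left the convex hull
phi_new[outside] = phi[outside]
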
Code example #4
def correction(filename, mesh_no):

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(
        mshfile_fullpath, node_reordering=True)

    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa)  # Converting partofa to an array
    faono1 = np.array(faono)  # Converting faono to an array
    cold_bc = np.where(partofa1 == 'COLD')  # Vectorized approach to find face belonging to part 'COLD'
    hot_bc = np.where(partofa1 == 'HOT')  # Vectorized approach to find face belonging to part 'HOT'
    solid = np.where(partofa1 == 'SOLID')  # Vectorized approach to find face belonging to part 'SOLID'

    # Find part to which each node belongs to
    partono = []
    for j in np.arange(nno):
        part_name = partofa1[faono1[j]]
        hot_count = np.array(np.where(part_name == 'HOT')).size
        cold_count = np.array(np.where(part_name == 'COLD')).size
        if cold_count != 0:
            partono.append('COLD')
        elif hot_count != 0:
            partono.append('HOT')
        else:
            partono.append('SOLID')

    partono1 = np.array(partono)

    nfa_int = np.size(np.where(partofa1 == 'SOLID'))
    # Divergence operator
    Dx_f2cv = scysparse.csr_matrix(
        (ncv, nfa_int), dtype="float64")  #Creating x part of operator
    Dy_f2cv = scysparse.csr_matrix(
        (ncv, nfa_int), dtype="float64")  #Creating y part of operator

    q_bc = np.zeros(ncv)
    u_bc = np.zeros(nfa)
    v_bc = np.zeros(nfa)

    u = (xy_fa[:, 1]) * (xy_fa[:, 0]**2)  #x-component of velocity
    u_star = np.zeros(u.size)
    u_star[:nfa_int] = (xy_fa[:nfa_int, 1]) * (xy_fa[:nfa_int, 0]**
                                               2) + 0.1 * xy_fa[:nfa_int, 0]
    u_star[nfa_int:] = u[nfa_int:]
    u_corr = np.zeros(u.size)
    v = -(xy_fa[:, 0]) * (xy_fa[:, 1]**2)  #y-component of velocity
    v_star = np.zeros(v.size)
    v_star[:nfa_int] = (xy_fa[:nfa_int, 1]) * (xy_fa[:nfa_int, 0]**
                                               2) + 0.1 * xy_fa[:nfa_int, 0]
    v_star[nfa_int:] = v[nfa_int:]
    v_corr = np.zeros(v.size)

    for l in np.arange(nfa):
        if partofa1[l] != 'SOLID':
            u_bc[l] = u[l]
            v_bc[l] = v[l]

    NORMAL = []  #blank normal array to be filled up
    AREA = np.zeros(ncv)

    #Pre-processing and finding normals over all CVs
    for i in np.arange(ncv):
        nocv = noofa[faocv[i]]  # Nodal pairs for each face of the CV
        face_co = xy_fa[faocv[i]]  # Face centroids of each face of CV
        check_vecs = face_co - xy_cv[i]  #Vectors from CV centre to face centre
        # x- and y-components of vectors parallel to the faces. Convention: 2nd point - 1st point in nocv
        par_x = xy_no[nocv[:, 1], 0] - xy_no[nocv[:, 0], 0]
        par_y = xy_no[nocv[:, 1], 1] - xy_no[nocv[:, 0], 1]
        # Defining normal vector to faces. Convention: normal is 90 deg clockwise to the face vector
        normal_fa = np.c_[-par_y, par_x]
        # Checks if normal_fa is aligned in the same direction as check_vecs
        dir_check = normal_fa[:, 0] * check_vecs[:, 0] + normal_fa[:, 1] * check_vecs[:, 1]
        # Flips sign of components in normal_fa where the dot product i.e. dir_check is negative
        normal_fa[np.where(dir_check < 0)] = -normal_fa[np.where(dir_check < 0)]
        NORMAL.append(normal_fa)  # Spits out all normals indexed by CVs

        #Calculating areas of CV assuming rectangles or triangles
        if np.size(faocv[i]) == 3:
            area_cv = np.abs(0.5 * ((par_x[0] * par_y[1]) -
                                    (par_x[1] * par_y[0])))
            AREA[i] = area_cv

        if np.size(faocv[i]) == 4:
            area_cv = max(
                np.abs((par_x[0] * par_y[1]) - (par_x[1] * par_y[0])),
                np.abs((par_x[0] * par_y[2]) - (par_x[2] * par_y[0])))
            AREA[i] = area_cv

    for j in np.arange(ncv):
        normal = NORMAL[j]  # Normals of the CV
        # Works as there are at most 4 nodes right now. Don't know how slow it will be for higher-order element shapes
        for ii, nn in enumerate(faocv[j]):
            if partofa1[nn] == 'SOLID':
                Dx_f2cv[j, nn] += normal[ii, 0] / AREA[j]
                Dy_f2cv[j, nn] += normal[ii, 1] / AREA[j]

            else:
                q_bc[j] += u_bc[nn] * normal[
                    ii, 0] / AREA[j] + v_bc[nn] * normal[ii, 1] / AREA[j]

    Gx, Gy = Gradient(filename, mesh_no)

    A = (Dx_f2cv.dot(Gx)) + (Dy_f2cv.dot(Gy))

    dt = 1.

    RHS = ((Dx_f2cv.dot(u_star[:nfa_int])) +
           (Dy_f2cv.dot(v_star[:nfa_int])) + q_bc) / dt

    Div_starred = RHS * dt

    rank = np.linalg.matrix_rank(A.todense())

    A[0, :] = 1.

    phi = splinalg.spsolve(A, RHS)

    u_corr[:nfa_int] = u_star[:nfa_int] - ((dt) * Gx.dot(phi))

    v_corr[:nfa_int] = v_star[:nfa_int] - ((dt) * Gy.dot(phi))

    u_corr[nfa_int:] = u_star[nfa_int:]

    v_corr[nfa_int:] = v_star[nfa_int:]

    Div_corr = (Dx_f2cv.dot(u_corr[:nfa_int])) + (Dy_f2cv.dot(
        v_corr[:nfa_int])) + q_bc

    plt.quiver(xy_fa[:, 0], xy_fa[:, 1], u_corr, v_corr, color='b')
    plt.quiver(xy_fa[:, 0], xy_fa[:, 1], u, v, color='r')
    print "Saving figure: " + figure_folder + "Mesh" + str(
        mesh_no) + "Quiver plot for corrected and analytical.pdf"
    plt.savefig(figure_folder + "Mesh" + str(mesh_no) + "Quiver_velocity.pdf")
    plt.close()

    plt.quiver(xy_fa[:, 0], xy_fa[:, 1], u_corr, v_corr, color='b')
    print "Saving figure: " + figure_folder + "Mesh" + str(
        mesh_no) + "Quiver plot for starred velocity.pdf"
    plt.savefig(figure_folder + "Mesh" + str(mesh_no) +
                "Quiver_velocity_star.pdf")
    plt.close()

    plot_data.plot_data(
        xy_cv[:, 0], xy_cv[:, 1], np.log(np.abs(Div_starred)), "Mesh " +
        str(mesh_no) + "Flooded_Contour_of_divergence_perturbed_field.pdf")
    plot_data.plot_data(
        xy_cv[:, 0], xy_cv[:, 1], np.log(np.abs(Div_corr)), "Mesh " +
        str(mesh_no) + "Flooded_Contour_of_divergence_corrected_field.pdf")

    e_RMS = np.sqrt(np.average(Div_corr**2))

    return (rank, e_RMS)
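
A standalone sketch of the divergence-removal step that correction performs, reduced to a periodic 1-D grid with a central-difference operator D standing in for the unstructured face-to-CV divergence and least-squares gradient: solve D(D(phi)) = D(u_star)/dt with one row pinned to remove the constant null space (as A[0, :] = 1. does above), then subtract dt*D(phi) from the starred velocity. This only illustrates the projection idea, not the project's operators; the grid and starred field are made up.

import numpy as np
import scipy.sparse as scysparse
import scipy.sparse.linalg as splinalg

n = 65                                       # odd, so the periodic central difference only annihilates constants
x = np.linspace(0., 1., n, endpoint=False)
dx, dt = 1. / n, 1.
u_star = np.sin(2. * np.pi * x) + 0.1 * np.cos(4. * np.pi * x)   # non-solenoidal "starred" field

D = scysparse.diags([-1., 1.], [-1, 1], shape=(n, n)).tolil()
D[0, n - 1], D[n - 1, 0] = -1., 1.           # periodic wrap-around
D = D.tocsr() / (2. * dx)

A = (D * D).tolil()                          # discrete div(grad(.))
A[0, :] = 1.                                 # pin the constant mode, as in correction()
A = A.tocsc()

phi = splinalg.spsolve(A, D.dot(u_star) / dt)
u_corr = u_star - dt * D.dot(phi)
print(np.max(np.abs(D.dot(u_corr)[1:])))     # divergence away from the pinned row: essentially zero
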
Code example #5
File: homework1.py Project: tshidhore/solver-codes
def plot_conn(part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa):

    # set_trace()  # uncommenting this line pauses the code here, just like keyboard in Matlab

    #####################################################
    ########## Plot Grid Labels / Connectivity ##########
    #####################################################

    fig_width = 30
    fig_height = 17
    textFontSize = 15
    gcafontSize = 32
    lineWidth = 2

    Plot_Node_Labels = True
    Plot_Face_Labels = True
    Plot_CV_Labels = True

    # the following enables LaTeX typesetting, which will cause the plotting to take forever..
    # from matplotlib import rc as matplotlibrc
    # matplotlibrc('text.latex', preamble='\usepackage{color}')
    # matplotlibrc('text',usetex=True)
    # matplotlibrc('font', family='serif')

    mgplx = 0.05*np.abs(max(xy_no[:,0])-min(xy_no[:,0]))
    mgply = 0.05*np.abs(max(xy_no[:,1])-min(xy_no[:,1]))
    xlimits = [min(xy_no[:,0])-mgplx,max(xy_no[:,0])+mgplx]
    ylimits = [min(xy_no[:,1])-mgply,max(xy_no[:,1])+mgply]

    fig = plt.figure(0,figsize=(fig_width,fig_height))
    ax = fig.add_subplot(111)
    ax.plot(xy_no[:,0],xy_no[:,1],'o',markersize=5,markerfacecolor='k')

    node_color = 'k'
    centroid_color = 'r'

    for inos_of_fa in noofa:
        ax.plot(xy_no[inos_of_fa,0], xy_no[inos_of_fa,1], 'k-', linewidth = lineWidth)

    if Plot_Face_Labels:
        nfa = xy_fa.shape[0]  # number of faces
        faces_indexes = range(0,nfa)
        for x_fa,y_fa,ifa in zip(xy_fa[:,0],xy_fa[:,1],faces_indexes):
            ax.text(x_fa,y_fa,repr(ifa),transform=ax.transData,color='k',
                    verticalalignment='center',horizontalalignment='center',fontsize=textFontSize)

    if Plot_Node_Labels:
        nno = xy_no.shape[0]  # number of nodes
        node_indexes = range(0,nno)
        for xn,yn,ino in zip(xy_no[:,0],xy_no[:,1],node_indexes):
            ax.text(xn,yn,repr(ino),transform=ax.transData,color='r',
                    verticalalignment='top',horizontalalignment='left',fontsize=textFontSize)

    if Plot_CV_Labels:
        ncv = xy_cv.shape[0]  # number of control volumes
        cv_indexes = range(0,ncv)
        for xcv,ycv,icv in zip(xy_cv[:,0],xy_cv[:,1],cv_indexes):
            ax.text(xcv,ycv,repr(icv),transform=ax.transData,color='b',
                    verticalalignment='top',horizontalalignment='left',fontsize=textFontSize)

    ax.axis('equal')
    ax.set_xlim(xlimits)
    ax.set_ylim(ylimits)
    ax.set_xlabel(r'$x$',fontsize=1.5*gcafontSize)
    ax.set_ylabel(r'$y$',fontsize=1.5*gcafontSize)
    plt.setp(ax.get_xticklabels(),fontsize=gcafontSize)
    plt.setp(ax.get_yticklabels(),fontsize=gcafontSize)
    fig_name = filename.split('.')[0]+'.pdf'
    plt.savefig(fig_name)

p1 = False
p2 = True


#####################################################################################################################

if p1:
    
    # Poor Man's heat transfer problem with and without operator generation for a given mesh
    # Code calculates speed-up due to operator generation for the specified number of iterations of the solvers

    filename = 'mesh_set/Mesh3.msh'
    icemcfd_project_folder = './'
#    filename = '/mesh_set_1/heated_rod_ncv=103.msh'
    filename = 'mesh_set/Mesh3.msh'
    figure_folder = "../report/"

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(mshfile_fullpath,node_reordering=True)
    start_time2 = time.time()
    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa) # Converting partofa to an array
    cold_bc = np.where(partofa1=='COLD')   # Vectorized approach to find face belonging to part 'COLD'
    hot_bc = np.where(partofa1=='HOT')   # Vectorized approach to find face belonging to part 'HOT'
    solid = np.where(partofa1=='SOLID')
    
    iterations = 2000
    
    #Temperature initialized at nodes and CVs
    Tn = np.zeros(nno)    
    
    # Initializing values for hot and cold boundary nodes
    Tn[np.unique(noofa[cold_bc])] = 300 
    Tn[np.unique(noofa[hot_bc])] = 500

    # Initializing CV temperatures
    Tcv = np.zeros(ncv)
    
    # Node-to-CV conversion sparse matrix
    An2cv = scysparse.csr_matrix((ncv,nno),dtype="float64")
    
    # CV-to-node conversion matrix for internal nodes. Note that boundary nodes will be treated within the matrix itself.
    Acv2n_int = scysparse.csr_matrix((nno-np.unique(noofa[cold_bc]).size-np.unique(noofa[hot_bc]).size,ncv),dtype="float64")  # Only takes interior points
    
    for i in np.arange(ncv):
        
        nnn = np.unique(noofa[faocv[i]]).size # Gives number of neighbouring nodes, for calculating weight for each position = 1/no. of surrounding nodes
        An2cv[i,np.unique(noofa[faocv[i]])] = 1./nnn
    # This assumes that boundary nodes are numbered towards the very end. If not, then God help you!!
    for i in np.arange(nno):
        nncv = np.unique(cvofa[faono[i]]).size  # No. of neighbouring CVs of the node
        flag = 0 # flag to check if node is boundary node 
        fa1 = partofa1[faono[i]] # corresponding array of parts to which the faces containing the node belong to
        for k,pn in enumerate(fa1): # Checking if any of the part names correspond to the boundary
            if pn == 'COLD':
                flag = 1
            if pn == 'HOT':
                flag = 1
        if flag == 0: # faces which do not belong to the boundary
            Acv2n_int[i,np.unique(cvofa[faono[i]])] = 1./nncv # Averaging surrounding CV temperatures and storing them in Tn. Note that np.unique ensures that each CV is accounted for only once
    n = 1 # Counter for no. of iterations
     
    while n <= iterations:
        
        # Updating CV centre values 
        Tcv = An2cv.dot(Tn)
        
        #Updating node values based on CV centres
        Tn_int = Acv2n_int.dot(Tcv)        
        
        # Assuming that all boundary nodes are located towards the very end of Tn
        Tn[0:Tn_int.size] = Tn_int        
        
#        print "Iteration: %d" %(n)
        n += 1  # Increment iteration counter
    
    stop_time2 = time.time() - start_time2    
    print "Time required to execute matrix solve: %2.10f" %(stop_time2)
    plot_data.plot_data(xy_no[:,0],xy_no[:,1],Tn,"Final Temperature Contour, matrix solve.pdf")


    start_time1 = time.time()  # Time Starts
    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa) # Converting partofa to an array
    cold_bc = np.where(partofa1=='COLD')   # Vectorized approach to find face belonging to part 'COLD'
    hot_bc = np.where(partofa1=='HOT')   # Vectorized approach to find face belonging to part 'HOT'
    
    #Temperature initialized at nodes and CVs
    Tn = np.zeros(nno)    
    
    # Initializing values for hot and cold boundary nodes
    Tn[np.unique(noofa[cold_bc])] = 300 
    Tn[np.unique(noofa[hot_bc])] = 500
    
    # Initializing CV temperatures
    Tcv = np.zeros(ncv)
    
    n = 1 # Counter for no. of iterations
    
    while n <= iterations:
        # Looping over each CV
        for i in np.arange(ncv):
            Tcv[i] = np.average(Tn[np.unique(noofa[faocv[i]])]) # Averaging surrounding nodal temperatures and storing them in Tcv. Note that np.unique ensures that each node is accounted for only once
        for j in np.arange(nno):
            flag = 0 # flag to check if node is boundary node 
            fa1 = partofa1[faono[j]] # corresponding array of parts to which the faces containing the node belong to
            for k,pn in enumerate(fa1): # Checking if any of the part names correspond to the boundary
                if pn == 'COLD':
                    flag = 1
                if pn == 'HOT':
                    flag = 1
            if flag == 0: # faces which do not belong to the boundary
                Tn[j] = np.average(Tcv[np.unique(cvofa[faono[j]])]) # Averaging surrounding CV temperatures and storing them in Tn. Note that np.unique ensures that each CV is accounted for only once
#        print "Iteration: %d" %(n)
        n += 1  # Increment iteration counter
    stop_time1 = time.time() - start_time1    
    print "Time required to execute for looping solve: %2.10f" %(stop_time1)
    plot_data.plot_data(xy_no[:,0],xy_no[:,1],Tn,"Final Temperature Contour, looping solve.pdf")
    
if p2:
    
    e_RMS = np.zeros(4) # RMS Error
    NCV = np.zeros(4) # Array to store the no. of CVs in each mesh
    
    #Mesh1
    print "Mesh A"
    icemcfd_project_folder = './'
#    filename = '/mesh_set_1/heated_rod_ncv=103.msh'
    filename = 'mesh_set/Mesh1.msh'
    figure_folder = "../report/"

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(mshfile_fullpath,node_reordering=True)    
    
    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    NCV[0] = ncv
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa) # Converting partofa to an array
    
    u = (xy_no[:,1])*(xy_no[:,0]**2) #x-component of velocity
    v = -(xy_no[:,0])*(xy_no[:,1]**2) #y-component of velocity
    
    name = "Quiver_Plot_for_velocity _on_Mesh_A.pdf"
    figure_name = figure_folder + name
    figwidth       = 10
    figheight      = 8
    lineWidth      = 3
    textFontSize   = 10
    gcafontSize    = 14
    fig = plt.figure(0, figsize=(figwidth,figheight))
    plt.quiver(xy_no[:,0],xy_no[:,1],u,v)
    print "Saving figure: "+ figure_name
    plt.savefig(figure_name)
    plt.close()
    
    NORMAL = [] #blank normal array to be filled up
    AREA = np.zeros(ncv)
    #Pre-processing and finding normals over all CVs
    for i in np.arange(ncv):
        nocv = noofa[faocv[i]]   # Nodal pairs for each face of the CV
        face_co = xy_fa[faocv[i]] # Face centroids of each face of CV
        check_vecs = face_co - xy_cv[i] #Vectors from CV centre to face centre
        par_x = xy_no[nocv[:,1],0] - xy_no[nocv[:,0],0] #x-component of vector parallel to face. Convention, 2nd point - 1st point in nocv
        par_y = xy_no[nocv[:,1],1] - xy_no[nocv[:,0],1] #y-component of vector parallel to face. Convention, 2nd point - 1st point in nocv
        normal_fa = np.c_[-par_y,par_x]  #Defining normal vector to faces. Convention, normal is 90* clock-wise.
        dir_check = normal_fa[:,0]*check_vecs[:,0] + normal_fa[:,1]*check_vecs[:,1] # Checks if normal_fa is aligned in the same direction as check_vecs.
        normal_fa[np.where(dir_check<0)] = -normal_fa[np.where(dir_check<0)] # Flips sign of components in normal_fa where the dot product i.e. dir_check is negative
        NORMAL.append(normal_fa) # Spits out all normals indexed by Cvs
        #Calculating areas of CV assuming rectangles or triangles
        if np.size(faocv[i]) == 3:
            area_cv = np.abs(0.5*((par_x[0]*par_y[1]) - (par_x[1]*par_y[0])))
            AREA[i] = area_cv
            
        if np.size(faocv[i]) == 4:
            area_cv = max(np.abs((par_x[0]*par_y[1]) - (par_x[1]*par_y[0])),np.abs((par_x[0]*par_y[2]) - (par_x[2]*par_y[0])))
            AREA[i] = area_cv
        
    Dx_n2cv = scysparse.csr_matrix((ncv,nno),dtype="float64") #Creating x part of operator
    Dy_n2cv = scysparse.csr_matrix((ncv,nno),dtype="float64") #Creating y part of operator
    
    DIVERGENCE_2 = np.zeros(ncv) #Divergence stored here
    
    for jj in np.arange(ncv):
        normal = NORMAL[jj] #Normals of the CV
        nocv = noofa[faocv[jj]] #Finding nodes in order of faces
        # Works as there are at most 4 nodes right now. Don't know how slow it will be for higher-order element shapes
        for ii,nn in enumerate(nocv[:,0]):
            Dx_n2cv[jj,nn] += 0.5*normal[ii,0]/AREA[jj]
            Dy_n2cv[jj,nn] += 0.5*normal[ii,1]/AREA[jj]
            
        for ii,nn in enumerate(nocv[:,1]):
            Dx_n2cv[jj,nn] += 0.5*normal[ii,0]/AREA[jj]
            Dy_n2cv[jj,nn] += 0.5*normal[ii,1]/AREA[jj]
            
            
    DIVERGENCE_2 = Dx_n2cv.dot(u) + Dy_n2cv.dot(v)
    e_RMS[0] = np.sqrt(sum(np.multiply(DIVERGENCE_2,AREA)**2)/ncv)
    plot_data.plot_data(xy_cv[:,0],xy_cv[:,1],DIVERGENCE_2,"Flooded_Contour_of_Divergence_Mesh_A.pdf")
    
    #Mesh2
    print "Mesh B"
    icemcfd_project_folder = './'
#    filename = '/mesh_set_1/heated_rod_ncv=103.msh'
    filename = 'mesh_set/Mesh2.msh'
    figure_folder = "../report/"

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(mshfile_fullpath,node_reordering=True)    
    
    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    NCV[1] = ncv
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa) # Converting partofa to an array
    
    u = xy_no[:,1]*(xy_no[:,0]**2) #x-component of velocity
    v = -xy_no[:,0]*(xy_no[:,1]**2) #y-component of velocity
    
    NORMAL = [] #blank normal array to be filled up
    AREA = np.zeros(ncv)
    #Pre-processing and finding normals over all CVs
    for i in np.arange(ncv):
        nocv = noofa[faocv[i]]   # Nodal pairs for each face of the CV
        face_co = xy_fa[faocv[i]] # Face centroids of each face of CV
        check_vecs = face_co - xy_cv[i] #Vectors from CV centre to face centre
        par_x = xy_no[nocv[:,1],0] - xy_no[nocv[:,0],0] #x-component of vector parallel to face. Convention, 2nd point - 1st point in nocv
        par_y = xy_no[nocv[:,1],1] - xy_no[nocv[:,0],1] #y-component of vector parallel to face. Convention, 2nd point - 1st point in nocv
        normal_fa = np.c_[-par_y,par_x]  #Defining normal vector to faces. Convention, normal is 90* clock-wise.
        dir_check = normal_fa[:,0]*check_vecs[:,0] + normal_fa[:,1]*check_vecs[:,1] # Checks if normal_fa is aligned in the same direction as check_vecs.
        normal_fa[np.where(dir_check<0)] = -normal_fa[np.where(dir_check<0)] # Flips sign of components in normal_fa where the dot product i.e. dir_check is negative
        NORMAL.append(normal_fa) # Spits out all normals indexed by Cvs
        #Calculating areas of CV assuming rectangles or triangles
        if np.size(faocv[i]) == 3:
            area_cv = np.abs(0.5*((par_x[0]*par_y[1]) - (par_x[1]*par_y[0])))
            AREA[i] = area_cv
            
        if np.size(faocv[i]) == 4:
            area_cv = max(np.abs((par_x[0]*par_y[1]) - (par_x[1]*par_y[0])),np.abs((par_x[0]*par_y[2]) - (par_x[2]*par_y[0])))
            AREA[i] = area_cv
        
    Dx_n2cv = scysparse.csr_matrix((ncv,nno),dtype="float64") #Creating x part of operator
    Dy_n2cv = scysparse.csr_matrix((ncv,nno),dtype="float64") #Creating y part of operator
    
    DIVERGENCE_2 = np.zeros(ncv) #Divergence stored here
    
    for jj in np.arange(ncv):
        normal = NORMAL[jj] #Normals of the CV
        nocv = noofa[faocv[jj]] #Finding nodes in order of faces
        # Works as there are at most 4 nodes right now. Don't know how slow it will be for higher-order element shapes
        for ii,nn in enumerate(nocv[:,0]):
            Dx_n2cv[jj,nn] += 0.5*normal[ii,0]/AREA[jj]
            Dy_n2cv[jj,nn] += 0.5*normal[ii,1]/AREA[jj]
            
        for ii,nn in enumerate(nocv[:,1]):
            Dx_n2cv[jj,nn] += 0.5*normal[ii,0]/AREA[jj]
            Dy_n2cv[jj,nn] += 0.5*normal[ii,1]/AREA[jj]
            
            
    DIVERGENCE_2 = Dx_n2cv.dot(u) + Dy_n2cv.dot(v)
    
    e_RMS[1] = np.sqrt(sum(np.multiply(DIVERGENCE_2,AREA)**2)/ncv)
        
    plot_data.plot_data(xy_cv[:,0],xy_cv[:,1],DIVERGENCE_2,"Flooded_Contour_of_Divergence_Mesh_B.pdf")
    
    #Mesh3
    print "Mesh C"
    icemcfd_project_folder = './'
#    filename = '/mesh_set_1/heated_rod_ncv=103.msh'
    filename = 'mesh_set/Mesh3.msh'
    figure_folder = "../report/"

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(mshfile_fullpath,node_reordering=True)    
    
    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    NCV[2] = ncv
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa) # Converting partofa to an array
    
    u = xy_no[:,1]*(xy_no[:,0]**2) #x-component of velocity
    v = -xy_no[:,0]*(xy_no[:,1]**2) #y-component of velocity
    
    NORMAL = [] #blank normal array to be filled up
    AREA = np.zeros(ncv)
    #Pre-processing and finding normals over all CVs
    for i in np.arange(ncv):
        nocv = noofa[faocv[i]]   # Nodal pairs for each face of the CV
        face_co = xy_fa[faocv[i]] # Face centroids of each face of CV
        check_vecs = face_co - xy_cv[i] #Vectors from CV centre to face centre
        par_x = xy_no[nocv[:,1],0] - xy_no[nocv[:,0],0] #x-component of vector parallel to face. Convention, 2nd point - 1st point in nocv
        par_y = xy_no[nocv[:,1],1] - xy_no[nocv[:,0],1] #y-component of vector parallel to face. Convention, 2nd point - 1st point in nocv
        normal_fa = np.c_[-par_y,par_x]  #Defining normal vector to faces. Convention, normal is 90* clock-wise.
        dir_check = normal_fa[:,0]*check_vecs[:,0] + normal_fa[:,1]*check_vecs[:,1] # Checks if normal_fa is aligned in the same direction as check_vecs.
        normal_fa[np.where(dir_check<0)] = -normal_fa[np.where(dir_check<0)] # Flips sign of components in normal_fa where the dot product i.e. dir_check is negative
        NORMAL.append(normal_fa) # Spits out all normals indexed by Cvs
        #Calculating areas of CV assuming rectangles or triangles
        if np.size(faocv[i]) == 3:
            area_cv = np.abs(0.5*((par_x[0]*par_y[1]) - (par_x[1]*par_y[0])))
            AREA[i] = area_cv
            
        if np.size(faocv[i]) == 4:
            area_cv = max(np.abs((par_x[0]*par_y[1]) - (par_x[1]*par_y[0])),np.abs((par_x[0]*par_y[2]) - (par_x[2]*par_y[0])))
            AREA[i] = area_cv
    
    Dx_n2cv = scysparse.csr_matrix((ncv,nno),dtype="float64") #Creating x part of operator
    Dy_n2cv = scysparse.csr_matrix((ncv,nno),dtype="float64") #Creating y part of operator
    
    DIVERGENCE_2 = np.zeros(ncv) #Divergence stored here
    
    for jj in np.arange(ncv):
        normal = NORMAL[jj] #Normals of the CV
        nocv = noofa[faocv[jj]] #Finding nodes in order of faces
        # Works as there are at most 4 nodes right now. Don't know how slow it will be for higher-order element shapes
        for ii,nn in enumerate(nocv[:,0]):
            Dx_n2cv[jj,nn] += 0.5*normal[ii,0]/AREA[jj]
            Dy_n2cv[jj,nn] += 0.5*normal[ii,1]/AREA[jj]
            
        for ii,nn in enumerate(nocv[:,1]):
            Dx_n2cv[jj,nn] += 0.5*normal[ii,0]/AREA[jj]
            Dy_n2cv[jj,nn] += 0.5*normal[ii,1]/AREA[jj]
            
            
    DIVERGENCE_2 = Dx_n2cv.dot(u) + Dy_n2cv.dot(v)
    
    e_RMS[2] = np.sqrt(sum(np.multiply(DIVERGENCE_2,AREA)**2)/ncv)
        
    plot_data.plot_data(xy_cv[:,0],xy_cv[:,1],DIVERGENCE_2,"Flooded_Contour_of_Divergence_Mesh_C.pdf")
    
    #Mesh4
    print "Mesh D"
    icemcfd_project_folder = './'
#    filename = '/mesh_set_1/heated_rod_ncv=103.msh'
    filename = 'mesh_set/Mesh4.msh'
    figure_folder = "../report/"

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(mshfile_fullpath,node_reordering=True)    
    
    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    NCV[3] = ncv
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa) # Converting partofa to an array
    
    u = xy_no[:,1]*(xy_no[:,0]**2) #x-component of velocity
    v = -xy_no[:,0]*(xy_no[:,1]**2) #y-component of velocity
    
    NORMAL = [] #blank normal array to be filled up
    AREA = np.zeros(ncv)
    #Pre-processing and finding normals over all CVs
    for i in np.arange(ncv):
        nocv = noofa[faocv[i]]   # Nodal pairs for each face of the CV
        face_co = xy_fa[faocv[i]] # Face centroids of each face of CV
        check_vecs = face_co - xy_cv[i] #Vectors from CV centre to face centre
        par_x = xy_no[nocv[:,1],0] - xy_no[nocv[:,0],0] #x-component of vector parallel to face. Convention, 2nd point - 1st point in nocv
        par_y = xy_no[nocv[:,1],1] - xy_no[nocv[:,0],1] #y-component of vector parallel to face. Convention, 2nd point - 1st point in nocv
        normal_fa = np.c_[-par_y,par_x]  #Defining normal vector to faces. Convention, normal is 90* clock-wise.
        dir_check = normal_fa[:,0]*check_vecs[:,0] + normal_fa[:,1]*check_vecs[:,1] # Checks if normal_fa is aligned in the same direction as check_vecs.
        normal_fa[np.where(dir_check<0)] = -normal_fa[np.where(dir_check<0)] # Flips sign of components in normal_fa where the dot product i.e. dir_check is negative
        NORMAL.append(normal_fa) # Spits out all normals indexed by Cvs
        #Calculating areas of CV assuming rectangles or triangles
        if np.size(faocv[i]) == 3:
            area_cv = np.abs(0.5*((par_x[0]*par_y[1]) - (par_x[1]*par_y[0])))
            AREA[i] = area_cv
            
        if np.size(faocv[i]) == 4:
            area_cv = max(np.abs((par_x[0]*par_y[1]) - (par_x[1]*par_y[0])),np.abs((par_x[0]*par_y[2]) - (par_x[2]*par_y[0])))
            AREA[i] = area_cv
    
    Dx_n2cv = scysparse.csr_matrix((ncv,nno),dtype="float64") #Creating x part of operator
    Dy_n2cv = scysparse.csr_matrix((ncv,nno),dtype="float64") #Creating y part of operator
    
    DIVERGENCE_2 = np.zeros(ncv) #Divergence stored here
    
    for jj in np.arange(ncv):
        normal = NORMAL[jj] #Normals of the CV
        nocv = noofa[faocv[jj]] #Finding nodes in order of faces
        # Works as there are at most 4 nodes right now. Don't know how slow it will be for higher-order element shapes
        for ii,nn in enumerate(nocv[:,0]):
            Dx_n2cv[jj,nn] += 0.5*normal[ii,0]/AREA[jj]
            Dy_n2cv[jj,nn] += 0.5*normal[ii,1]/AREA[jj]
            
        for ii,nn in enumerate(nocv[:,1]):
            Dx_n2cv[jj,nn] += 0.5*normal[ii,0]/AREA[jj]
            Dy_n2cv[jj,nn] += 0.5*normal[ii,1]/AREA[jj]
            
            
    DIVERGENCE_2 = Dx_n2cv.dot(u) + Dy_n2cv.dot(v)
    e_RMS[3] = np.sqrt(sum(np.multiply(DIVERGENCE_2,AREA)**2)/ncv)
        
    plot_data.plot_data(xy_cv[:,0],xy_cv[:,1],DIVERGENCE_2,"Flooded_Contour_of_Divergence_Mesh_D.pdf")
    
    # RMS error plot
    name = "RMS_Error_for_Divergence.pdf"
    figure_name = figure_folder + name
    figwidth       = 10
    figheight      = 8
    lineWidth      = 3
    textFontSize   = 10
    gcafontSize    = 14
    fig = plt.figure(0, figsize=(figwidth,figheight))
    plt.loglog(NCV,e_RMS,'-k',label="RMS Error")
    plt.loglog(NCV,NCV**-1,'--r',label='Order 1')
    plt.loglog(NCV,NCV**-2,'--b',label='Order 2')
    plt.loglog(NCV,NCV**-3,'--g',label='Order 3')
    plt.xlabel("No. of CVs")
    plt.ylabel(r"RMS Error")
    plt.legend(loc='best')
    print "Saving figure: "+ figure_name
    plt.savefig(figure_name)
    plt.close()
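
A standalone check of the node-to-CV divergence stencil assembled in the p2 blocks above, on a single triangular CV: edge normals with magnitude equal to the edge length, the face value taken as the average of its two end nodes (the two 0.5 weights per face), fluxes summed and divided by the CV area. For a linear velocity field the construction is exact. A fixed counterclockwise node ordering replaces the dir_check sign flip used with the mesh, and the triangle and velocity field are made up for the check.

import numpy as np

tri = np.array([[0.0, 0.0], [1.0, 0.2], [0.3, 0.9]])   # one triangular CV, counterclockwise
u = lambda x, y: 2.0 * x + 3.0 * y                      # div(u, v) = 2 - 1 = 1 everywhere
v = lambda x, y: 5.0 * x - 1.0 * y

flux, area = 0.0, 0.0
for k in range(3):
    p0, p1 = tri[k], tri[(k + 1) % 3]
    par = p1 - p0
    normal = np.array([par[1], -par[0]])                # outward normal, magnitude = edge length
    u_face = 0.5 * (u(*p0) + u(*p1))                    # the two 0.5 nodal weights per face
    v_face = 0.5 * (v(*p0) + v(*p1))
    flux += u_face * normal[0] + v_face * normal[1]
    area += 0.5 * (p0[0] * p1[1] - p1[0] * p0[1])       # shoelace formula

print(flux / area)                                      # prints 1.0: exact for a linear field
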
Code example #6
def Divergence_val(filename, mesh_no):

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(
        mshfile_fullpath, node_reordering=True)

    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa)  # Converting partofa to an array
    faono1 = np.array(faono)  # Converting faono to an array
    cold_bc = np.where(partofa1 == 'COLD')  # Vectorized approach to find face belonging to part 'COLD'
    hot_bc = np.where(partofa1 == 'HOT')  # Vectorized approach to find face belonging to part 'HOT'
    solid = np.where(partofa1 == 'SOLID')  # Vectorized approach to find face belonging to part 'SOLID'

    # Find part to which each node belongs to
    partono = []
    for j in np.arange(nno):
        part_name = partofa1[faono1[j]]
        hot_count = np.array(np.where(part_name == 'HOT')).size
        cold_count = np.array(np.where(part_name == 'COLD')).size
        if cold_count != 0:
            partono.append('COLD')
        elif hot_count != 0:
            partono.append('HOT')
        else:
            partono.append('SOLID')

    partono1 = np.array(partono)

    nfa_int = np.size(np.where(partofa1 == 'SOLID'))
    # Divergence operator
    Dx_f2cv = scysparse.csr_matrix(
        (ncv, nfa_int), dtype="float64")  #Creating x part of operator
    Dy_f2cv = scysparse.csr_matrix(
        (ncv, nfa_int), dtype="float64")  #Creating y part of operator

    q_bc = np.zeros(ncv)
    u_bc = np.zeros(nfa)
    v_bc = np.zeros(nfa)

    u = (xy_fa[:, 1]) * (xy_fa[:, 0]**2)  #x-component of velocity
    v = -(xy_fa[:, 0]) * (xy_fa[:, 1]**2)  #y-component of velocity

    for l in np.arange(nfa):
        if partofa1[l] != 'SOLID':
            u_bc[l] = u[l]
            v_bc[l] = v[l]

    NORMAL = []  #blank normal array to be filled up
    AREA = np.zeros(ncv)

    #Pre-processing and finding normals over all CVs
    for i in np.arange(ncv):
        nocv = noofa[faocv[i]]  # Nodal pairs for each face of the CV
        face_co = xy_fa[faocv[i]]  # Face centroids of each face of CV
        check_vecs = face_co - xy_cv[i]  #Vectors from CV centre to face centre
        # x- and y-components of vectors parallel to the faces. Convention: 2nd point - 1st point in nocv
        par_x = xy_no[nocv[:, 1], 0] - xy_no[nocv[:, 0], 0]
        par_y = xy_no[nocv[:, 1], 1] - xy_no[nocv[:, 0], 1]
        # Defining normal vector to faces. Convention: normal is 90 deg clockwise to the face vector
        normal_fa = np.c_[-par_y, par_x]
        # Checks if normal_fa is aligned in the same direction as check_vecs
        dir_check = normal_fa[:, 0] * check_vecs[:, 0] + normal_fa[:, 1] * check_vecs[:, 1]
        # Flips sign of components in normal_fa where the dot product i.e. dir_check is negative
        normal_fa[np.where(dir_check < 0)] = -normal_fa[np.where(dir_check < 0)]
        NORMAL.append(normal_fa)  # Spits out all normals indexed by CVs

        #Calculating areas of CV assuming rectangles or triangles
        if np.size(faocv[i]) == 3:
            area_cv = np.abs(0.5 * ((par_x[0] * par_y[1]) -
                                    (par_x[1] * par_y[0])))
            AREA[i] = area_cv

        if np.size(faocv[i]) == 4:
            area_cv = max(
                np.abs((par_x[0] * par_y[1]) - (par_x[1] * par_y[0])),
                np.abs((par_x[0] * par_y[2]) - (par_x[2] * par_y[0])))
            AREA[i] = area_cv

    for j in np.arange(ncv):
        normal = NORMAL[j]  # Normals of the CV
        # Works as there are at most 4 nodes right now. Don't know how slow it will be for higher-order element shapes
        for ii, nn in enumerate(faocv[j]):
            if partofa1[nn] == 'SOLID':
                Dx_f2cv[j, nn] += normal[ii, 0] / AREA[j]
                Dy_f2cv[j, nn] += normal[ii, 1] / AREA[j]

            else:
                q_bc[j] += u_bc[nn] * normal[
                    ii, 0] / AREA[j] + v_bc[nn] * normal[ii, 1] / AREA[j]

    DIVERGENCE = (Dx_f2cv.dot(u[:nfa_int])) + (Dy_f2cv.dot(v[:nfa_int])) + q_bc

    e_RMS = np.sqrt(np.average(DIVERGENCE**2))

    plot_data.plot_data(
        xy_cv[:, 0], xy_cv[:, 1], DIVERGENCE,
        "Mesh " + str(mesh_no) + "Flooded_Contour_of_Divergence.pdf")

    plt.spy(Dx_f2cv)
    plt.savefig(figure_folder + "Mesh " + str(mesh_no) + ": Spy of Dx.pdf")
    plt.close()

    plt.spy(Dy_f2cv)
    plt.savefig(figure_folder + "Mesh " + str(mesh_no) + ": Spy of Dy.pdf")
    plt.close()

    return (Dx_f2cv, Dy_f2cv, q_bc, e_RMS)
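
A hedged usage sketch (the mesh file name and mesh number are illustrative; icemcfd_project_folder, figure_folder and the helper modules are assumed to be set up as in the surrounding examples):

Dx_f2cv, Dy_f2cv, q_bc, e_RMS = Divergence_val("Mesh_2.msh", 2)
print("RMS of the discrete divergence of the analytical velocity: %e" % e_RMS)
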
Code example #7
def Gradient(filename, mesh_no, flag=0):

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(
        mshfile_fullpath, node_reordering=True)

    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa)  # Converting partofa to an array
    faono1 = np.array(faono)  # Converting faono to an array
    cold_bc = np.where(partofa1 == 'COLD')  # Vectorized approach to find face belonging to part 'COLD'
    hot_bc = np.where(partofa1 == 'HOT')  # Vectorized approach to find face belonging to part 'HOT'
    solid = np.where(partofa1 == 'SOLID')  # Vectorized approach to find face belonging to part 'SOLID'

    # Find part to which each node belongs to
    partono = []
    for j in np.arange(nno):
        part_name = partofa1[faono1[j]]
        hot_count = np.array(np.where(part_name == 'HOT')).size
        cold_count = np.array(np.where(part_name == 'COLD')).size
        if cold_count != 0:
            partono.append('COLD')
        elif hot_count != 0:
            partono.append('HOT')
        else:
            partono.append('SOLID')

    partono1 = np.array(partono)

    Gx_int = scysparse.csr_matrix(
        (ncv, ncv))  # Matrix to calculate X-gradient at CVs
    Gy_int = scysparse.csr_matrix(
        (ncv, ncv))  # Matrix to calculate Y-gradient at CVs

    nfa_int = np.size(np.where(partofa1 == 'SOLID'))
    Avg_cv2f = scysparse.csr_matrix((nfa_int, ncv))

    for i in np.arange(ncv):
        neigh_cv = np.unique(
            cvofa[faocv[i]])  # Gives neighbouring CVs including the central CV
        neigh_cv = np.delete(
            neigh_cv, np.where(neigh_cv == i)
        )  # Find index of central CV and delete that entry from neighbouring CV array
        neigh_cv = np.delete(
            neigh_cv, np.where(neigh_cv == -1)
        )  # Find index of boundary CV and delete the -1 entry from neighbouring CV array
        dx_ik = (xy_cv[neigh_cv, 0] - xy_cv[i, 0]
                 )  # Stores dx for all neighbouring CVs
        dy_ik = (xy_cv[neigh_cv, 1] - xy_cv[i, 1]
                 )  # Stores dy for all neighbouring CVs
        w_ik = 1. / np.sqrt(
            (dx_ik**2) + (dy_ik**2))  # Array of weights for least-squared fit
        a_ik = sum((w_ik * dx_ik)**2)
        b_ik = sum(
            ((w_ik)**2) * dx_ik * dy_ik
        )  #Co-efficients a_ik, b_ik, c_ik from least-squared fitting algorithm.
        c_ik = sum((w_ik * dy_ik)**2)

        det = (a_ik * c_ik) - (b_ik**2)

        # Filling out weights for collocation point
        Gx_int[i, i] -= sum(((c_ik * ((w_ik)**2) * dx_ik) -
                             (b_ik * ((w_ik)**2) * dy_ik)) / det)
        Gy_int[i, i] -= sum(((a_ik * ((w_ik)**2) * dy_ik) -
                             (b_ik * ((w_ik)**2) * dx_ik)) / det)

        for j, n in enumerate(neigh_cv):
            Gx_int[i, n] += ((c_ik * ((w_ik[j])**2) * dx_ik[j]) -
                             (b_ik * ((w_ik[j])**2) * dy_ik[j])) / det
            Gy_int[i, n] += ((a_ik * ((w_ik[j])**2) * dy_ik[j]) -
                             (b_ik * ((w_ik[j])**2) * dx_ik[j])) / det

    for ii in np.arange(nfa_int):
        cvs = cvofa[ii]
        Avg_cv2f[ii, cvs] = 0.5

    Gx = Avg_cv2f * Gx_int
    Gy = Avg_cv2f * Gy_int
    if flag == 1:
        # Validation of gradient evaluation

        phi_cv = analytical_f(xy_cv[:, 0], xy_cv[:, 1])

        grad_phi_analytical_x = grad_x(xy_fa[:nfa_int, 0], xy_fa[:nfa_int, 1])

        grad_phi_analytical_y = grad_y(xy_fa[:nfa_int, 0], xy_fa[:nfa_int, 1])

        grad_phi_num_x = Gx * phi_cv
        grad_phi_num_y = Gy * phi_cv

        plt.quiver(xy_fa[:nfa_int, 0],
                   xy_fa[:nfa_int, 1],
                   grad_phi_analytical_x,
                   grad_phi_analytical_y,
                   color='b')
        plt.quiver(xy_fa[:nfa_int, 0],
                   xy_fa[:nfa_int, 1],
                   grad_phi_num_x,
                   grad_phi_num_y,
                   color='r')
        print "Saving figure: " + figure_folder + "Mesh" + str(
            mesh_no) + "Quiver plot for gradient.pdf"
        plt.savefig(figure_folder + "Mesh" + str(mesh_no) +
                    "Quiver_gradient.pdf")
        plt.close()

        plt.spy(Gx)
        plt.savefig(figure_folder + "Mesh " + str(mesh_no) + ": Spy of Gx.pdf")
        plt.close()

        plt.spy(Gy)
        plt.savefig(figure_folder + "Mesh " + str(mesh_no) + ": Spy of Gy.pdf")
        plt.close()

    return (Gx, Gy)
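
A standalone check of the inverse-distance-weighted least-squares gradient formula that Gradient (and diffusion_CN above) assembles into the Gx/Gy operators, written out for one collocation point with a handful of synthetic neighbours; for a linear field the recovered gradient is exact, which is a quick way to sanity-check the a_ik, b_ik, c_ik coefficients.

import numpy as np

np.random.seed(0)
xc, yc = 0.3, 0.7                                   # collocation point
dx = np.random.uniform(-0.1, 0.1, 6)                # offsets to 6 synthetic neighbours
dy = np.random.uniform(-0.1, 0.1, 6)
phi_c = 2.0 * xc - 5.0 * yc                         # phi = 2x - 5y, so the exact gradient is (2, -5)
phi_k = 2.0 * (xc + dx) - 5.0 * (yc + dy)

w = 1.0 / np.sqrt(dx**2 + dy**2)                    # inverse-distance weights
a = np.sum((w * dx)**2)
b = np.sum(w**2 * dx * dy)
c = np.sum((w * dy)**2)
det = a * c - b**2

dphi = phi_k - phi_c
gx = np.sum(w**2 * (c * dx - b * dy) * dphi) / det
gy = np.sum(w**2 * (a * dy - b * dx) * dphi) / det
print(gx, gy)                                       # recovers (2.0, -5.0)
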
Code example #8
def poisson_solver_nc_ds(filename, mesh_no, flag=0):

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(
        mshfile_fullpath, node_reordering=True)

    # Two slices
    x_slice_1 = np.linspace(1.0, 1.5, 51)
    y_slice_1 = np.linspace(0.1, 0.4, 51)

    x_slice_2 = 1.5 * np.ones(51)
    y_slice_2 = np.linspace(0.3, 0.8, 51)

    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa)  # Converting partofa to an array
    faono1 = np.array(faono)  # Converting faono to an array
    cold_bc = np.where(partofa1 == 'COLD')  # Vectorized approach to find face belonging to part 'COLD'
    hot_bc = np.where(partofa1 == 'HOT')  # Vectorized approach to find face belonging to part 'HOT'
    solid = np.where(partofa1 == 'SOLID')  # Vectorized approach to find face belonging to part 'SOLID'

    # Find part to which each node belongs to
    partono = []
    for j in np.arange(nno):
        part_name = partofa1[faono1[j]]
        hot_count = np.array(np.where(part_name == 'HOT')).size
        cold_count = np.array(np.where(part_name == 'COLD')).size
        if cold_count != 0:
            partono.append('COLD')
        elif hot_count != 0:
            partono.append('HOT')
        else:
            partono.append('SOLID')

    partono1 = np.array(partono)

    phi = np.zeros(nno)  #Full phi

    #Temperature for internal nodes
    phi_int = np.zeros(
        nno - np.unique(noofa[cold_bc]).size -
        np.unique(noofa[hot_bc]).size)  # Define zero internal nodes
    nno_int = phi_int.size  # No. of internal nodes

    # Initializing values for hot and cold boundary nodes
    phi_cold = 300
    phi_hot = 500

    #Defining boundary values in phi
    phi[np.unique(noofa[cold_bc])] = phi_cold
    phi[np.unique(noofa[hot_bc])] = phi_hot

    source = -1.

    if mesh_no == 3:

        # For bivariate fit validation
        max_nn = np.zeros(nno_int)

    A = scysparse.csr_matrix(
        (nno_int, nno_int
         ))  # Thank God that boundary nodes get numbered towards the end!
    b = source * np.ones(nno_int)

    for i in np.arange(nno_int):
        n_nodes = np.unique(
            noofa[faono[i]])  # Neighbouring nodes for the collocation point
        if mesh_no == 3:
            max_nn[i] = n_nodes.size
        x_stencil = xy_no[
            n_nodes,
            0]  # X-co-ordinates of neighbouring nodes of ith node. All of them have been taken as stencil
        y_stencil = xy_no[
            n_nodes,
            1]  # Y-co-ordinates of neighbouring nodes of ith node. All of them have been taken as stencil
        xc = xy_no[i, 0]  # X-co-ordinates of centroid
        yc = xy_no[i, 1]  # Y-co-ordinates of centroid

        # Weights for 2nd derivative for all stencil points
        weights_dx2 = np.zeros(len(n_nodes))
        weights_dy2 = np.zeros(len(n_nodes))
        for ino in range(0, len(n_nodes)):
            phi_base = np.zeros(len(n_nodes))
            phi_base[ino] = 1.0
            _, _, weights_dx2[ino] = fit.BiVarPolyFit_X(
                xc, yc, x_stencil, y_stencil, phi_base)
            _, _, weights_dy2[ino] = fit.BiVarPolyFit_Y(
                xc, yc, x_stencil, y_stencil, phi_base)

        parts = partono1[n_nodes]
        for jj, node in enumerate(n_nodes):
            if parts[jj] == 'COLD':
                b[i] -= phi_cold * (weights_dx2[jj] + weights_dy2[jj])
            elif parts[jj] == 'HOT':
                b[i] -= phi_hot * (weights_dx2[jj] + weights_dy2[jj])
            else:
                A[i, node] += weights_dx2[jj] + weights_dy2[jj]

    if mesh_no == 3:
        pol_validation(mesh_no, max_nn)

    start_time1 = default_timer()
    phi_int = splinalg.spsolve(A, b)
    end_time1 = default_timer() - start_time1
    phi[:nno_int] = phi_int
    plot_data.plot_data(
        xy_no[:, 0], xy_no[:, 1], phi,
        "Mesh " + str(mesh_no) + ": Final Temperature Field Direct Solve.pdf")

    plt.spy(A)
    plt.savefig(figure_folder + "Mesh " + str(mesh_no) + ": Spy of A.pdf")
    plt.close()

    slice_1 = griddata(np.vstack(
        (xy_no[:, 0].flatten(), xy_no[:, 1].flatten())).T,
                       np.vstack(phi.flatten()), (x_slice_1, y_slice_1),
                       method="cubic")
    slice_2 = griddata(np.vstack(
        (xy_no[:, 0].flatten(), xy_no[:, 1].flatten())).T,
                       np.vstack(phi.flatten()), (x_slice_2, y_slice_2),
                       method="cubic")

    if flag == 1:

        print "The time required for execution of spsolve is: 2.10f" % (
            end_time1)
        return (A)

    else:

        return (A, phi, slice_1, slice_2)
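
A hedged usage sketch (illustrative mesh name and number; with the default flag=0 the function also returns the solution field and the two probe slices defined inside it):

A, phi, slice_1, slice_2 = poisson_solver_nc_ds("Mesh_2.msh", 2)
print("System size: %d x %d" % A.shape)
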
Code example #9
def Gauss_Siedel(filename, max_it, tol, omega=1.):

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(
        mshfile_fullpath, node_reordering=True)

    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa)  # Converting partofa to an array
    faono1 = np.array(faono)  # Converting faono to an array
    cold_bc = np.where(partofa1 == 'COLD')  # Vectorized approach to find face belonging to part 'COLD'
    hot_bc = np.where(partofa1 == 'HOT')  # Vectorized approach to find face belonging to part 'HOT'
    solid = np.where(partofa1 == 'SOLID')  # Vectorized approach to find face belonging to part 'SOLID'

    # Find part to which each node belongs to
    partono = []
    for j in np.arange(nno):
        part_name = partofa1[faono1[j]]
        hot_count = np.array(np.where(part_name == 'HOT')).size
        cold_count = np.array(np.where(part_name == 'COLD')).size
        if cold_count != 0:
            partono.append('COLD')
        elif hot_count != 0:
            partono.append('HOT')
        else:
            partono.append('SOLID')

    partono1 = np.array(partono)

    phi = np.zeros(nno)  #Full phi

    #Temperature for internal nodes
    phi_int = np.zeros(
        nno - np.unique(noofa[cold_bc]).size -
        np.unique(noofa[hot_bc]).size)  # Define zero internal nodes
    nno_int = phi_int.size  # No. of internal nodes

    # Initializing values for hot and cold boundary nodes
    phi_cold = 300
    phi_hot = 500

    #Defining boundary values in phi
    phi[np.unique(noofa[cold_bc])] = phi_cold
    phi[np.unique(noofa[hot_bc])] = phi_hot

    source = -1.
    A = scysparse.csr_matrix(
        (nno_int, nno_int
         ))  # Thank God that boundary nodes get numbered towards the end!

    b = source * np.ones(nno_int)

    for i in np.arange(nno_int):
        n_nodes = np.unique(
            noofa[faono[i]])  # Neighbouring nodes for the collocation point
        x_stencil = xy_no[
            n_nodes,
            0]  # X-co-ordinates of neighbouring nodes of ith node. All of them have been taken as stencil
        y_stencil = xy_no[
            n_nodes,
            1]  # Y-co-ordinates of neighbouring nodes of ith node. All of them have been taken as stencil
        xc = xy_no[i, 0]  # X-co-ordinates of centroid
        yc = xy_no[i, 1]  # Y-co-ordinates of centroid

        # Weights for 2nd derivative for all stencil points
        weights_dx2 = np.zeros(len(n_nodes))
        weights_dy2 = np.zeros(len(n_nodes))
        for ino in range(0, len(n_nodes)):
            phi_base = np.zeros(len(n_nodes))
            phi_base[ino] = 1.0
            _, _, weights_dx2[ino] = fit.BiVarPolyFit_X(
                xc, yc, x_stencil, y_stencil, phi_base)
            _, _, weights_dy2[ino] = fit.BiVarPolyFit_Y(
                xc, yc, x_stencil, y_stencil, phi_base)

        parts = partono1[n_nodes]
        for jj, node in enumerate(n_nodes):
            if parts[jj] == 'COLD':
                b[i] -= phi_cold * (weights_dx2[jj] + weights_dy2[jj])
            elif parts[jj] == 'HOT':
                b[i] -= phi_hot * (weights_dx2[jj] + weights_dy2[jj])
            else:
                A[i, node] += weights_dx2[jj] + weights_dy2[jj]

    #Define A1 and A2 in A1(phi)^k+1 = A2(phi)^k + q
    A1 = scysparse.tril(A)
    A2 = -scysparse.triu(A, k=1)
    it = 1

    #Residual
    var = 1
    r_0 = np.linalg.norm(b)
    residual = np.ones(1)

    phi_old = 300 * np.ones(b.shape)
    #Specified tolerence for r_k/r_0
    print r"\omega = %2.1f" % (omega)
    print "Maximum number of iterations = %d" % (max_it)
    while var and it < max_it:
        Q = (A2 * phi_old) + b
        phi_star = splinalg.spsolve(A1, Q)
        #        phi_star = np.dot(scysparse.linalg.inv(A1),Q)
        phi = (omega * phi_star) + ((1 - omega) * phi_old)
        phi_old = phi
        r_k = np.linalg.norm(b - (A * phi))  #Vector norm of error

        print "Iteration: %d" % (it)
        print "Scaled residual: %2.14f" % (r_k / r_0)
        it += 1
        if (r_k / r_0) < tol:
            residual = np.concatenate([residual, [r_k]])

            break

        elif (np.isinf(r_k) == True):
            print "Iterative Solver failed to converge.."

            break

        residual = np.concatenate([residual, [r_k]])

    return A1, A2, phi, it, residual
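
A standalone sketch of the relaxed Gauss-Seidel iteration implemented above (A1 = tril(A), A2 = -triu(A, 1), then phi = omega*A1^{-1}(A2*phi + b) + (1 - omega)*phi), run on a small 1-D Laplacian system instead of the unstructured operator; the system size, omega and tolerance are illustrative.

import numpy as np
import scipy.sparse as scysparse
import scipy.sparse.linalg as splinalg

n, omega, tol = 50, 1.5, 1e-10
A = scysparse.diags([1., -2., 1.], [-1, 0, 1], shape=(n, n), format="csc")
b = -np.ones(n)

A1 = scysparse.tril(A, format="csc")        # lower triangle (including the diagonal)
A2 = -scysparse.triu(A, k=1, format="csr")  # strictly upper triangle, negated
phi = np.zeros(n)
r_0 = np.linalg.norm(b)
for it in range(1, 10001):
    phi_star = splinalg.spsolve(A1, A2 * phi + b)
    phi = omega * phi_star + (1. - omega) * phi
    if np.linalg.norm(b - A * phi) / r_0 < tol:
        break
print(it, np.linalg.norm(b - A * phi) / r_0)
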
Code example #10
def Circle_validation(filename):

    mshfile_fullpath = icemcfd_project_folder + filename

    part_names, xy_no, xy_fa, xy_cv, noofa, cvofa, faono, faocv, partofa = umesh_reader.read_unstructured_grid(
        mshfile_fullpath, node_reordering=True)

    nno = xy_no.shape[0]  # No. of nodes
    ncv = xy_cv.shape[0]  # No. of CVs
    nfa = xy_fa.shape[0]  # No. of faces
    partofa1 = np.array(partofa)  # Converting partofa to an array
    faono1 = np.array(faono)  # Converting faono to an array
    hot_bc = np.where(partofa1 == 'COLD')  # Vectorized approach to find face belonging to part 'COLD'
    solid = np.where(partofa1 == 'SOLID')  # Vectorized approach to find face belonging to part 'SOLID'

    x_slice_1 = np.linspace(0.5, 0.7, 51)
    y_slice_1 = np.linspace(0.1, 0.4, 51)

    # Find part to which each node belongs to
    partono = []
    for j in np.arange(nno):
        part_name = partofa1[faono1[j]]
        hot_count = np.array(np.where(part_name == 'COLD')).size
        if hot_count != 0:
            partono.append('COLD')
        else:
            partono.append('SOLID')

    partono1 = np.array(partono)

    phi = np.zeros(nno)  #Full phi

    #Temperature for internal nodes
    phi_int = np.zeros(
        nno - np.unique(noofa[hot_bc]).size)  # Define zero internal nodes
    nno_int = phi_int.size  # No. of internal nodes

    # Initializing values for hot and cold boundary nodes
    phi_hot = 0.

    #Defining boundary values in phi
    phi[np.unique(noofa[hot_bc])] = phi_hot

    source = -1.

    A = scysparse.csr_matrix(
        (nno_int, nno_int
         ))  # Thank God that boundary nodes get numbered towards the end!

    b = source * np.ones(nno_int)

    for i in np.arange(nno_int):
        n_nodes = np.unique(
            noofa[faono[i]])  # Neighbouring nodes for the collocation point
        x_stencil = xy_no[
            n_nodes,
            0]  # X-co-ordinates of neighbouring nodes of ith node. All of them have been taken as stencil
        y_stencil = xy_no[
            n_nodes,
            1]  # Y-co-ordinates of neighbouring nodes of ith node. All of them have been taken as stencil
        xc = xy_no[i, 0]  # X-co-ordinates of centroid
        yc = xy_no[i, 1]  # Y-co-ordinates of centroid

        # Weights for 2nd derivative for all stencil points
        weights_dx2 = np.zeros(len(n_nodes))
        weights_dy2 = np.zeros(len(n_nodes))
        for ino in range(0, len(n_nodes)):
            phi_base = np.zeros(len(n_nodes))
            phi_base[ino] = 1.0
            _, _, weights_dx2[ino] = fit.BiVarPolyFit_X(
                xc, yc, x_stencil, y_stencil, phi_base)
            _, _, weights_dy2[ino] = fit.BiVarPolyFit_Y(
                xc, yc, x_stencil, y_stencil, phi_base)

        parts = partono1[n_nodes]
        for jj, node in enumerate(n_nodes):
            if parts[jj] == 'COLD':
                b[i] -= phi_hot * (weights_dx2[jj] + weights_dy2[jj])
            else:
                A[i, node] += weights_dx2[jj] + weights_dy2[jj]

    phi_int = splinalg.spsolve(A, b)
    phi[:nno_int] = phi_int
    #    plot_data.plot_data(xy_no[:,0],xy_no[:,1],np.abs(phi - circle_soln(xy_no[:,0],xy_no[:,1])),"Circle: Contour for error in solution.pdf")

    #    e_RMS = np.sqrt(np.average((phi - circle_soln(xy_no[:,0],xy_no[:,1]))))
    slice_1 = griddata(np.vstack(
        (xy_no[:, 0].flatten(), xy_no[:, 1].flatten())).T,
                       np.vstack(phi.flatten()), (x_slice_1, y_slice_1),
                       method="cubic")
    return slice_1
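
A hedged usage sketch ('circle.msh' is a hypothetical file name). If the mesh is a disc of radius R with the boundary part held at zero, the exact solution of the Poisson problem solved here (Laplacian of phi equal to -1) is phi(r) = (R**2 - r**2)/4, which is presumably what the commented-out circle_soln comparison encodes.

slice_1 = Circle_validation("circle.msh")   # 'circle.msh' is a hypothetical mesh file
print(slice_1[:5])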