Example #1
def Z_nearCRS_Assembling(processNumber_to_ChunksNumbers,
                         chunkNumber_to_cubesNumbers, MAX_BLOCK_SIZE, C,
                         ELEM_TYPE, Z_TMP_ELEM_TYPE, pathToReadFrom,
                         pathToSaveTo):
    """this function computes Z_CFIE_near by slices and stores them on the disk.
    The maximum size of a block is given by the variable MAX_BLOCK_SIZE in MegaBytes"""
    # test on MAX_BLOCK_SIZE
    if (MAX_BLOCK_SIZE < 0.1) or (MAX_BLOCK_SIZE > 10000.):
        print("Error: MAX_BLOCK_SIZE must lie between 0.1 and 10000 MB")
        sys.exit(1)
    num_procs = MPI.COMM_WORLD.Get_size()
    my_id = MPI.COMM_WORLD.Get_rank()
    NAME = "Z_CFIE_near"
    if (my_id == 0):
        print("Number of leaf cubes = " + str(C))
        print("assembling Z_CFIE_near chunks...")
    chunkNumbers = processNumber_to_ChunksNumbers[my_id]
    for chunkNumber in chunkNumbers:
        cubesNumbers = chunkNumber_to_cubesNumbers[chunkNumber]
        pathToReadFromChunk = os.path.join(pathToReadFrom,
                                           "chunk" + str(chunkNumber))
        Z_CFIE_near, src_RWG_numbers, rowIndexToColumnIndexes, test_RWG_numbers = chunk_of_Z_nearCRS_Assembling(
            cubesNumbers, ELEM_TYPE, Z_TMP_ELEM_TYPE, pathToReadFromChunk)
        writeToDisk_chunk_of_Z_sparse(pathToSaveTo, NAME, Z_CFIE_near,
                                      src_RWG_numbers, rowIndexToColumnIndexes,
                                      test_RWG_numbers, chunkNumber)
        del Z_CFIE_near, src_RWG_numbers, rowIndexToColumnIndexes, test_RWG_numbers
        commands.getoutput("rm -rf " + os.path.join(pathToReadFromChunk))
    # we write the chunk numbers of this process
    writeASCIIBlitzArrayToDisk(
        array(chunkNumbers).astype('i'),
        os.path.join(pathToSaveTo, 'chunkNumbers.txt'))
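
The bookkeeping consumed above can be pictured with plain Python lists. A minimal sketch, assuming processNumber_to_ChunksNumbers and chunkNumber_to_cubesNumbers are list-of-lists as the indexing suggests (the numbers are made up):

# illustrative stand-in for the chunk/cube bookkeeping (assumed structure)
processNumber_to_ChunksNumbers = [[0, 1], [2, 3]]         # MPI rank -> its chunks
chunkNumber_to_cubesNumbers = [[0, 1], [2], [3, 4], [5]]  # chunk -> its cubes

my_id = 1  # pretend we are MPI rank 1
for chunkNumber in processNumber_to_ChunksNumbers[my_id]:
    cubesNumbers = chunkNumber_to_cubesNumbers[chunkNumber]
    print("rank", my_id, "assembles chunk", chunkNumber, "covering cubes", cubesNumbers)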
Example #2
def prepare_SAI(params_simu, simuDirName):
    my_id = MPI.COMM_WORLD.Get_rank()
    tmpDirName = os.path.join(simuDirName, 'tmp' + str(my_id))
    with open(os.path.join(tmpDirName, 'pickle', 'variables.txt'), 'rb') as f:
        variables = cPickle.load(f)
    # writing the chunk numbers (per process)
    chunkNumbers = variables['processNumber_to_ChunksNumbers'][my_id]
    writeASCIIBlitzArrayToDisk(
        array(chunkNumbers).astype('i'),
        os.path.join(tmpDirName, 'Mg_LeftFrob', 'chunkNumbers.txt'))
    # writing the cubes numbers (chunk-dependent)
    for chunk in chunkNumbers:
        writeASCIIBlitzArrayToDisk(
            array(variables['chunkNumber_to_cubesNumbers'][chunk]).astype('i'),
            os.path.join(tmpDirName, 'Mg_LeftFrob',
                         "chunk" + str(chunk) + 'cubesNumbers.txt'))
    # writing the chunk numbers (cube-dependent)
    writeASCIIBlitzArrayToDisk(
        array(variables['cubeNumber_to_chunkNumber']).astype('i'),
        os.path.join(tmpDirName, 'Mg_LeftFrob',
                     'cubeNumber_to_chunkNumber.txt'))
    variables['Wall_time_Mg_computation'] = 0.0
    variables['CPU_time_Mg_computation'] = 0.0
    with open(os.path.join(tmpDirName, 'pickle', 'variables.txt'), 'wb') as f:
        cPickle.dump(variables, f)
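
The load/modify/dump round trip on variables.txt recurs throughout these examples. A minimal Python 3 helper in the same spirit (hypothetical: it substitutes pickle for the Python 2 cPickle used above):

import os
import pickle

def update_variables(tmpDirName, updates):
    # load the variables dict, apply the updates, write it back
    path = os.path.join(tmpDirName, 'pickle', 'variables.txt')
    with open(path, 'rb') as f:
        variables = pickle.load(f)
    variables.update(updates)
    with open(path, 'wb') as f:
        pickle.dump(variables, f)

# usage sketch:
# update_variables(tmpDirName, {'Wall_time_Mg_computation': 0.0,
#                               'CPU_time_Mg_computation': 0.0})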
Example #3
def compute_SAI(params_simu, simuDirName):
    my_id = MPI.COMM_WORLD.Get_rank()
    tmpDirName = os.path.join(simuDirName, 'tmp' + str(my_id))
    pathToReadFrom = os.path.join(tmpDirName, 'Z_tmp')
    pathToSaveTo = os.path.join(tmpDirName, 'Mg_LeftFrob')
    with open(os.path.join(tmpDirName, 'pickle', 'variables.txt'), 'rb') as f:
        variables = cPickle.load(f)
    #if my_id == 0:
    #    print(variables['chunkNumber_to_cubesNumbers'])
    #    print(variables['cubeNumber_to_chunkNumber'])
    # writing the chunk numbers (per process)
    chunkNumbers = variables['processNumber_to_ChunksNumbers'][my_id]
    writeASCIIBlitzArrayToDisk(
        array(chunkNumbers).astype('i'),
        os.path.join(pathToSaveTo, 'chunkNumbers.txt'))
    # writing the cubes numbers (chunk-dependent)
    for chunk in chunkNumbers:
        writeASCIIBlitzArrayToDisk(
            array(variables['chunkNumber_to_cubesNumbers'][chunk]).astype('i'),
            os.path.join(pathToSaveTo,
                         "chunk" + str(chunk) + 'cubesNumbers.txt'))
    # writing the chunk numbers (cube-dependent)
    writeASCIIBlitzArrayToDisk(
        array(variables['cubeNumber_to_chunkNumber']).astype('i'),
        os.path.join(pathToSaveTo, 'cubeNumber_to_chunkNumber.txt'))
    # SAI precond computation
    Wall_time_Mg_computation, CPU_time_Mg_computation = compute_SAIpreconditioner(
        tmpDirName, variables['C'],
        variables['chunkNumber_to_cubesNumbers'],
        variables['cubeNumber_to_chunkNumber'],
        variables['chunkNumber_to_processNumber'],
        variables['processNumber_to_ChunksNumbers'],
        params_simu.MAX_BLOCK_SIZE)
    variables['Wall_time_Mg_computation'] = Wall_time_Mg_computation
    variables['CPU_time_Mg_computation'] = CPU_time_Mg_computation
    if (my_id == 0) and (params_simu.VERBOSE == 1):
        print(variables['CPU_time_Z_near_computation'], "CPU time (seconds) for constructing Z_CFIE_near")
        print(variables['Wall_time_Z_near_computation'], "Wall time (seconds) for constructing Z_CFIE_near")
        print(variables['CPU_time_Mg_computation'], "CPU time (seconds) for constructing SAI precond")
        print(variables['Wall_time_Mg_computation'], "Wall time (seconds) for constructing SAI precond")
    with open(os.path.join(tmpDirName, 'pickle', 'variables.txt'), 'wb') as f:
        cPickle.dump(variables, f)
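
compute_SAIpreconditioner returns a (wall time, CPU time) pair that is stored in the variables dict. A minimal sketch of that measurement pattern (time.process_time is Python 3; the workload here is a stand-in):

import time

def timed(do_work):
    # return (wall seconds, CPU seconds) spent inside do_work()
    wall0, cpu0 = time.time(), time.process_time()
    do_work()
    return time.time() - wall0, time.process_time() - cpu0

Wall_time_Mg_computation, CPU_time_Mg_computation = timed(
    lambda: sum(i * i for i in range(10**6)))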
Example #4
def computeTreeParameters(my_id, tmpDirName, a, k, N_levels, params_simu):
    # L computation
    NB_DIGITS = params_simu.NB_DIGITS
    L = zeros(N_levels-1, 'i')  # array of pole numbers: one per level
    for i in range(L.shape[0]):
        L[i] = L_computation(k, a*(2**i), NB_DIGITS)
    if (my_id==0) and (params_simu.VERBOSE == 1):
        print("L = " + str(L))
    # integration and interpolation data
    octtreeXcosThetas, octtreeWthetas, octtreeNthetas = octtreeXWN_computation(
        -1.0, 1.0, L, N_levels, params_simu.int_method_theta,
        params_simu.INCLUDE_BOUNDARIES)
    octtreeXthetas = zeros(octtreeXcosThetas.shape, 'd')
    for i in range(octtreeNthetas.shape[0]):
        Npoints = octtreeNthetas[i]
        octtreeXthetas[i,:Npoints] = arccos(octtreeXcosThetas[i, Npoints-1::-1])
    octtreeXphis, octtreeWphis, octtreeNphis = octtreeXWN_computation(
        0.0, 2.0*pi, L, N_levels, params_simu.int_method_phi,
        params_simu.INCLUDE_BOUNDARIES)
    #if (my_id==0):
    #    print("Nthetas =", octtreeNthetas)
    #    print("Nphis =", octtreeNphis)
    # order of interpolation
    NOrderInterpTheta = L[0]
    NOrderInterpPhi = L[0]
    # number of zones per theta
    #num_proc = MPI.COMM_WORLD.Get_size()
    #Ntheta_zones, Nphi_zones = directions_zones_calculation(num_proc)
    # now we write the info to disk
    writeScalarToDisk(NOrderInterpTheta, os.path.join(tmpDirName, 'octtree_data/NOrderInterpTheta.txt') )
    writeScalarToDisk(NOrderInterpPhi, os.path.join(tmpDirName, 'octtree_data/NOrderInterpPhi.txt') )
    #writeScalarToDisk(Ntheta_zones, os.path.join(tmpDirName, 'octtree_data/Ntheta_zones.txt') )
    #writeScalarToDisk(Nphi_zones, os.path.join(tmpDirName, 'octtree_data/Nphi_zones.txt') )
    writeASCIIBlitzArrayToDisk(L, os.path.join(tmpDirName, 'octtree_data/LExpansion.txt') )
    writeScalarToDisk(params_simu.alphaTranslation_smoothing_factor, os.path.join(tmpDirName, 'octtree_data/alphaTranslation_smoothing_factor.txt') )
    writeScalarToDisk(params_simu.alphaTranslation_thresholdRelValueMax, os.path.join(tmpDirName, 'octtree_data/alphaTranslation_thresholdRelValueMax.txt') )
    writeScalarToDisk(params_simu.alphaTranslation_RelativeCountAboveThreshold, os.path.join(tmpDirName, 'octtree_data/alphaTranslation_RelativeCountAboveThreshold.txt') )
    writeASCIIBlitzArrayToDisk(octtreeNthetas, os.path.join(tmpDirName, 'octtree_data/octtreeNthetas.txt') )
    writeASCIIBlitzArrayToDisk(octtreeNphis, os.path.join(tmpDirName, 'octtree_data/octtreeNphis.txt') )
    writeASCIIBlitzArrayToDisk(octtreeXthetas, os.path.join(tmpDirName, 'octtree_data/octtreeXthetas.txt') )
    writeASCIIBlitzArrayToDisk(octtreeXphis, os.path.join(tmpDirName, 'octtree_data/octtreeXphis.txt') )
    writeASCIIBlitzArrayToDisk(octtreeWthetas, os.path.join(tmpDirName, 'octtree_data/octtreeWthetas.txt') )
    writeASCIIBlitzArrayToDisk(octtreeWphis, os.path.join(tmpDirName, 'octtree_data/octtreeWphis.txt') )
    A_theta, B_theta, A_phi, B_phi = 0., pi, 0., 2.*pi
    N_theta, N_phi = octtreeNthetas[0], octtreeNphis[0]
    INCLUDED_THETA_BOUNDARIES, INCLUDED_PHI_BOUNDARIES = 0, 0
    if (abs(octtreeXthetas[0,0]-A_theta)<=1.e-8) and (abs(octtreeXthetas[0,N_theta-1]-B_theta)<=1.e-8):
        INCLUDED_THETA_BOUNDARIES = 1
    if (abs(octtreeXphis[0,0]-A_phi)<=1.e-8) and (abs(octtreeXphis[0,N_phi-1]-B_phi)<=1.e-8):
        INCLUDED_PHI_BOUNDARIES = 1
    writeScalarToDisk(INCLUDED_THETA_BOUNDARIES, os.path.join(tmpDirName, 'octtree_data/INCLUDED_THETA_BOUNDARIES.txt') )
    writeScalarToDisk(INCLUDED_PHI_BOUNDARIES, os.path.join(tmpDirName, 'octtree_data/INCLUDED_PHI_BOUNDARIES.txt') )

    # we now have to calculate the theta/phi abscissas for the coarsest level
    # These are needed for far-field computation
    L_coarsest = L_computation(k, a*(2**N_levels), NB_DIGITS)
    # theta abscissas
    NpointsTheta = L_coarsest + 1
    DTheta = 0
    if not params_simu.AUTOMATIC_THETAS and (params_simu.USER_DEFINED_NB_THETA > 0):
        NpointsTheta = params_simu.USER_DEFINED_NB_THETA
    else:
        NpointsThetaTmp = NpointsTheta * (params_simu.STOP_THETA - params_simu.START_THETA)/pi
        NpointsTheta = int(ceil(NpointsThetaTmp))+1
    octtreeXthetas_coarsest = zeros(NpointsTheta, 'd')
    if NpointsTheta>1:
        DTheta = (params_simu.STOP_THETA - params_simu.START_THETA)/(NpointsTheta - 1)
        for i in range(NpointsTheta):
            octtreeXthetas_coarsest[i] = params_simu.START_THETA + i*DTheta
        # make sure the last element is params_simu.STOP_THETA
        octtreeXthetas_coarsest[-1] = params_simu.STOP_THETA
    else:
        octtreeXthetas_coarsest[0] = params_simu.START_THETA
    # phis abscissas
    NpointsPhi = 2 * L_coarsest
    DPhi = 0
    if not params_simu.AUTOMATIC_PHIS and (params_simu.USER_DEFINED_NB_PHI > 0):
        NpointsPhi = params_simu.USER_DEFINED_NB_PHI
    else:
        NpointsPhiTmp = NpointsPhi * (params_simu.STOP_PHI - params_simu.START_PHI)/(2.0*pi)
        NpointsPhi = int(ceil(NpointsPhiTmp))+1
    octtreeXphis_coarsest = zeros(NpointsPhi, 'd')
    if NpointsPhi>1:
        DPhi = (params_simu.STOP_PHI - params_simu.START_PHI)/(NpointsPhi-1)
        for i in range(NpointsPhi):
            octtreeXphis_coarsest[i] = params_simu.START_PHI + i*DPhi
        # make sure the last element is params_simu.STOP_PHI
        octtreeXphis_coarsest[-1] = params_simu.STOP_PHI
    else:
        octtreeXphis_coarsest[0] = params_simu.START_PHI
    if (my_id==0):
        print("Summary of sampling points at the coarsest level (used for far-field sampling).")
        print("L_coarsest =", L_coarsest)
        print("For 0 < theta < 180, NpointsTheta = L_coarsest + 1 =", L_coarsest + 1)
        print("For", params_simu.START_THETA/pi*180, "< theta <", params_simu.STOP_THETA/pi*180, ", NpointsTheta =", NpointsTheta, ", DTheta =", DTheta/pi*180, "degrees")
        print("For 0 < phi < 360, NpointsPhi = 2 * L_coarsest =", 2 * L_coarsest)
        print("For", params_simu.START_PHI/pi*180, "< phi <", params_simu.STOP_PHI/pi*180, ", NpointsPhi =", NpointsPhi, ", DPhi =", DPhi/pi*180, "degrees")
    writeASCIIBlitzArrayToDisk(octtreeXthetas_coarsest, os.path.join(tmpDirName, 'octtree_data/octtreeXthetas_coarsest.txt') )
    writeASCIIBlitzArrayToDisk(octtreeXphis_coarsest, os.path.join(tmpDirName, 'octtree_data/octtreeXphis_coarsest.txt') )
    MPI.COMM_WORLD.Barrier()
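
The coarsest-level abscissas built above are a uniform grid from START to STOP with the endpoint pinned. The same logic as a standalone helper (a sketch assuming numpy; uniform_abscissas is a hypothetical name, not from the source):

from numpy import pi, zeros

def uniform_abscissas(start, stop, Npoints):
    x = zeros(Npoints, 'd')
    if Npoints > 1:
        D = (stop - start) / (Npoints - 1)
        for i in range(Npoints):
            x[i] = start + i * D
        x[-1] = stop  # pin the endpoint against rounding drift
    else:
        x[0] = start
    return x

# e.g. uniform_abscissas(0.0, pi, 5) samples theta over [0, 180] degrees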
Example #5
def setup_excitation(params_simu, inputDirName, simuDirName):
    num_proc = MPI.COMM_WORLD.Get_size()
    my_id = MPI.COMM_WORLD.Get_rank()
    tmpDirName = os.path.join(simuDirName, 'tmp' + str(my_id))

    # phase center
    writeASCIIBlitzArrayToDisk(array(params_simu.r_phase_center), os.path.join(tmpDirName,'V_CFIE/r_phase_center.txt'))

    # observation points
    if (params_simu.BISTATIC_R_OBS == 1) and (params_simu.BISTATIC_R_OBS_FILENAME != ""):
        if (my_id==0): # this file is only on processor 0
            r_obs = read_observation_points(os.path.join(inputDirName, params_simu.BISTATIC_R_OBS_FILENAME))
        else:
            r_obs = zeros((1, 3), 'd')
        r_obs = MPI.COMM_WORLD.bcast(r_obs)
        writeASCIIBlitzArrayToDisk(r_obs, os.path.join(tmpDirName,'V_CFIE/r_obs.txt'))
        writeScalarToDisk(1, os.path.join(tmpDirName,'V_CFIE/BISTATIC_R_OBS.txt'))
    else:
        writeScalarToDisk(0, os.path.join(tmpDirName,'V_CFIE/BISTATIC_R_OBS.txt'))

    # bistatic observation angles
    if (params_simu.BISTATIC == 1) and (params_simu.BISTATIC_ANGLES_OBS == 1) and (params_simu.BISTATIC_ANGLES_OBS_FILENAME != ""):
        writeScalarToDisk(1, os.path.join(tmpDirName,'V_CFIE/BISTATIC_ANGLES_OBS.txt'))
    else:
        writeScalarToDisk(0, os.path.join(tmpDirName,'V_CFIE/BISTATIC_ANGLES_OBS.txt'))

    # Antenna pattern?
    if (params_simu.BISTATIC == 1) and (params_simu.ANTENNA_PATTERN == 1):
        writeScalarToDisk(1, os.path.join(tmpDirName,'V_CFIE/ANTENNA_PATTERN.txt'))
    else:
        writeScalarToDisk(0, os.path.join(tmpDirName,'V_CFIE/ANTENNA_PATTERN.txt'))

    # now the excitations
    writeScalarToDisk(params_simu.BISTATIC_EXCITATION_DIPOLES, os.path.join(tmpDirName,'V_CFIE/DIPOLES_EXCITATION.txt'))
    writeScalarToDisk(params_simu.BISTATIC_EXCITATION_PLANE_WAVE, os.path.join(tmpDirName,'V_CFIE/PLANE_WAVE_EXCITATION.txt'))
    writeScalarToDisk(params_simu.V_FULL_PRECISION*1, os.path.join(tmpDirName, 'V_CFIE/V_FULL_PRECISION.txt') )
    # if we have dipoles excitation AND definition of the excitation in a user-supplied file
    if (params_simu.BISTATIC_EXCITATION_DIPOLES == 1):
        if params_simu.BISTATIC_EXCITATION_J_DIPOLES_FILENAME != "":
            if (my_id==0): # this file is only on processor 0
                J_src, r_J_src = read_dipole_excitation(os.path.join(inputDirName, params_simu.BISTATIC_EXCITATION_J_DIPOLES_FILENAME))
            else:
                J_src, r_J_src = zeros((1, 3), 'D'), zeros((1, 3), 'd')
            J_src = MPI.COMM_WORLD.bcast(J_src)
            r_J_src = MPI.COMM_WORLD.bcast(r_J_src)
            writeScalarToDisk(1, os.path.join(tmpDirName,'V_CFIE/J_DIPOLES_EXCITATION.txt'))
            writeASCIIBlitzArrayToDisk(J_src, os.path.join(tmpDirName,'V_CFIE/J_dip.txt'))
            writeASCIIBlitzArrayToDisk(r_J_src, os.path.join(tmpDirName,'V_CFIE/r_J_dip.txt'))
        else:
            writeScalarToDisk(0, os.path.join(tmpDirName,'V_CFIE/J_DIPOLES_EXCITATION.txt'))
        if params_simu.BISTATIC_EXCITATION_M_DIPOLES_FILENAME != "":
            if (my_id==0): # this file is only on processor 0
                M_src, r_M_src = read_dipole_excitation(os.path.join(inputDirName, params_simu.BISTATIC_EXCITATION_M_DIPOLES_FILENAME))
            else:
                M_src, r_M_src = zeros((1, 3), 'D'), zeros((1, 3), 'd')
            M_src = MPI.COMM_WORLD.bcast(M_src)
            r_M_src = MPI.COMM_WORLD.bcast(r_M_src)
            writeScalarToDisk(1, os.path.join(tmpDirName,'V_CFIE/M_DIPOLES_EXCITATION.txt'))
            writeASCIIBlitzArrayToDisk(M_src, os.path.join(tmpDirName,'V_CFIE/M_dip.txt'))
            writeASCIIBlitzArrayToDisk(r_M_src, os.path.join(tmpDirName,'V_CFIE/r_M_dip.txt'))
        else:
            writeScalarToDisk(0, os.path.join(tmpDirName,'V_CFIE/M_DIPOLES_EXCITATION.txt'))
    # now the plane wave excitation
    if params_simu.BISTATIC_EXCITATION_PLANE_WAVE == 1:
        writeScalarToDisk(params_simu.theta_inc, os.path.join(tmpDirName,'V_CFIE/theta_inc.txt'))
        writeScalarToDisk(params_simu.phi_inc, os.path.join(tmpDirName,'V_CFIE/phi_inc.txt'))
        E_inc = array([params_simu.E_inc_theta, params_simu.E_inc_phi], 'D')
        writeASCIIBlitzArrayToDisk(E_inc, os.path.join(tmpDirName,'V_CFIE/E_inc.txt'))
    if (params_simu.BISTATIC_EXCITATION_DIPOLES != 1) and (params_simu.BISTATIC_EXCITATION_PLANE_WAVE != 1):
        if (my_id==0):
            print("incorrect excitation choice. You have to choose dipole and/or plane wave excitation.")
        sys.exit(1)

    if (params_simu.MONOSTATIC_RCS == 1) and (params_simu.ANGLES_FROM_FILE == 1) and (params_simu.ANGLES_FILENAME != ""):
        if (my_id==0): # this file is only on processor 0
            angles = read_input_angles(os.path.join(inputDirName, params_simu.ANGLES_FILENAME))
        else:
            angles = zeros((1, 2), 'd')
        angles = MPI.COMM_WORLD.bcast(angles)
        writeASCIIBlitzArrayToDisk(angles, os.path.join(tmpDirName,'V_CFIE/monostatic_angles.txt'))
        writeScalarToDisk(1, os.path.join(tmpDirName,'V_CFIE/ANGLES_FROM_FILE.txt'))
    elif (params_simu.MONOSTATIC_RCS == 1) and ((params_simu.ANGLES_FROM_FILE == 0) or (params_simu.ANGLES_FILENAME == "")):
        writeScalarToDisk(0, os.path.join(tmpDirName,'V_CFIE/ANGLES_FROM_FILE.txt'))

    if params_simu.MONOSTATIC_SAR==1:
        writeASCIIBlitzArrayToDisk(array(params_simu.SAR_local_x_hat, 'd'), os.path.join(tmpDirName,'V_CFIE/SAR_local_x_hat.txt'))
        writeASCIIBlitzArrayToDisk(array(params_simu.SAR_local_y_hat, 'd'), os.path.join(tmpDirName,'V_CFIE/SAR_local_y_hat.txt'))
        writeASCIIBlitzArrayToDisk(array(params_simu.SAR_plane_origin, 'd'), os.path.join(tmpDirName,'V_CFIE/SAR_plane_origin.txt'))
        writeScalarToDisk(params_simu.SAR_x_span, os.path.join(tmpDirName,'V_CFIE/SAR_x_span.txt'))
        writeScalarToDisk(params_simu.SAR_y_span, os.path.join(tmpDirName,'V_CFIE/SAR_y_span.txt'))
        writeScalarToDisk(params_simu.SAR_x_span_offset, os.path.join(tmpDirName,'V_CFIE/SAR_x_span_offset.txt'))
        writeScalarToDisk(params_simu.SAR_y_span_offset, os.path.join(tmpDirName,'V_CFIE/SAR_y_span_offset.txt'))
        writeScalarToDisk(params_simu.SAR_N_x_points, os.path.join(tmpDirName,'V_CFIE/SAR_N_x_points.txt'))
        writeScalarToDisk(params_simu.SAR_N_y_points, os.path.join(tmpDirName,'V_CFIE/SAR_N_y_points.txt'))
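
Several branches above repeat the same read-on-rank-0-then-broadcast idiom: only rank 0 reads the input file, the other ranks build a dummy array of matching dtype, and bcast replaces the dummies everywhere. A reduced sketch (zeros stands in for the actual file read):

from mpi4py import MPI
from numpy import zeros

my_id = MPI.COMM_WORLD.Get_rank()
if my_id == 0:
    r_obs = zeros((4, 3), 'd')  # rank 0 would call read_observation_points here
else:
    r_obs = zeros((1, 3), 'd')  # dummy of the right dtype on the other ranks
r_obs = MPI.COMM_WORLD.bcast(r_obs)  # every rank now holds rank 0's array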
Example #6
def setup_mesh(params_simu, simuDirName):
    """Sets up the mesh.
       params_simu is a class instance that contains the parameters for the simulation.
    """
    num_procs = MPI.COMM_WORLD.Get_size()
    my_id = MPI.COMM_WORLD.Get_rank()

    tmpDirName = os.path.join(simuDirName, 'tmp' + str(my_id))
    geoDirName = os.path.join(simuDirName, 'geo')
    meshPath = os.path.join(tmpDirName, "mesh")

    # size of cube at finest level
    a = params_simu.c / params_simu.f * params_simu.a_factor
    if (my_id == 0):
        N_RWG = readIntFromDisk(os.path.join(meshPath, "N_RWG.txt"))
        N_levels = readIntFromDisk(os.path.join(meshPath, 'N_levels.txt'))
        max_N_cubes_1D = readIntFromDisk(
            os.path.join(meshPath, 'max_N_cubes_1D.txt'))
        C = readIntFromDisk(os.path.join(meshPath, 'C.txt'))
        big_cube_center_coord = read1DBlitzArrayFromDisk(
            os.path.join(meshPath, "big_cube_center_coord.txt"), 'd')
        big_cube_lower_coord = read1DBlitzArrayFromDisk(
            os.path.join(meshPath, "big_cube_lower_coord.txt"), 'd')

        # print a summary of the mesh data
        print("N_levels = " + str(N_levels))
        print("max_N_cubes_1D = " + str(max_N_cubes_1D))
        print("big_cube_center_coord = " + str(big_cube_center_coord))
        print("big_cube_lower_coord = " + str(big_cube_lower_coord))

    else:
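        # dummy placeholders on the non-root ranks; overwritten by the broadcasts below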
        big_cube_lower_coord = ['blabla']
        big_cube_center_coord = ['blabla']
        N_levels = ['blabla']
        N_RWG = ['blabla']
        C = ['blabla']
    big_cube_lower_coord = MPI.COMM_WORLD.bcast(big_cube_lower_coord)
    big_cube_center_coord = MPI.COMM_WORLD.bcast(big_cube_center_coord)
    N_levels = MPI.COMM_WORLD.bcast(N_levels)
    N_RWG = MPI.COMM_WORLD.bcast(N_RWG)
    C = MPI.COMM_WORLD.bcast(C)

    w = 2. * pi * params_simu.f
    k = w * sqrt(params_simu.eps_0 * params_simu.eps_r * params_simu.mu_0 *
                 params_simu.mu_r) + 1.j * 0.
    CFIE = array(params_simu.CFIE).astype('D')

    writeScalarToDisk(num_procs,
                      os.path.join(tmpDirName, 'octtree_data/num_procs.txt'))
    writeScalarToDisk(
        a, os.path.join(tmpDirName, 'octtree_data/leaf_side_length.txt'))
    writeScalarToDisk(w, os.path.join(tmpDirName, 'octtree_data/w.txt'))
    writeScalarToDisk(params_simu.eps_r,
                      os.path.join(tmpDirName, 'octtree_data/eps_r.txt'))
    writeScalarToDisk(params_simu.mu_r,
                      os.path.join(tmpDirName, 'octtree_data/mu_r.txt'))
    writeScalarToDisk(k, os.path.join(tmpDirName, 'octtree_data/k.txt'))
    writeASCIIBlitzArrayToDisk(
        CFIE, os.path.join(tmpDirName, 'octtree_data/CFIEcoeffs.txt'))
    writeScalarToDisk(N_RWG, os.path.join(tmpDirName,
                                          'octtree_data/N_RWG.txt'))
    writeScalarToDisk(
        N_levels - 1,
        os.path.join(tmpDirName, 'octtree_data/N_active_levels.txt'))
    writeASCIIBlitzArrayToDisk(
        big_cube_lower_coord,
        os.path.join(tmpDirName, 'octtree_data/big_cube_lower_coord.txt'))
    writeASCIIBlitzArrayToDisk(
        big_cube_center_coord,
        os.path.join(tmpDirName, 'octtree_data/big_cube_center_coord.txt'))
    writeScalarToDisk(
        params_simu.PERIODIC_Theta * 1,
        os.path.join(tmpDirName, 'octtree_data/PERIODIC_Theta.txt'))
    writeScalarToDisk(
        params_simu.CYCLIC_Theta * 1,
        os.path.join(tmpDirName, 'octtree_data/CYCLIC_Theta.txt'))
    writeScalarToDisk(
        params_simu.PERIODIC_Phi * 1,
        os.path.join(tmpDirName, 'octtree_data/PERIODIC_Phi.txt'))
    writeScalarToDisk(params_simu.CYCLIC_Phi * 1,
                      os.path.join(tmpDirName, 'octtree_data/CYCLIC_Phi.txt'))
    writeScalarToDisk(
        params_simu.ALLOW_CEILING_LEVEL * 1,
        os.path.join(tmpDirName, 'octtree_data/ALLOW_CEILING_LEVEL.txt'))
    writeScalarToDisk(
        params_simu.DIRECTIONS_PARALLELIZATION * 1,
        os.path.join(tmpDirName,
                     'octtree_data/DIRECTIONS_PARALLELIZATION.txt'))
    writeScalarToDisk(
        params_simu.BE_BH_N_Gauss_points,
        os.path.join(tmpDirName, 'octtree_data/N_GaussOnTriangle.txt'))
    writeScalarToDisk(
        params_simu.MOM_FULL_PRECISION * 1,
        os.path.join(tmpDirName, 'octtree_data/MOM_FULL_PRECISION.txt'))
    writeScalarToDisk(params_simu.VERBOSE * 1,
                      os.path.join(tmpDirName, 'octtree_data/VERBOSE.txt'))
    writeScalarToDisk(params_simu.TDS_APPROX * 1,
                      os.path.join(tmpDirName, 'octtree_data/TDS_APPROX.txt'))
    writeScalarToDisk(params_simu.Z_s,
                      os.path.join(tmpDirName, 'octtree_data/Z_s.txt'))
    # what type of simulation are we running?
    writeScalarToDisk(params_simu.BISTATIC * 1,
                      os.path.join(tmpDirName, 'BISTATIC.txt'))
    writeScalarToDisk(params_simu.MONOSTATIC_RCS * 1,
                      os.path.join(tmpDirName, 'MONOSTATIC_RCS.txt'))
    writeScalarToDisk(params_simu.MONOSTATIC_SAR * 1,
                      os.path.join(tmpDirName, 'MONOSTATIC_SAR.txt'))
    writeScalarToDisk(params_simu.COMPUTE_RCS_HH * 1,
                      os.path.join(tmpDirName, 'COMPUTE_RCS_HH.txt'))
    writeScalarToDisk(params_simu.COMPUTE_RCS_VV * 1,
                      os.path.join(tmpDirName, 'COMPUTE_RCS_VV.txt'))
    writeScalarToDisk(params_simu.COMPUTE_RCS_HV * 1,
                      os.path.join(tmpDirName, 'COMPUTE_RCS_HV.txt'))
    writeScalarToDisk(params_simu.COMPUTE_RCS_VH * 1,
                      os.path.join(tmpDirName, 'COMPUTE_RCS_VH.txt'))
    writeScalarToDisk(params_simu.USE_PREVIOUS_SOLUTION * 1,
                      os.path.join(tmpDirName, 'USE_PREVIOUS_SOLUTION.txt'))
    writeScalarToDisk(
        params_simu.MONOSTATIC_BY_BISTATIC_APPROX * 1,
        os.path.join(tmpDirName, 'MONOSTATIC_BY_BISTATIC_APPROX.txt'))
    writeScalarToDisk(params_simu.MAXIMUM_DELTA_PHASE,
                      os.path.join(tmpDirName, 'MAXIMUM_DELTA_PHASE.txt'))
    # writing the iterative solver setup
    restrt = min(params_simu.RESTART, N_RWG)
    writeScalarToDisk(params_simu.MAXITER,
                      os.path.join(tmpDirName, 'iterative_data/MAXITER.txt'))
    writeScalarToDisk(restrt,
                      os.path.join(tmpDirName, 'iterative_data/RESTART.txt'))
    writeScalarToDisk(params_simu.SOLVER,
                      os.path.join(tmpDirName, 'iterative_data/SOLVER.txt'))
    writeScalarToDisk(
        params_simu.INNER_SOLVER,
        os.path.join(tmpDirName, 'iterative_data/INNER_SOLVER.txt'))
    writeScalarToDisk(params_simu.TOL,
                      os.path.join(tmpDirName, 'iterative_data/TOL.txt'))
    writeScalarToDisk(params_simu.INNER_TOL,
                      os.path.join(tmpDirName, 'iterative_data/INNER_TOL.txt'))
    writeScalarToDisk(
        params_simu.INNER_MAXITER,
        os.path.join(tmpDirName, 'iterative_data/INNER_MAXITER.txt'))
    writeScalarToDisk(
        params_simu.INNER_RESTART,
        os.path.join(tmpDirName, 'iterative_data/INNER_RESTART.txt'))
    writeScalarToDisk(N_RWG, os.path.join(tmpDirName, 'ZI/ZI_size.txt'))

    variables = {}
    variables['a'] = a
    variables['k'] = k
    variables['w'] = w
    variables['C'] = C
    variables['N_RWG'] = N_RWG
    variables['N_levels'] = N_levels
    variables['CFIE'] = CFIE
    with open(os.path.join(tmpDirName, 'pickle', 'variables.txt'), 'wb') as f:
        cPickle.dump(variables, f)
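
writeScalarToDisk and readIntFromDisk are not shown in these examples. A plausible minimal pair matching how they are called above (an assumption about the helpers, not the library's actual code):

import os

def writeScalarToDisk(x, path):
    # one scalar per file; create the parent directory if needed
    d = os.path.dirname(path)
    if d:
        os.makedirs(d, exist_ok=True)
    with open(path, 'w') as f:
        f.write(str(x) + '\n')

def readIntFromDisk(path):
    with open(path) as f:
        return int(f.read().strip())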
Example #7
def Mg_listsOfZnearBlocks_ToTransmitAndReceive(
        ZnearChunkNumber_to_cubesNumbers, ZnearCubeNumber_to_chunkNumber,
        ZnearChunkNumber_to_processNumber, ZnearProcessNumber_to_ChunksNumbers,
        pathToReadFrom, Z_TMP_ELEM_TYPE):
    """this function creates 2 lists: Mg_listsOfZ_nearToTransmit and Mg_listsOfZ_nearToReceive"""
    num_proc = MPI.COMM_WORLD.Get_size()
    my_id = MPI.COMM_WORLD.Get_rank()
    chunkNumbers = ZnearProcessNumber_to_ChunksNumbers[my_id]
    localPreconditionedCubesNumbers = []
    for i in chunkNumbers:
        localPreconditionedCubesNumbers.append(
            ZnearChunkNumber_to_cubesNumbers[i])
    listCubesNumbersToReceiveTmp, listCubesNumbersToSendTmp = [], []
    # initialization of the lists
    for i in range(num_proc):
        listCubesNumbersToReceiveTmp.append([])
        listCubesNumbersToSendTmp.append([])
    # we now fill the lists
    for elem in localPreconditionedCubesNumbers:
        for localCube in elem:  # elem is a list of cubes Numbers
            chunkNumber = ZnearCubeNumber_to_chunkNumber[localCube]
            pathToReadCubeFrom = os.path.join(pathToReadFrom,
                                              "chunk" + str(chunkNumber))
            cube = CubeClass()
            cube.setIntArraysFromFile(pathToReadCubeFrom, localCube)
            for j in cube.cubeNeighborsIndexes:
                ZnearChunkNumber = ZnearCubeNumber_to_chunkNumber[j]
                ZnearProcessNumber = ZnearChunkNumber_to_processNumber[int(
                    ZnearChunkNumber)]
                if not (my_id == ZnearProcessNumber):
                    listCubesNumbersToReceiveTmp[ZnearProcessNumber].append(j)
                    listCubesNumbersToSendTmp[ZnearProcessNumber].append(
                        localCube)
    # we now reduce the redundancy of the lists
    listCubesNumbersToReceive, listCubesNumbersToSend = [], []
    for i in range(num_proc):
        listCubesNumbersToReceive.append(
            reduceListRedundancy(listCubesNumbersToReceiveTmp[i]))
        listCubesNumbersToSend.append(
            reduceListRedundancy(listCubesNumbersToSendTmp[i]))
    # now we construct the corresponding chunkNumbers and processNumbers lists
    listChunkNumbersToReceive, listChunkNumbersToSend = [], []
    for L in listCubesNumbersToReceive:
        listChunkNumbersToReceive.append([])
        for i in L:
            listChunkNumbersToReceive[-1].append(
                ZnearCubeNumber_to_chunkNumber[i])
    for L in listCubesNumbersToSend:
        listChunkNumbersToSend.append([])
        for i in L:
            listChunkNumbersToSend[-1].append(
                ZnearCubeNumber_to_chunkNumber[i])
    ## we create the missing directories
    for L in listChunkNumbersToReceive:
        for i in L:
            if 'chunk' + str(i) not in os.listdir(pathToReadFrom):
                os.mkdir(os.path.join(pathToReadFrom, 'chunk' + str(i)))
    ## now we write the data to be exchanged to disk
    for i in range(num_proc):
        if not (my_id == i):
            writeASCIIBlitzArrayToDisk(
                array(listCubesNumbersToSend[i]).astype('i'),
                os.path.join(pathToReadFrom,
                             "CubesNumbersToSendToP" + str(i) + ".txt"))
            writeASCIIBlitzArrayToDisk(
                array(listChunkNumbersToSend[i]).astype('i'),
                os.path.join(pathToReadFrom,
                             "ChunkNumbersToSendToP" + str(i) + ".txt"))
    #MPI.COMM_WORLD.Barrier()
    ## finally we write the format of the Near Field matrix elements
    NBytes = 8
    if Z_TMP_ELEM_TYPE == 'D':
        NBytes = 16
        print("16-byte elements not yet supported for data transfer in communicateZnearBlocks. Exiting...")
        sys.exit(1)
    writeScalarToDisk(NBytes, os.path.join(pathToReadFrom, "itemsize.txt"))
    MPI.COMM_WORLD.Barrier()
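
reduceListRedundancy is not defined in these examples; from its use above it removes duplicate cube numbers from a list. A minimal order-preserving version (an assumed implementation of the helper):

def reduceListRedundancy(numbers):
    seen, out = set(), []
    for n in numbers:
        if n not in seen:
            seen.add(n)
            out.append(n)
    return out

# reduceListRedundancy([3, 1, 3, 2, 1]) -> [3, 1, 2]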