def get_out_dirs(self):
    """Create (if needed) and record the output directories for this model run."""
    # root directory for all results
    self.OUT_DIR = io_utils.get_dir('./results/')
    run_suffix = self.MODEL_RUN + "/"
    # saved model checkpoints live under results/models/<run>/
    self.MODEL_OUT_DIR = io_utils.get_dir(
        os.path.join(self.OUT_DIR, 'models/', run_suffix))
    # saved training summaries live under results/summaries/<run>/
    self.SUMMARY_OUT_DIR = io_utils.get_dir(
        os.path.join(self.OUT_DIR, 'summaries/', run_suffix))
def generate_example(ivel):
    """Run SEISMIC_CPML forward simulations for velocity model number `ivel`.

    One external simulation is run per source position in source_is[ivel].
    Each run writes its intermediary receiver/source files to a per-model
    temporary directory, invokes the external simulation binary, parses the
    resulting binary gather (and, for QC models, the full wavefields) into
    numpy arrays, and saves downsampled results under OUT_SIM_DIR.

    Returns True on success, False if the external simulation exits non-zero.

    NOTE(review): relies on module-level globals (VEL_DIR, QC_FREQ,
    OUT_SIM_DIR, SIM_RUN, source_is, receiver_is, N_REC, NSTEPS, NX, NY,
    DELTAX, DELTAY, DELTAT, NPOINTS_PML, ds, io_utils, run_command) defined
    elsewhere in the file — confirm before reuse.
    """

    SIM_NUM = ivel
    VEL_FILE = VEL_DIR + "velocity_%.8i.txt" % (ivel)

    # only dump full wavefields for every QC_FREQ-th model
    if SIM_NUM % QC_FREQ == 0: OUTPUT_WAVEFIELD = 1
    else: OUTPUT_WAVEFIELD = 0  # whether to output wavefield (I/O heavy!)

    # run a separate simulation for each source
    for isource, source_i in enumerate(source_is[SIM_NUM]):

        # create a temporary directory for simulation output (prevent I/O clash between processes)
        # NOTE(review): path is keyed by SIM_NUM only, so each isource
        # iteration reuses (and, at the end, deletes) the same directory.
        TEMP_OUT_SIM_DIR = OUT_SIM_DIR + str(SIM_NUM) + "/"
        io_utils.get_dir(TEMP_OUT_SIM_DIR)

        # create receiver file (first line: count, then one "i j" pair per receiver)
        RECEIVER_FILE = TEMP_OUT_SIM_DIR + "receiver_ijs_%s_%i.txt" % (SIM_RUN,
                                                                       SIM_NUM)
        with open(RECEIVER_FILE, 'w') as f:
            f.write("%i\n" % (N_REC))
            for rec_i in receiver_is:
                f.write("%i %i\n" %
                        (rec_i[0] + 1, rec_i[1] +
                         1))  # SEISMIC CPML uses indices starting at 1

        # create source file (single source)
        SOURCE_FILE = TEMP_OUT_SIM_DIR + "source_ijs_%s_%i.txt" % (SIM_RUN,
                                                                   SIM_NUM)
        with open(SOURCE_FILE, 'w') as f:
            f.write("%i\n" % (1))
            f.write("%i %i\n" % (source_i[0] + 1, source_i[1] +
                                 1))  # SEISMIC CPML uses indices starting at 1

        # RUN FORWARD SIMULATION

        # build the positional argument string for the external binary
        cmd = "./xmodified_seismic_CPML_2D_pressure_second_order " + \
            "%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s"%(
            NSTEPS,
            NX,
            NY,
            DELTAX,
            DELTAY,
            DELTAT,
            NPOINTS_PML,
            0,# SOURCE_X (m)
            0,# SOURCE_Z (m)
            SOURCE_FILE,
            VEL_FILE,
            TEMP_OUT_SIM_DIR,
            SIM_NUM,
            RECEIVER_FILE,
            OUTPUT_WAVEFIELD)

        return_code = run_command(cmd.split(" "), verbose=False)  # run

        if return_code != 0:
            print("ERROR: Simulation %i, %i broke, check stderr" %
                  (ivel, isource))
            # CLEAR INTERMEDIARY FILES (CAREFUL !)
            io_utils.remove_dir(TEMP_OUT_SIM_DIR)
            return False

        # IMPORT GATHER INTO NUMPY

        gather = np.zeros((N_REC, NSTEPS), dtype=np.float32)
        file = TEMP_OUT_SIM_DIR + "gather_%.8i.bin" % (SIM_NUM)
        # Read each binary gather file (MUCH QUICKER THAN READING TEXT FILES, beacause its directed)
        with open(file, 'rb') as f:
            #Note SEISMIC_CPML double precision saved to 64 bit floats (!) we DOWNSAMPLE to 32 bit floats
            # count = number of items (==np.float64 values) to process)
            for irec in np.arange(N_REC):
                gather[irec, :] = np.fromfile(f,
                                              dtype=np.float64,
                                              count=NSTEPS).astype(np.float32)

        # PRE-PROCESSING
        gather_decimated = np.copy(gather)  # important to copy
        gather_decimated = gather_decimated[:, ::ds]  # DOWNSAMPLE GATHER

        # SAVE
        # decimated gather is the primary training output
        np.save(OUT_SIM_DIR + "gather_%.8i_%.8i.npy" % (SIM_NUM, isource),
                gather_decimated)

        # IMPORT WAVEFIELDS INTO NUMPY (for QC)
        if OUTPUT_WAVEFIELD:
            wavefields = np.zeros((NSTEPS, NX, NY), dtype=np.float32)
            files = [
                TEMP_OUT_SIM_DIR + "wavefield_%.8i_%.8i.bin" % (SIM_NUM, i + 1)
                for i in range(NSTEPS)
            ]  # SEISMIC CPML uses indices starting at 1
            for i in range(NSTEPS):
                # Read each binary wavefield file (MUCH QUICKER THAN READING TEXT FILES, beacause its directed)
                with open(files[i], 'rb') as f:
                    #Note SEISMIC_CPML double precision saved to 64 bit floats (!) we DOWNSAMPLE to 32 bit floats
                    # count = number of items (==np.float64 values) to process)
                    for iz in np.arange(NY):
                        wavefields[i, :, iz] = np.fromfile(f,
                                                           dtype=np.float64,
                                                           count=NX).astype(
                                                               np.float32)

            # for QC models also keep the full wavefields and the raw
            # (un-decimated) gather alongside the decimated one
            np.save(
                OUT_SIM_DIR + "wavefields_%.8i_%.8i.npy" % (SIM_NUM, isource),
                wavefields)
            np.save(
                OUT_SIM_DIR + "gather_raw_%.8i_%.8i.npy" % (SIM_NUM, isource),
                gather)

        # CLEAR INTERMEDIARY FILES (CAREFUL !)
        io_utils.remove_dir(TEMP_OUT_SIM_DIR)

    return True
                block=True, timeout=10
            )  # try to get the next task, allow some time for process clash (ivel number)
        except queue.Empty:
            break  # kill process if no more tasks left
        example = generate_example(ivel)
        resultQ.put(example)  # push the example to the results queue


if __name__ == "__main__":

    # initiate
    # NOTE(review): ROOT_DIR, VEL_RUN, SIM_RUN, source_is and receiver_is must
    # be defined earlier in the file — not visible in this chunk.
    VEL_DIR = ROOT_DIR + "velocity/" + VEL_RUN + "/"
    OUT_SIM_DIR = ROOT_DIR + "gather/" + SIM_RUN + "/"

    # clear output directory for all simulations
    # NOTE(review): comment says "clear" but get_dir presumably only creates
    # the directory — confirm io_utils.get_dir semantics.
    io_utils.get_dir(OUT_SIM_DIR)

    #save copy of this script for future reference
    shutil.copyfile(
        'generate_forward_simulations.py',
        OUT_SIM_DIR + 'generate_forward_simulations_%s.py' % (SIM_RUN))

    # save source, receiver positions
    np.save(OUT_SIM_DIR + "source_is.npy", source_is)
    np.save(OUT_SIM_DIR + "receiver_is.npy", receiver_is)

    # make queues (task fan-out / result fan-in for worker processes)
    taskQ = multiprocessing.Queue()
    resultQ = multiprocessing.Queue()

    # push simulations to queues
    # NOTE(review): chunk is truncated here — the code that enqueues tasks is
    # not visible in this view.
# 예제 #4 (Example #4 — scrape/extraction separator; stray "0" artifact removed to comment)
# QC plot of the full velocity model
# NOTE(review): `data` is not defined in this chunk — presumably the Marmousi
# velocity model loaded earlier; confirm.
plt.figure(figsize=(20, 6))
plt.imshow(data.T)
plt.colorbar()
plt.show()

# 3. cut out interesting AOIs

np.random.seed(1234)  # for reproducible box positions
LX = 128  # side length (samples) of each square AOI
N_BOXES = 100
# boxes columns: [x-offset, y-offset, width, height]; width/height fixed at LX
boxes = LX * np.ones((N_BOXES, 4), dtype=int)
boxes[:, 0] = np.random.randint(1500, 2200, N_BOXES)
boxes[:, 1] = np.random.randint(60, 200, N_BOXES)

io_utils.get_dir("velocity/marmousi/")
for ibox, box in enumerate(boxes):
    # crop the AOI and save it as .npy
    aoi = data[box[0]:box[0] + box[2], box[1]:box[1] + box[3]]
    np.save("velocity/marmousi/velocity_%.8i.npy" % (ibox), aoi)
    print(aoi.shape)

    # SAVE TO .TXT FILE (for SEISMIC_CPML simulation)
    # NOTE(review): loops assume aoi is exactly LX x LX — a box placed near
    # the model edge would yield a smaller crop and an IndexError; confirm
    # the offset ranges guarantee full-size crops.
    with open("velocity/marmousi/velocity_%.8i.txt" % (ibox), 'w') as f:
        for iy in range(LX):
            for ix in range(LX):
                f.write(
                    "%i %i %.2f\n" %
                    (ix + 1, iy + 1,
                     aoi[ix, iy]))  # SEISMIC CPML uses indices starting at 1

plt.figure(figsize=(20, 6))
# 예제 #5 (Example #5 — scrape/extraction separator; stray "0" artifact removed to comment)
 def get_outdirs(self):
     """Create and then empty the summary and model output directories."""
     for out_dir in (self.SUMMARY_OUT_DIR, self.MODEL_OUT_DIR):
         io_utils.get_dir(out_dir)    # ensure the directory exists
         io_utils.clear_dir(out_dir)  # then wipe any previous contents
# 예제 #6 (Example #6 — scrape/extraction separator; stray "0" artifact removed to comment)
            self.vm_gradient_mu_L, self.vm_gradient_sigma_L)
        self.vm_v0_mu_N, self.vm_v0_sigma_N = lnLtoN(self.vm_v0_mu_L,
                                                     self.vm_v0_sigma_L)
        self.vm_bed_thickness_mu_N, self.vm_bed_thickness_sigma_N = lnLtoN(
            self.vm_bed_thickness_mu_L, self.vm_bed_thickness_sigma_L)


if __name__ == "__main__":

    # DEFINE PARAMETERS

    c = Constants()

    # SET UP DIRECTORIES

    io_utils.get_dir(c.VEL_DIR)
    #io_utils.clear_dir(c.VEL_DIR)  ### CAREFUL: DELETES ALL CONTENTS OF DIRECTORY RECURSIVELY

    shutil.copyfile('generate_velocity_models.py',
                    c.VEL_DIR + 'generate_velocity_models_%s.py' % (c.RUN))

    # GENERATE 1D MODELS

    np.random.seed(c.random_seed)  # for reproducibility
    vm_s = [generate_1D_random_velocity_trace(c) for _ in range(c.n_examples)]

    # CONVERT TO 2D, ADD FAULTS (MULTIPROCESSING)

    batches = np.array_split(np.arange(c.n_examples),
                             np.max([1, c.n_examples // 100]))
    print("%i batches created" % (len(batches)))