def focdefocang(img, mdl, nzp=64, nxp=64, strdz=None, strdx=None, rectx=30, rectz=30, verb=False):
  """
  Classifies an angle gather as focused or defocused

  Parameters:
    img   - the input extended image [na,nz,nx]
    mdl   - the trained keras model
    nzp   - z-dimension of the patch provided to the CNN [64]
    nxp   - x-dimension of the patch provided to the CNN [64]
    strdz - z-dimension of the patch stride (50% overlap) [nzp/2]
    strdx - x-dimension of the patch stride (50% overlap) [nxp/2]
    rectz - number of points to smooth in z direction [30]
    rectx - number of points to smooth in x direction [30]
    verb  - verbosity flag [False]

  Returns a smooth probability map of focused/defocused faults
  """
  # Get image dimensions
  na = img.shape[0]
  nz = img.shape[1]
  nx = img.shape[2]

  # Get strides
  if (strdz is None): strdz = int(nzp / 2)
  if (strdx is None): strdx = int(nxp / 2)

  # Build the Patch Extractors
  pea = PatchExtractor((na, nzp, nxp), stride=(na, strdz, strdx))
  aptch = np.squeeze(pea.extract(img))

  # Flatten patches and make a prediction on each
  numpz = aptch.shape[0]
  numpx = aptch.shape[1]
  aptchf = np.expand_dims(normalize(
      aptch.reshape([numpz * numpx, na, nzp, nxp])), axis=-1)
  focprd = mdl.predict(aptchf)

  # Assign the scalar prediction to the entire patch
  focprdptch = np.zeros([numpz * numpx, nzp, nxp])
  for iptch in range(numpz * numpx):
    focprdptch[iptch, :, :] = focprd[iptch]
  focprdptch = focprdptch.reshape([numpz, numpx, nzp, nxp])

  # Output probabilities
  per = PatchExtractor((nzp, nxp), stride=(strdz, strdx))
  focprdimg = np.zeros([nz, nx])
  _ = per.extract(focprdimg)
  focprdimg = per.reconstruct(focprdptch.reshape([numpz, numpx, nzp, nxp]))

  # Smooth the output probabilities
  focprdimgsm = smooth(focprdimg.astype('float32'), rect1=rectx, rect2=rectz)

  return focprdimgsm
def corrsim(img, tgt):
  """
  A cross-correlation image similarity metric

  Parameters:
    img - the input image
    tgt - the target image for comparison

  Returns a scalar metric for the similarity between images
  """
  # Normalize images
  normi = normalize(img)
  normt = normalize(tgt)
  # Cross correlation
  xcor = np.max(correlate2d(normi, normt, mode='same'))
  # Autocorrelations
  icor = np.max(correlate2d(normi, normi, mode='same'))
  tcor = np.max(correlate2d(normt, normt, mode='same'))
  # Similarity metric
  return xcor / np.sqrt(icor * tcor)
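# Example usage of corrsim (a minimal sketch; assumes this module's imports,
# i.e. numpy and scipy.signal.correlate2d, are available). The metric is ~1.0
# for identical images and decreases as the target diverges from the input:
#
#   import numpy as np
#   ref   = np.random.default_rng(0).normal(size=(64, 64))
#   noisy = ref + 0.1 * np.random.default_rng(1).normal(size=(64, 64))
#   print(corrsim(ref, ref))    # ~1.0
#   print(corrsim(ref, noisy))  # < 1.0, decreases with the noise level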
def segmentfaults(img, mdl, nzp=128, nxp=128, strdz=None, strdx=None, resize=False, verb=False):
  """
  Segments faults on a 2D image. Returns the probability of each
  pixel being a fault or not.

  Parameters:
    img    - the input image [nz,nx]
    mdl    - the trained keras model
    nzp    - z-dimension of the patch provided to the CNN [128]
    nxp    - x-dimension of the patch provided to the CNN [128]
    strdz  - z-dimension of the patch stride (50% overlap) [nzp/2]
    strdx  - x-dimension of the patch stride (50% overlap) [nxp/2]
    resize - option to resize the image to a power of two in each dimension [False]
    verb   - verbosity flag [False]

  Returns the spatial fault probability map [nz,nx]
  """
  # Resample to nearest power of 2
  if (resize):
    rimg = resizepow2(img, kind='linear')
  else:
    rimg = img
  # Perform the patch extraction
  if (strdz is None): strdz = int(nzp / 2)
  if (strdx is None): strdx = int(nxp / 2)
  pe = PatchExtractor((nzp, nxp), stride=(strdz, strdx))
  iptch = pe.extract(rimg)
  numpz = iptch.shape[0]
  numpx = iptch.shape[1]
  iptch = iptch.reshape([numpx * numpz, nzp, nxp, 1])
  # Normalize each patch
  niptch = np.zeros(iptch.shape)
  for ip in range(numpz * numpx):
    niptch[ip, :, :] = normalize(iptch[ip, :, :])
  # Make a prediction
  iprd = mdl.predict(niptch, verbose=verb)
  # Reconstruct the predictions
  ipra = iprd.reshape([numpz, numpx, nzp, nxp])
  iprb = pe.reconstruct(ipra)
  # If the reconstruction does not match the input size, also return
  # the cropped image that does match it
  if (iprb.shape != rimg.shape):
    iptch = pe.extract(rimg)
    rimg = pe.reconstruct(iptch)
    return iprb, rimg
  else:
    return iprb
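# Example usage of segmentfaults (a minimal sketch; 'fltsegcnn.h5' and
# 'migrated_image.npy' are placeholder file names, not part of this module):
#
#   import numpy as np
#   from tensorflow import keras
#
#   mdl = keras.models.load_model('fltsegcnn.h5')   # trained fault-segmentation CNN
#   img = np.load('migrated_image.npy')              # [nz,nx] migrated image
#   prb = segmentfaults(img, mdl, nzp=128, nxp=128)  # per-pixel fault probability
#   flt = (prb > 0.5).astype('float32')              # simple thresholded fault mask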
def detectfaultpatch(img, mdl, nzp=64, nxp=64, strdz=None, strdx=None, rectx=30, rectz=30, verb=False):
  """
  Detects if a fault is present within an image or not

  Parameters:
    img   - the input image [nz,nx]
    mdl   - the trained keras model
    nzp   - z-dimension of the patch provided to the CNN [64]
    nxp   - x-dimension of the patch provided to the CNN [64]
    strdz - z-dimension of the patch stride (50% overlap) [nzp/2]
    strdx - x-dimension of the patch stride (50% overlap) [nxp/2]
    rectz - number of points to smooth in z direction [30]
    rectx - number of points to smooth in x direction [30]
    verb  - verbosity flag [False]

  Returns a smooth probability map of detected faults
  """
  # Resample to nearest power of 2
  rimg = resizepow2(img, kind='linear')
  # Perform the patch extraction
  if (strdz is None): strdz = int(nzp / 2)
  if (strdx is None): strdx = int(nxp / 2)
  pe = PatchExtractor((nzp, nxp), stride=(strdz, strdx))
  iptch = pe.extract(rimg)
  numpz = iptch.shape[0]
  numpx = iptch.shape[1]
  iptch = iptch.reshape([numpx * numpz, nzp, nxp, 1])
  # Normalize and predict for each patch
  iprd = np.zeros(iptch.shape)
  for ip in range(numpz * numpx):
    iprd[ip, :, :] = mdl.predict(
        np.expand_dims(normalize(iptch[ip, :, :]), axis=0))
  # Reconstruct the predictions
  ipra = iprd.reshape([numpz, numpx, nzp, nxp])
  iprb = pe.reconstruct(ipra)
  # Smooth the predictions
  smprb = smooth(iprb.astype('float32'), rect1=rectx, rect2=rectz)

  return smprb
def segmentfaults(img,net,nzp=128,nxp=128,strdz=None,strdx=None,resize=False):
  """
  Segments faults on a 2D image. Returns the probability of each
  pixel being a fault or not.

  Parameters:
    img    - the input image [nz,nx]
    net    - the torch network with trained weights
    nzp    - z-dimension of the patch provided to the CNN [128]
    nxp    - x-dimension of the patch provided to the CNN [128]
    strdz  - z-dimension of the patch stride (50% overlap) [nzp/2]
    strdx  - x-dimension of the patch stride (50% overlap) [nxp/2]
    resize - option to resize the image to a power of two in each dimension [False]

  Returns the spatial fault probability map [nz,nx]
  """
  # Resample to nearest power of 2
  if(resize):
    rimg = resizepow2(img,kind='linear')
  else:
    rimg = img
  # Perform the patch extraction
  if(strdz is None): strdz = int(nzp/2)
  if(strdx is None): strdx = int(nxp/2)
  pe = PatchExtractor((nzp,nxp),stride=(strdz,strdx))
  iptch = pe.extract(rimg)
  numpz = iptch.shape[0]; numpx = iptch.shape[1]
  iptch = iptch.reshape([numpx*numpz,1,nzp,nxp])
  # Normalize each patch
  niptch = np.zeros(iptch.shape)
  for ip in range(numpz*numpx):
    niptch[ip,:,:] = normalize(iptch[ip,:,:])
  # Convert to torch tensor
  tniptch = torch.from_numpy(niptch.astype('float32'))
  # Make a prediction
  with torch.no_grad():
    iprd = torch.sigmoid(net(tniptch)).numpy()
  # Reconstruct the predictions
  ipra = iprd.reshape([numpz,numpx,nzp,nxp])
  iprb = pe.reconstruct(ipra)

  return iprb
def random_hale_vel(nz=900, nx=800, dz=0.005, dx=0.01675, vzin=None):
  """
  Generates a random realization of the Hale/BEI velocity model

  Parameters:
    nz   - output number of depth samples [900]
    nx   - output number of lateral samples [800]
    dz   - depth sampling [0.005]
    dx   - lateral sampling [0.01675]
    vzin - a v(z) profile that determines the velocity values [None]

  Returns the velocity model (km/s), the normalized reflectivity and the fault labels
  """
  dzm, dxm = dz * 1000, dx * 1000
  nlayer = 200
  minvel, maxvel = 1600, 5000
  vz = np.linspace(maxvel, minvel, nlayer)
  if (vzin is not None):
    vzr = resample(vzin, 90) * 1000
    vz[-90:] = vzr[::-1]

  mb = mdlbuild.mdlbuild(nx, dxm, 20, dy=dxm, dz=dzm, basevel=5000)
  thicks = np.random.randint(5, 15, nlayer)

  # Randomize the squishing depth
  sqz = np.random.choice(list(range(180, 199)))

  dlyr = 0.05
  # Build the sedimentary layers
  for ilyr in range(nlayer):
    mb.deposit(velval=vz[ilyr], thick=thicks[ilyr], dev_pos=0.0,
               layer=50, layer_rand=0.00, dev_layer=dlyr)
    if (ilyr == sqz):
      mb.squish(amp=150, azim=90.0, lam=0.4, rinline=0.0, rxline=0.0,
                mode='perlin', octaves=3, order=3)

  # Water layer
  mb.deposit(1480, thick=40, layer=150, dev_layer=0.0)
  mb.trim(top=0, bot=nz)

  # Fault positions (as fractions of the lateral extent)
  xpos = np.asarray([0.25, 0.30, 0.432, 0.544, 0.6, 0.663])
  xhi = xpos + 0.04
  xlo = xpos - 0.04
  cxpos = np.zeros(xpos.shape)

  nflt = len(xpos)
  for iflt in range(nflt):
    cxpos[iflt] = randfloat(xlo[iflt], xhi[iflt])
    if (iflt > 0 and cxpos[iflt] - cxpos[iflt - 1] < 0.07):
      cxpos[iflt] += 0.07
    cdaz = randfloat(16000, 20000)
    cdz = cdaz + randfloat(0, 6000)
    # Choose the theta_die
    theta_die = randfloat(1.5, 3.5)
    if (theta_die < 2.7):
      begz = randfloat(0.23, 0.26)
    else:
      begz = randfloat(0.26, 0.33)
    fpr = np.random.choice([True, True, False])
    rd = randfloat(52, 65)
    dec = randfloat(0.94, 0.96)
    mb.fault2d(begx=cxpos[iflt], begz=begz, daz=cdaz, dz=cdz, azim=180,
               theta_die=theta_die, theta_shift=4.0, dist_die=2.0,
               throwsc=35.0, fpr=fpr, rectdecay=rd, dec=dec)

  velw = mb.vel
  refw = normalize(mb.get_refl2d())
  lblw = mb.get_label2d()

  return velw * 0.001, refw, lblw
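# Example usage of random_hale_vel (a minimal sketch; the seed call is only to
# make the random layering and faulting reproducible):
#
#   import numpy as np
#   np.random.seed(2021)
#   vel, ref, lbl = random_hale_vel()   # velocity in km/s, normalized reflectivity, fault labels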
def fake_fault_img(vel, img, ox=7.035, dx=0.01675, ovx=7.035, dvx=0.0335, dz=0.005):
  """
  Puts a fake fault in the Hale/BEI image and prepares it for the
  application of the Hessian

  Parameters:
    vel - the Hale/BEI velocity model [nvx,nvz]
    img - the migrated Hale/BEI image [nx,nz]
    ox  - image lateral origin [7.035]
    dx  - image lateral sampling [0.01675]
    ovx - velocity model lateral origin [7.035]
    dvx - velocity model lateral sampling [0.0335]
    dz  - depth sampling [0.005]

  Returns the interpolated and padded velocity, the faulted (normalized) image
  and the fault labels
  """
  nx, nz = img.shape
  nvx, nvz = vel.shape
  # Taper the image
  img = np.ascontiguousarray(img).astype('float32')[20:-20, :]
  imgt = costaper(img, nw2=60)
  # Pad the image
  imgp = np.pad(imgt, ((110, 130), (0, 0)), mode='constant')
  # Replicate the image to make it 2.5D
  imgp3d = np.repeat(imgp[np.newaxis], 20, axis=0)
  veli = vel[np.newaxis]  # [ny,nx,nz]
  veli = np.ascontiguousarray(np.transpose(veli, (2, 0, 1)))  # [ny,nx,nz] -> [nz,ny,nx]
  # Interpolate the velocity model
  veli = interp_vel(nz, 1, 0.0, 1.0, nx, ox, dx, veli, dvx, 1.0, ovx, 0.0)
  veli = veli[:, 0, :].T
  velp = np.pad(veli, ((90, 110), (0, 0)), mode='edge')
  # Build a model that is the same size
  minvel = 1600
  maxvel = 5000
  nlayer = 200
  dzm, dxm = dz * 1000, dx * 1000
  nzm, nxm = nz, 800
  mb = mdlbuild.mdlbuild(nxm, dxm, 20, dy=dxm, dz=dzm, basevel=5000)
  props = mb.vofz(nlayer, minvel, maxvel, npts=2)
  thicks = np.random.randint(5, 15, nlayer)
  dlyr = 0.05
  for ilyr in range(nlayer):
    mb.deposit(velval=props[ilyr], thick=thicks[ilyr], dev_pos=0.0,
               layer=50, layer_rand=0.00, dev_layer=dlyr)
  mb.trim(top=0, bot=900)
  # Insert the migrated image into the model and fault it
  mb.vel[:] = imgp3d[:]
  mb.fault2d(begx=0.7, begz=0.26, daz=20000, dz=24000, azim=180.0,
             theta_die=2.5, theta_shift=4.0, dist_die=2.0, throwsc=35.0, fpr=False)
  refw = normalize(mb.vel)
  lblw = mb.get_label2d()

  return velp, refw, lblw
def find_flt_patches(img, mdl, dz, mindepth, nzp=64, nxp=64, strdz=None, strdx=None,
                     pthresh=0.2, nthresh=50, oz=0.0, qcimgs=True):
  """
  Determines if patches contain a fault or not

  Parameters:
    img      - input fault seismic image [nz,nx]
    mdl      - fault segmentation keras CNN
    dz       - depth sampling
    mindepth - minimum depth after which to look for faults
    nzp      - size of patch in z dimension [64]
    nxp      - size of patch in x dimension [64]
    strdz    - size of stride in z dimension [nzp/2]
    strdx    - size of stride in x dimension [nxp/2]
    pthresh  - probability threshold for determining if a pixel contains a fault [0.2]
    nthresh  - number of fault pixels in a patch needed to determine that it has a fault [50]
    oz       - depth origin [0.0]
    qcimgs   - flag for returning the segmented fault image as well as the fault
               patches for QC [True]

  Returns a patch array where the patches are valued at one (if the patch
  contains a fault) or zero (if it does not)
  """
  # Get image dimensions
  nz = img.shape[0]
  nx = img.shape[1]

  # Get strides
  if (strdz is None): strdz = int(nzp / 2)
  if (strdx is None): strdx = int(nxp / 2)

  # Extract patches on the image
  pe = PatchExtractor((nzp, nxp), stride=(strdz, strdx))
  iptch = pe.extract(img)
  # Flatten patches and make a prediction on each
  numpz = iptch.shape[0]
  numpx = iptch.shape[1]
  iptchf = np.expand_dims(normalize(iptch.reshape([numpz * numpx, nzp, nxp])), axis=-1)
  fltpred = mdl.predict(iptchf)

  # Reshape the fault prediction array
  fltpred = fltpred.reshape([numpz, numpx, nzp, nxp])

  # Output arrays
  hasfault = np.zeros(iptch.shape)
  flttrsh = np.zeros(iptch.shape)

  # Check if each patch has a fault
  for izp in range(numpz):
    for ixp in range(numpx):
      # Compute current depth
      z = izp * strdz * dz + oz
      if (z > mindepth):
        # Threshold the patch
        flttrsh[izp, ixp] = thresh(fltpred[izp, ixp], pthresh)
        if (np.sum(flttrsh[izp, ixp]) > nthresh):
          hasfault[izp, ixp, :, :] = 1.0

  # Reconstruct the images for QC
  if (qcimgs):
    faultimg = pe.reconstruct(fltpred)
    thrshimg = pe.reconstruct(flttrsh)
    hsfltimg = pe.reconstruct(hasfault)
    return hasfault, hsfltimg, thresh(thrshimg, 0.0), faultimg
  else:
    return hasfault
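# Example usage of find_flt_patches (a minimal sketch; 'fltsegcnn.h5' and
# 'stack.npy' are placeholder file names). The hasfault output can be passed
# to estro_fltfocdefoc below to restrict the rho estimate to faulted patches:
#
#   import numpy as np
#   from tensorflow import keras
#
#   segnet = keras.models.load_model('fltsegcnn.h5')
#   img = np.load('stack.npy')                       # [nz,nx]
#   hasfault, hsfltimg, thrshimg, faultimg = find_flt_patches(
#       img, segnet, dz=0.005, mindepth=0.2, pthresh=0.2, nthresh=50)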
def extract_focfltptchs(fimg, fltlbl, nxp=64, nzp=64, strdx=32, strdz=32,
                        pixthresh=20, norm=True, qcptchgrd=False, dz=10, dx=10):
  """
  Extracts patches that contain faults from a faulted image

  Parameters:
    fimg      - input faulted image [nz,nx]
    fltlbl    - input fault label [nz,nx]
    nxp       - size of patch in x [64]
    nzp       - size of patch in z [64]
    strdx     - patch stride in x [32]
    strdz     - patch stride in z [32]
    pixthresh - number of fault pixels required for a patch to be kept [20]
    norm      - normalize each output patch [True]
    qcptchgrd - make a plot of the patch grid on the image [False]
    dz        - vertical sampling for plotting the patch grid [10]
    dx        - lateral sampling for plotting the patch grid [10]

  Returns an array of the extracted fault patches
  """
  # Check that fimg and fltlbl are the same size
  if (fimg.shape[0] != fltlbl.shape[0] or fimg.shape[1] != fltlbl.shape[1]):
    raise Exception("Input image and fault label must have same dimensions")
  # Patch extraction on the images
  pe = PatchExtractor((nzp, nxp), stride=(strdz, strdx))
  fptch = pe.extract(fimg)
  lptch = pe.extract(fltlbl)
  numpz = fptch.shape[0]
  numpx = fptch.shape[1]
  # Output normalized patches
  nptch = []
  if (qcptchgrd):
    nz = fimg.shape[0]
    nx = fimg.shape[1]
    # Plot the patch grid
    bgz = 0
    egz = (nz) * dz / 1000.0
    dgz = nzp * dz / 1000.0
    bgx = 0
    egx = (nx) * dx / 1000.0
    dgx = nxp * dx / 1000.0
    zticks = np.arange(bgz, egz, dgz)
    xticks = np.arange(bgx, egx, dgx)
    fig = plt.figure(figsize=(10, 6))
    ax = fig.gca()
    ax.imshow(fimg, extent=[0, (nx) * dx / 1000.0, (nz) * dz / 1000.0, 0],
              cmap='gray', interpolation='sinc', vmin=-2.5, vmax=2.5)
    ax.set_xlabel('X (km)', fontsize=15)
    ax.set_ylabel('Z (km)', fontsize=15)
    ax.tick_params(labelsize=15)
    ax.set_xticks(xticks)
    ax.set_yticks(zticks)
    ax.grid(linestyle='-', color='k', linewidth=2)
    plt.show()
  # Loop over each patch
  for izp in range(numpz):
    for ixp in range(numpx):
      # Keep the patch only if it contains enough fault pixels
      if (np.sum(lptch[izp, ixp]) >= pixthresh):
        if (norm):
          nptch.append(normalize(fptch[izp, ixp]))
        else:
          nptch.append(fptch[izp, ixp])

  return np.asarray(nptch)
def faultpatch_labels(img, fltlbl, nxp=64, nzp=64, strdx=32, strdz=32,
                      pixthresh=20, norm=True, ptchimg=False, qcptchgrd=False, dz=10, dx=10):
  """
  Assigns a zero or one to an image patch based on the number of
  fault pixels present within the patch

  Parameters:
    img       - Input seismic image (to be patched) [nz,nx]
    fltlbl    - Input segmentation fault label [nz,nx]
    nxp       - Size of patch in x [64]
    nzp       - Size of patch in z [64]
    strdx     - Patch stride in x [32]
    strdz     - Patch stride in z [32]
    pixthresh - Number of fault pixels to determine if patch has a fault [20]
    norm      - Normalize the image patches [True]
    ptchimg   - Return the reconstructed patch label image [False]
    qcptchgrd - Makes a plot of the patch grid on the image [False]
    dx        - Lateral sampling for plotting patch grid [10]
    dz        - Vertical sampling for plotting patch grid [10]

  Returns:
    Image and label patches [numpz,numpx,nzp,nxp] and, optionally, the
    reconstructed label image
  """
  # Check that img and fltlbl are the same size
  if (img.shape[0] != fltlbl.shape[0] or img.shape[1] != fltlbl.shape[1]):
    raise Exception("Input image and fault label must have same dimensions")
  # Extract the patches
  pe = PatchExtractor((nzp, nxp), stride=(strdz, strdx))
  iptch = pe.extract(img)
  lptch = pe.extract(fltlbl)
  numpz = iptch.shape[0]
  numpx = iptch.shape[1]
  if (qcptchgrd):
    nz = img.shape[0]
    nx = img.shape[1]
    # Plot the patch grid
    bgz = 0
    egz = (nz) * dz / 1000.0
    dgz = nzp * dz / 1000.0
    bgx = 0
    egx = (nx) * dx / 1000.0
    dgx = nxp * dx / 1000.0
    zticks = np.arange(bgz, egz, dgz)
    xticks = np.arange(bgx, egx, dgx)
    fig = plt.figure(figsize=(10, 6))
    ax = fig.gca()
    ax.imshow(img, extent=[0, (nx) * dx / 1000.0, (nz) * dz / 1000.0, 0],
              cmap='gray', interpolation='sinc')
    ax.set_xticks(xticks)
    ax.set_yticks(zticks)
    ax.grid(linestyle='-', color='k', linewidth=2)
    plt.show()
  # Output image patches
  iptcho = np.zeros(iptch.shape)
  # Output patch label
  ptchlbl = np.zeros(lptch.shape)
  # Check if each patch contains faults
  for izp in range(numpz):
    for ixp in range(numpx):
      if (np.sum(lptch[izp, ixp]) >= pixthresh):
        ptchlbl[izp, ixp, :, :] = 1
      if (norm):
        iptcho[izp, ixp] = normalize(iptch[izp, ixp, :, :])
      else:
        iptcho[izp, ixp] = iptch[izp, ixp]
  # Reconstruct the patch label image
  ptchlblimg = pe.reconstruct(ptchlbl)

  if (ptchimg):
    return iptcho, ptchlbl, ptchlblimg
  else:
    return iptcho, ptchlbl
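# Example of building patch/label pairs for a patch-level fault classifier
# (a minimal sketch; layeredfaults2d further down in this module is used here
# only as a convenient source of an image and its fault label):
#
#   vel, ref, img, lbl = layeredfaults2d()
#   iptch, plbl = faultpatch_labels(img, lbl, nxp=64, nzp=64, pixthresh=20)
#   x = iptch.reshape([-1, 64, 64, 1])     # CNN input patches
#   y = plbl[:, :, 0, 0].reshape([-1, 1])  # one 0/1 label per patch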
def focdefocflt_labels(dimg, fimg, fltlbl, nxp=64, nzp=64, strdx=32, strdz=32,
                       pixthresh=20, metric='mse', focthresh=0.5, norm=True,
                       imgs=False, qcptchgrd=False, dz=10, dx=10):
  """
  Computes the fault-based focused and defocused labels

  Parameters:
    dimg      - Input defocused image [nz,nx]
    fimg      - Input focused image [nz,nx]
    fltlbl    - Input fault labels [nz,nx]
    nxp       - Size of patch in x [64]
    nzp       - Size of patch in z [64]
    strdx     - Patch stride in x [32]
    strdz     - Patch stride in z [32]
    pixthresh - Number of fault pixels to determine if patch has a fault [20]
    metric    - Metric for determining if a fault is focused or not (['mse'] or 'ssim')
    focthresh - Threshold applied to the metric to determine focusing [0.5]
    norm      - Normalize the images [True]
    imgs      - Return the label image and the norm image [False]
    qcptchgrd - Makes a plot of the patch grid on the image [False]
    dx        - Lateral sampling for plotting patch grid [10]
    dz        - Vertical sampling for plotting patch grid [10]
  """
  # Check that dimg, fimg and fltlbl are the same size
  if (dimg.shape[0] != fltlbl.shape[0] or dimg.shape[1] != fltlbl.shape[1]):
    raise Exception("Input image and fault label must have same dimensions")
  if (dimg.shape[0] != fimg.shape[0] or dimg.shape[1] != fimg.shape[1]):
    raise Exception("Input defocused and focused images must have same dimensions")
  # Patch extraction on the images
  pe = PatchExtractor((nzp, nxp), stride=(strdz, strdx))
  dptch = pe.extract(dimg)
  fptch = pe.extract(fimg)
  lptch = pe.extract(fltlbl)
  numpz = dptch.shape[0]
  numpx = dptch.shape[1]
  if (qcptchgrd):
    nz = dimg.shape[0]
    nx = dimg.shape[1]
    # Plot the patch grid
    bgz = 0
    egz = (nz) * dz / 1000.0
    dgz = nzp * dz / 1000.0
    bgx = 0
    egx = (nx) * dx / 1000.0
    dgx = nxp * dx / 1000.0
    zticks = np.arange(bgz, egz, dgz)
    xticks = np.arange(bgx, egx, dgx)
    fig = plt.figure(figsize=(10, 6))
    ax = fig.gca()
    ax.imshow(dimg, extent=[0, (nx) * dx / 1000.0, (nz) * dz / 1000.0, 0],
              cmap='gray', interpolation='sinc')
    ax.set_xticks(xticks)
    ax.set_yticks(zticks)
    ax.grid(linestyle='-', color='k', linewidth=2)
    plt.show()
  # Output image patches
  dptcho = []
  fptcho = []
  # Output patch label
  ptchlbl = np.zeros(lptch.shape)
  lptcho = []
  # Norm image
  ptchnrm = np.zeros(lptch.shape)
  # Loop over each patch
  for izp in range(numpz):
    for ixp in range(numpx):
      # Check if patch contains faults
      if (np.sum(lptch[izp, ixp]) >= pixthresh):
        # Compute the desired norm between the two images
        if (metric == 'mse'):
          ptchnrm[izp, ixp, :, :] = mse(dptch[izp, ixp], fptch[izp, ixp])
          if (ptchnrm[izp, ixp, int(nzp / 2), int(nxp / 2)] >= focthresh):
            ptchlbl[izp, ixp, :, :] = 0
          else:
            ptchlbl[izp, ixp, :, :] = 1
        elif (metric == 'ssim'):
          ptchnrm[izp, ixp] = ssim(dptch[izp, ixp], fptch[izp, ixp])
          if (ptchnrm[izp, ixp, int(nzp / 2), int(nxp / 2)] >= focthresh):
            ptchlbl[izp, ixp, :, :] = 1
          else:
            ptchlbl[izp, ixp, :, :] = 0
        elif (metric == 'corr'):
          # 'corr' metric not yet implemented; the correlation is left
          # commented out and ptchnrm/ptchlbl remain zero for this patch
          ndptch = normalize(dptch[izp, ixp])
          nfptch = normalize(fptch[izp, ixp])
          #ptchnrm[izp,ixp] = np.max(correlate2d(ndptch,nfptch,mode='same'))
        else:
          raise Exception("Norm %s not yet implemented. Please try 'ssim' or 'mse'" % (metric))
        # Append label and image patches to the output lists
        lptcho.append(ptchlbl[izp, ixp, int(nzp / 2), int(nxp / 2)])
        if (norm):
          dptcho.append(normalize(dptch[izp, ixp, :, :]))
          fptcho.append(normalize(fptch[izp, ixp, :, :]))
        else:
          dptcho.append(dptch[izp, ixp])
          fptcho.append(fptch[izp, ixp])
  # Convert to numpy arrays
  dptcho = np.asarray(dptcho)
  fptcho = np.asarray(fptcho)
  lptcho = np.asarray(lptcho)
  # Reconstruct the patch label image and patch norm image (for QC purposes)
  ptchlblimg = pe.reconstruct(ptchlbl)
  ptchnrmimg = pe.reconstruct(ptchnrm)

  if (imgs):
    return dptcho, fptcho, lptcho, ptchlblimg, ptchnrmimg
  else:
    return dptcho, fptcho, lptcho
def estro_fltangfocdefoc(rimgs,foccnn,dro,oro,nzp=64,nxp=64,strdz=None,strdx=None, # Patching parameters
                         rectz=30,rectx=30,fltthresh=75,fltlbls=None,qcimgs=True,verb=False,
                         fmwrk='torch',device=None):
  """
  Estimates rho by choosing the residually migrated patch that has the highest
  angle-gather and fault focus probability given by the neural network

  Parameters
    rimgs     - residually migrated angle-gather images [nro,na,nz,nx]
    foccnn    - CNN for determining if an angle gather/fault is focused or not
    dro       - residual migration sampling
    oro       - residual migration origin
    nzp       - size of patch in z dimension [64]
    nxp       - size of patch in x dimension [64]
    strdz     - size of stride in z dimension [nzp/2]
    strdx     - size of stride in x dimension [nxp/2]
    rectz     - length of smoother in z dimension [30]
    rectx     - length of smoother in x dimension [30]
    fltthresh - fault-pixel threshold for estimating rho within a patch [75]
    fltlbls   - input fault segmentation labels [None]
    qcimgs    - flag for returning the fault focusing probabilities [nro,nz,nx]
                and fault patches [nz,nx]
    verb      - verbosity flag [False]
    fmwrk     - deep learning framework to be used for the prediction [torch]
    device    - device for pytorch networks

  Returns an estimate of rho(x,z)
  """
  # Get image dimensions
  nro = rimgs.shape[0]; na = rimgs.shape[1]; nz = rimgs.shape[2]; nx = rimgs.shape[3]

  # Get strides
  if(strdz is None): strdz = int(nzp/2)
  if(strdx is None): strdx = int(nxp/2)

  # Build the Patch Extractors
  pea = PatchExtractor((nro,na,nzp,nxp),stride=(nro,na,strdz,strdx))
  aptch = np.squeeze(pea.extract(rimgs))
  # Flatten patches and make a prediction on each
  numpz = aptch.shape[0]; numpx = aptch.shape[1]
  if(fmwrk == 'torch'):
    aptchf = normalize(aptch.reshape([nro*numpz*numpx,1,na,nzp,nxp]))
    with(torch.no_grad()):
      aptchft = torch.tensor(aptchf)
      focprdt = torch.zeros([nro*numpz*numpx,1])
      for iptch in progressbar(range(aptchf.shape[0]),verb=verb):
        gptch = aptchft[iptch].to(device)
        focprdt[iptch] = torch.sigmoid(foccnn(gptch.unsqueeze(0)))
      focprd = focprdt.cpu().numpy()
  elif(fmwrk == 'tf'):
    aptchf = np.expand_dims(normalize(aptch.reshape([nro*numpz*numpx,na,nzp,nxp])),axis=-1)
    focprd = foccnn.predict(aptchf,verbose=verb)
  elif(fmwrk is None):
    aptchf = normalize(aptch.reshape([nro*numpz*numpx,na,nzp,nxp]))
    focprd = np.zeros([nro*numpz*numpx,1])
    for iptch in progressbar(range(aptchf.shape[0]),verb=verb):
      focprd[iptch] = semblance_power(aptchf[iptch])

  # Assign prediction to entire patch for QC
  focprdptch = np.zeros([numpz*numpx*nro,nzp,nxp])
  for iptch in range(nro*numpz*numpx):
    focprdptch[iptch,:,:] = focprd[iptch]
  focprdptch = focprdptch.reshape([numpz,numpx,nro,nzp,nxp])

  # Save predictions as a function of rho only
  focprdr = focprd.reshape([numpz,numpx,nro])

  # Output rho image
  pe = PatchExtractor((nzp,nxp),stride=(strdz,strdx))
  rho = np.zeros([nz,nx])
  rhop = pe.extract(rho)

  # Output probabilities
  focprdnrm = np.zeros(focprdptch.shape)
  per = PatchExtractor((nro,nzp,nxp),stride=(nro,strdz,strdx))
  focprdimg = np.zeros([nro,nz,nx])
  _ = per.extract(focprdimg)

  # Fault patches (if no labels are given, all patches are considered to have faults)
  if(fltlbls is None):
    fltptch = np.ones([numpz,numpx,nzp,nxp],dtype='int')
  else:
    pef = PatchExtractor((nzp,nxp),stride=(strdz,strdx))
    fltptch = pef.extract(fltlbls)

  # Estimate rho from angle-fault focus probabilities
  hlfz = int(nzp/2); hlfx = int(nxp/2)
  for izp in range(numpz):
    for ixp in range(numpx):
      if(np.sum(fltptch[izp,ixp]) > fltthresh):
        # Find maximum probability and compute rho
        iprb = focprdptch[izp,ixp,:,hlfz,hlfx]
        rhop[izp,ixp,:,:] = np.argmax(iprb)*dro + oro
        # Normalize across rho within a patch for QC
        if(np.max(focprdptch[izp,ixp,:,hlfz,hlfx]) == 0.0):
          focprdnrm[izp,ixp,:,:,:] = 0.0
        else:
          focprdnrm[izp,ixp,:,:,:] = focprdptch[izp,ixp,:,:,:]/np.max(focprdptch[izp,ixp,:,hlfz,hlfx])
      else:
        rhop[izp,ixp,:,:] = 1.0

  # Reconstruct rho and probabilities
  rho = pe.reconstruct(rhop)
  focprdimg = per.reconstruct(focprdnrm.reshape([1,numpz,numpx,nro,nzp,nxp]))

  # Smooth and return rho, fault patches and fault probabilities
  rhosm = smooth(rho.astype('float32'),rect1=rectx,rect2=rectz)

  if(qcimgs):
    focprdimgsm = np.zeros(focprdimg.shape)
    # Smooth the fault focusing for each rho
    for iro in range(nro):
      focprdimgsm[iro] = smooth(focprdimg[iro].astype('float32'),rect1=rectx,rect2=rectz)
    # Return images
    return rhosm,focprdimgsm,focprdr
  else:
    return rhosm
def estro_fltfocdefoc(rimgs,foccnn,dro,oro,nzp=64,nxp=64,strdz=None,strdx=None, # Patching parameters
                      hasfault=None,rectz=30,rectx=30,qcimgs=True):
  """
  Estimates rho by choosing the residually migrated patch that has the highest
  fault focus probability given by the neural network

  Parameters
    rimgs    - residually migrated images [nro,nz,nx]
    foccnn   - CNN for determining if a fault is focused or not
    dro      - residual migration sampling
    oro      - residual migration origin
    nzp      - size of patch in z dimension [64]
    nxp      - size of patch in x dimension [64]
    strdz    - size of stride in z dimension [nzp/2]
    strdx    - size of stride in x dimension [nxp/2]
    hasfault - array indicating if a patch has faults or not [None]
               If None, all patches are considered to have faults
    rectz    - length of smoother in z dimension [30]
    rectx    - length of smoother in x dimension [30]
    qcimgs   - flag for returning the fault focusing probabilities [nro,nz,nx]
               and fault patches [nz,nx]

  Returns an estimate of rho(x,z)
  """
  # Get image dimensions
  nro = rimgs.shape[0]; nz = rimgs.shape[1]; nx = rimgs.shape[2]

  # Get strides
  if(strdz is None): strdz = int(nzp/2)
  if(strdx is None): strdx = int(nxp/2)

  # Extract patches from residual migration image
  per = PatchExtractor((nro,nzp,nxp),stride=(nro,strdz,strdx))
  rptch = np.squeeze(per.extract(rimgs))
  # Flatten patches and make a prediction on each
  numpz = rptch.shape[0]; numpx = rptch.shape[1]
  rptchf = np.expand_dims(normalize(rptch.reshape([nro*numpz*numpx,nzp,nxp])),axis=-1)
  focprd = foccnn.predict(rptchf)

  # Assign prediction to entire patch for QC
  focprdptch = np.zeros([nro*numpz*numpx,nzp,nxp])
  for iptch in range(nro*numpz*numpx):
    focprdptch[iptch,:,:] = focprd[iptch]
  focprdptch = focprdptch.reshape([numpz,numpx,nro,nzp,nxp])

  if(hasfault is None):
    hasfault = np.ones([numpz,numpx,nzp,nxp],dtype='int')

  # Output rho image
  rho = np.zeros([nz,nx])
  pe = PatchExtractor((nzp,nxp),stride=(strdz,strdx))
  rhop = pe.extract(rho)

  # Using hasfault array, estimate rho from fault focus probabilities
  hlfz = int(nzp/2); hlfx = int(nxp/2)
  for izp in range(numpz):
    for ixp in range(numpx):
      if(hasfault[izp,ixp,hlfz,hlfx]):
        # Find maximum probability and compute rho
        iprb = focprdptch[izp,ixp,:,hlfz,hlfx]
        rhop[izp,ixp,:,:] = np.argmax(iprb)*dro + oro
      else:
        rhop[izp,ixp,:,:] = 1.0

  # Reconstruct the rho, fault patches and fault probabilities
  rho = pe.reconstruct(rhop)
  focprdimg = per.reconstruct(focprdptch.reshape([1,numpz,numpx,nro,nzp,nxp]))

  # Smooth and return rho, fault patches and fault probabilities
  rhosm = smooth(rho.astype('float32'),rect1=rectx,rect2=rectz)

  if(qcimgs):
    focprdimgsm = np.zeros(focprdimg.shape)
    # Smooth the fault focusing for each rho
    for iro in range(nro):
      focprdimgsm[iro] = smooth(focprdimg[iro].astype('float32'),rect1=rectx,rect2=rectz)
    # Return images
    return rhosm,focprdimgsm
  else:
    return rhosm
def undulatingrandfaults2d(nz=512,nx=1000,dz=12.5,dx=25.0,nlayer=21,minvel=1600,maxvel=3000,rect=0.5,
                           nfx=3,ofx=0.4,dfx=0.1,ofz=0.3,noctaves=None,npts=None,amp=None):
  """
  Builds a 2D faulted velocity model with undulating layers.
  Returns the velocity model, reflectivity, fault labels and a zero-offset image

  Parameters:
    nz       - number of depth samples [512]
    nx       - number of lateral samples [1000]
    dz       - depth sampling interval [12.5]
    dx       - lateral sampling interval [25.0]
    nlayer   - number of deposited layers (there exist many fine layers within a deposit) [21]
    minvel   - minimum velocity in model [1600]
    maxvel   - maximum velocity in model [3000]
    rect     - radius for gaussian smoother [0.5]
    nfx      - number of faults [3]
    ofx      - starting position of faults (percentage of total model) [0.4]
    dfx      - spacing between faults (percentage of total model) [0.1]
    ofz      - central depth of faults (percentage of total model) [0.3]
    noctaves - octaves perlin parameter for squish [varies between 3 and 6]
    npts     - grid size for perlin noise [3]
    amp      - amplitude of folding [varies between 200 and 500]

  Returns:
    The velocity, reflectivity, fault label and image all of size [nz,nx]
  """
  # Model building object
  # Remember to change dist_die based on ny
  mb = mdlbuild.mdlbuild(nx,dx,ny=20,dy=dx,dz=dz,basevel=5000)
  nzi = 1000 # internal size is 1000

  # Propagation velocities
  props = np.linspace(maxvel,minvel,nlayer)

  # Specify the thicknesses
  thicks = np.random.randint(40,61,nlayer)

  dlyr = 0.05
  for ilyr in progressbar(range(nlayer), "ndeposit:", 40):
    mb.deposit(velval=props[ilyr],thick=thicks[ilyr],dev_pos=0.0,layer=50,layer_rand=0.00,dev_layer=dlyr)
    if(ilyr == int(nlayer-2)):
      amp  = rndut.randfloat(200,500)
      octs = np.random.randint(2,7)
      npts = np.random.randint(2,5)
      mb.squish(amp=amp,azim=90.0,lam=0.4,rinline=0.0,rxline=0.0,mode='perlin',npts=npts,octaves=octs,order=3)

  # Water deposit
  mb.deposit(1480,thick=80,layer=150,dev_layer=0.0)

  # Smooth the interface
  mb.smooth_model(rect1=1,rect2=5,rect3=1)

  # Trim model before faulting
  mb.trim(0,1100)

  #XXX: Thresh should be a function of theta_shift
  # Generate the fault positions
  flttype = np.random.choice([0,1,2,3,4,5])

  if(flttype == 0):
    largefaultblock(mb,0.3,0.7,ofz,nfl=6)
  elif(flttype == 1):
    slidingfaultblock(mb,0.3,0.7,ofz,nfl=6)
  elif(flttype == 2):
    mediumfaultblock(mb,0.3,0.7,0.25,space=0.02,nfl=10)
  elif(flttype == 3):
    mediumfaultblock(mb,0.3,0.7,0.25,space=0.005,nfl=20)
  elif(flttype == 4):
    tinyfaultblock(mb,0.3,0.7,0.25,space=0.02,nfl=10)
  else:
    tinyfaultblock(mb,0.3,0.7,0.25,space=0.005,nfl=20)

  # Get the model
  vel = gaussian_filter(mb.vel[:,:nzi].T,sigma=rect).astype('float32')
  lbl = mb.get_label2d()[:,:nzi].T
  ref = mb.get_refl2d()[:,:nzi].T

  # Parameters for ricker wavelet
  nt = 250; ot = 0.0; dt = 0.001; ns = int(nt/2)
  amp = 1.0; dly = 0.125
  minf = 100.0; maxf = 120.0

  # Create normalized image
  f = rndut.randfloat(minf,maxf)
  wav = ricker(nt,dt,f,amp,dly)
  img = dlut.normalize(np.array([np.convolve(ref[:,ix],wav) for ix in range(nx)])[:,ns:nzi+ns].T)
  nze = dlut.normalize(bandpass(np.random.rand(nzi,nx)*2-1, 2.0, 0.01, 2, pxd=43))/rndut.randfloat(3,5)
  img += nze

  # Window the models and return
  f1 = 50
  velwind = vel[f1:f1+nz,:]
  lblwind = lbl[f1:f1+nz,:]
  refwind = ref[f1:f1+nz,:]
  imgwind = img[f1:f1+nz,:]

  return velwind,refwind,imgwind,lblwind
def velfaultsrandom(nz=512,nx=1024,ny=20,dz=12.5,dx=25.0,nlayer=20,
                    minvel=1600,maxvel=5000,rect=0.5,
                    verb=True,**kwargs):
  """
  Builds a 2D highly faulted and folded velocity model.
  Returns the velocity model, reflectivity, fault labels and a zero-offset image

  Parameters:
    nz     - number of depth samples [512]
    nx     - number of lateral samples [1024]
    ny     - number of samples in the y direction [20]
    dz     - depth sampling interval [12.5]
    dx     - lateral sampling interval [25.0]
    nlayer - number of deposited layers (there exist many fine layers within a deposit) [20]
    minvel - minimum velocity in model [1600]
    maxvel - maximum velocity in model [5000]
    rect   - length of gaussian smoothing [0.5]
    verb   - verbosity flag [True]

  Returns
    The velocity, reflectivity, fault label and image all of size [nx,nz]
  """
  # Internal model size
  nzi = 1000; nxi = 1000
  # Model building object
  mb = mdlbuild.mdlbuild(nxi,dx,ny,dy=dx,dz=dz,basevel=5000)

  # First build the v(z) model
  props = mb.vofz(nlayer,minvel,maxvel,npts=kwargs.get('nptsvz',2))

  # Specify the thicknesses
  thicks = np.random.randint(40,61,nlayer)

  # Determine when to fold the deposits
  sqlyrs = sorted(mb.findsqlyrs(3,nlayer,5))
  csq = 0

  dlyr = 0.05
  for ilyr in progressbar(range(nlayer), "ndeposit:", 40, verb=verb):
    mb.deposit(velval=props[ilyr],thick=thicks[ilyr],dev_pos=0.0,
               layer=kwargs.get('layer',150),layer_rand=0.00,dev_layer=dlyr)
    # Random folding
    if(ilyr in sqlyrs):
      if(sqlyrs[csq] < 15):
        # Random amplitude variation in the folding
        amp = np.random.rand()*(3000-500) + 500
        mb.squish(amp=amp,azim=90.0,lam=0.4,rinline=0.0,rxline=0.0,mode='perlin',order=3)
      elif(sqlyrs[csq] >= 15 and sqlyrs[csq] < 18):
        amp = np.random.rand()*(1800-500) + 500
        mb.squish(amp=amp,azim=90.0,lam=0.4,rinline=0.0,rxline=0.0,mode='perlin',order=3)
      else:
        amp = np.random.rand()*(500-300) + 300
        mb.squish(amp=amp,azim=90.0,lam=0.4,rinline=0.0,rxline=0.0,mode='perlin')
      csq += 1

  # Water deposit
  mb.deposit(1480,thick=50,layer=150,dev_layer=0.0)

  # Smooth any unconformities
  mb.smooth_model(rect1=1,rect2=5,rect3=1)

  # Trim model before faulting
  mb.trim(0,1100)

  # Fault it up!
  azims = [0.0,180.0]
  fprs  = [True,False]

  # Large faults
  nlf = np.random.randint(2,5)
  for ifl in progressbar(range(nlf), "nlfaults:", 40, verb=verb):
    azim = np.random.choice(azims)
    fpr  = np.random.choice(fprs)
    xpos = rndut.randfloat(0.1,0.9)
    mb.largefault(azim=azim,begz=0.65,begx=xpos,begy=0.5,dist_die=4.0,tscale=6.0,fpr=fpr,twod=True)

  # Medium faults
  nmf = np.random.randint(3,6)
  for ifl in progressbar(range(nmf), "nmfaults:", 40, verb=verb):
    azim = np.random.choice(azims)
    fpr  = np.random.choice(fprs)
    xpos = rndut.randfloat(0.05,0.95)
    mb.mediumfault(azim=azim,begz=0.65,begx=xpos,begy=0.5,dist_die=4.0,tscale=3.0,fpr=fpr,twod=True)

  # Small faults (sliding or small)
  nsf = np.random.randint(5,10)
  for ifl in progressbar(range(nsf), "nsfaults:", 40, verb=verb):
    azim = np.random.choice(azims)
    fpr  = np.random.choice(fprs)
    xpos = rndut.randfloat(0.05,0.95)
    zpos = rndut.randfloat(0.2,0.5)
    mb.smallfault(azim=azim,begz=zpos,begx=xpos,begy=0.5,dist_die=4.0,tscale=2.0,fpr=fpr,twod=True)

  # Tiny faults
  ntf = np.random.randint(5,10)
  for ifl in progressbar(range(ntf), "ntfaults:", 40, verb=verb):
    azim = np.random.choice(azims)
    xpos = rndut.randfloat(0.05,0.95)
    zpos = rndut.randfloat(0.15,0.3)
    mb.tinyfault(azim=azim,begz=zpos,begx=xpos,begy=0.5,dist_die=4.0,tscale=2.0,twod=True)

  # Parameters for ricker wavelet
  nt = kwargs.get('nt',250); ot = 0.0; dt = kwargs.get('dt',0.001); ns = int(nt/2)
  amp = 1.0; dly = kwargs.get('dly',0.125)
  minf = kwargs.get('minf',60.0); maxf = kwargs.get('maxf',100.0)
  f = kwargs.get('f',None)

  # Get model
  vel = gaussian_filter(mb.vel[:,:nzi],sigma=rect).astype('float32')
  lbl = mb.get_label2d()[:,:nzi]

  # Resample to output size
  velr = dlut.resample(vel,[nx,nz],kind='quintic')
  lblr = dlut.thresh(dlut.resample(lbl,[nx,nz],kind='linear'),0)
  refr = mb.calcrefl2d(velr)

  # Create normalized image
  if(f is None): f = rndut.randfloat(minf,maxf)
  wav = ricker(nt,dt,f,amp,dly)
  img = dlut.normalize(np.array([np.convolve(refr[ix,:],wav) for ix in range(nx)])[:,ns:nz+ns])
  # Create noise
  nze = dlut.normalize(bandpass(np.random.rand(nx,nz)*2-1, 2.0, 0.01, 2, pxd=43))/rndut.randfloat(3,5)
  img += nze

  if(kwargs.get('transp',False) == True):
    velt = np.ascontiguousarray(velr.T).astype('float32')
    reft = np.ascontiguousarray(refr.T).astype('float32')
    imgt = np.ascontiguousarray(img.T).astype('float32')
    lblt = np.ascontiguousarray(lblr.T).astype('float32')
  else:
    velt = np.ascontiguousarray(velr).astype('float32')
    reft = np.ascontiguousarray(refr).astype('float32')
    imgt = np.ascontiguousarray(img).astype('float32')
    lblt = np.ascontiguousarray(lblr).astype('float32')

  if(kwargs.get('km',True)): velt /= 1000.0

  return velt,reft,imgt,lblt
def layeredfaults2d(nz=512,nx=1000,dz=12.5,dx=25.0,nlayer=21,minvel=1600,maxvel=3000,rect=0.5,
                    nfx=3,ofx=0.4,dfx=0.1,ofz=0.3):
  """
  Builds a 2D layered, v(z) fault model.
  Returns the velocity model, reflectivity, fault labels and a zero-offset image

  Parameters:
    nz     - number of depth samples [512]
    nx     - number of lateral samples [1000]
    dz     - depth sampling interval [12.5]
    dx     - lateral sampling interval [25.0]
    nlayer - number of deposited layers (there exist many fine layers within a deposit) [21]
    minvel - minimum velocity in model [1600]
    maxvel - maximum velocity in model [3000]
    rect   - radius for gaussian smoother [0.5]
    nfx    - number of faults [3]
    ofx    - starting position of faults (percentage of total model) [0.4]
    dfx    - spacing between faults (percentage of total model) [0.1]
    ofz    - central depth of faults (percentage of total model) [0.3]

  Returns:
    The velocity, reflectivity, fault label and image all of size [nz,nx]
  """
  # Model building object
  mb = mdlbuild.mdlbuild(nx,dx,ny=200,dy=dx,dz=dz,basevel=5000)
  nzi = 1000 # internal size is 1000

  # Propagation velocities
  props = np.linspace(maxvel,minvel,nlayer)

  # Specify the thicknesses
  thicks = np.random.randint(40,61,nlayer)

  dlyr = 0.05
  for ilyr in progressbar(range(nlayer), "ndeposit:", 40):
    mb.deposit(velval=props[ilyr],thick=thicks[ilyr],dev_pos=0.0,layer=50,layer_rand=0.00,dev_layer=dlyr)

  # Water deposit
  mb.deposit(1480,thick=80,layer=150,dev_layer=0.0)

  # Trim model before faulting
  mb.trim(0,1100)

  # Put in the faults
  for ifl in progressbar(range(nfx), "nfaults:"):
    x = ofx + ifl*dfx
    mb.fault2d(begx=x,begz=ofz,daz=8000,dz=5000,azim=0.0,theta_die=11,theta_shift=4.0,dist_die=0.3,throwsc=10.0)

  # Get the model
  vel = gaussian_filter(mb.vel[:,:nzi].T,sigma=rect).astype('float32')
  lbl = mb.get_label2d()[:,:nzi].T
  ref = mb.get_refl2d()[:,:nzi].T

  # Parameters for ricker wavelet
  nt = 250; ot = 0.0; dt = 0.001; ns = int(nt/2)
  amp = 1.0; dly = 0.125
  minf = 100.0; maxf = 120.0

  # Create normalized image
  f = rndut.randfloat(minf,maxf)
  wav = ricker(nt,dt,f,amp,dly)
  img = dlut.normalize(np.array([np.convolve(ref[:,ix],wav) for ix in range(nx)])[:,ns:nzi+ns].T)
  nze = dlut.normalize(bandpass(np.random.rand(nzi,nx)*2-1, 2.0, 0.01, 2, pxd=43))/rndut.randfloat(3,5)
  img += nze

  # Window the models and return
  f1 = 50
  velwind = vel[f1:f1+nz,:]
  lblwind = lbl[f1:f1+nz,:]
  refwind = ref[f1:f1+nz,:]
  imgwind = img[f1:f1+nz,:]

  return velwind,refwind,imgwind,lblwind
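# Example of generating a small synthetic training set with layeredfaults2d
# (a minimal sketch; the number of examples and the output file name are
# illustrative):
#
#   import numpy as np
#   imgs, lbls = [], []
#   for iex in range(10):
#     vel, ref, img, lbl = layeredfaults2d(nz=512, nx=1000)
#     imgs.append(img); lbls.append(lbl)
#   np.savez('fltseg_training.npz', img=np.asarray(imgs), lbl=np.asarray(lbls))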