Example #1
def create_constptb_loc(nz, nx, ptb, naz, nax, cz, cx, rectx=20, rectz=20):
    """
  Creates a constant perturbation of size naz by nax at position
  (cz,cx)

  Parameters
    nz    - number of depth samples of output velocity model
    nx    - number of lateral samples of output velocity model
    ptb   - percent perturbation
    naz   - number of depth samples of perturbation
    nax   - number of lateral samples of perturbation
    cz    - z center position of anomaly
    cx    - x center position of anomaly
    rectx - number of points to smooth in x [20]
    rectz - number of points to smooth in z [20]

  Returns a model [nz,nx] with the anomaly positioned at (cz,cx) in the model
  """
    velc = np.zeros([naz, nax], dtype='float32') + ptb
    pz1 = cz - int(naz / 2)
    if (naz % 2 != 0): pz1 -= 1
    if (pz1 < 0):
        # Anomaly would extend above the model; shift its center down so the padding is valid
        cz -= pz1
        pz1 = 0
    pz2 = nz - cz - int(naz / 2)
    px1 = cx - int(nax / 2)
    if (nax % 2 != 0): px1 -= 1
    if (px1 < 0):
        # Anomaly would extend past the left edge; shift its center right
        cx -= px1
        px1 = 0
    px2 = nx - cx - int(nax / 2)
    velcp = np.pad(velc, ((pz1, pz2), (px1, px2)),
                   'constant',
                   constant_values=1)
    return smooth(velcp, rect1=rectx, rect2=rectz)
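
A minimal usage sketch (the sizes and perturbation value below are illustrative, and the author's smooth helper must be importable alongside the function):

# Build a 256 x 512 model containing a smoothed 40 x 80 anomaly centered at (128, 256);
# the padded background is 1, so the result can scale a velocity model multiplicatively
ptbmdl = create_constptb_loc(nz=256, nx=512, ptb=1.05, naz=40, nax=80, cz=128, cx=256)
print(ptbmdl.shape)  # (256, 512)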
Example #2
def focdefocang(img,
                mdl,
                nzp=64,
                nxp=64,
                strdz=None,
                strdx=None,
                rectx=30,
                rectz=30,
                verb=False):
    """
  Classifies an angle gather as focused or defocused

  Parameters:
    img   - the input extended image [na,nz,nx]
    mdl   - the trained keras model
    nzp   - z-dimension of the patch provided to the CNN [64]
    nxp   - x-dimension of the patch provided to the CNN [64]
    strdz - z-dimension of the patch stride (50% overlap) [nzp/2]
    strdx - x-dimension of the patch stride (50% overlap) [nxp/2]
    rectz - number of points to smooth in z direction [30]
    rectx - number of points to smooth in x direction [30]

  Returns a smooth probability map of focused/defocused faults
  """

    # Get image dimensions
    na = img.shape[0]
    nz = img.shape[1]
    nx = img.shape[2]

    # Get strides
    if (strdz is None): strdz = int(nzp / 2)
    if (strdx is None): strdx = int(nxp / 2)

    # Build the Patch Extractors
    pea = PatchExtractor((na, nzp, nxp), stride=(na, strdz, strdx))
    aptch = np.squeeze(pea.extract(img))
    # Flatten patches and make a prediction on each
    numpz = aptch.shape[0]
    numpx = aptch.shape[1]
    aptchf = np.expand_dims(normalize(
        aptch.reshape([numpz * numpx, na, nzp, nxp])),
                            axis=-1)
    focprd = mdl.predict(aptchf)

    focprdptch = np.zeros([numpz * numpx, nzp, nxp])
    for iptch in range(numpz * numpx):
        focprdptch[iptch, :, :] = focprd[iptch]
    focprdptch = focprdptch.reshape([numpz, numpx, nzp, nxp])

    # Output probabilities
    per = PatchExtractor((nzp, nxp), stride=(strdz, strdx))
    focprdimg = np.zeros([nz, nx])
    _ = per.extract(focprdimg)

    focprdimg = per.reconstruct(focprdptch.reshape([numpz, numpx, nzp, nxp]))

    focprdimgsm = smooth(focprdimg.astype('float32'), rect1=rectx, rect2=rectz)

    return focprdimgsm
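
A hedged usage sketch (the model file name is hypothetical, the image is synthetic, and the PatchExtractor/normalize/smooth helpers must come from the author's codebase):

import numpy as np
from tensorflow.keras.models import load_model

mdl = load_model('focdefoc_model.h5')                   # hypothetical trained classifier
img = np.random.rand(64, 512, 1024).astype('float32')   # synthetic [na, nz, nx] gathers
prob = focdefocang(img, mdl, nzp=64, nxp=64)            # smoothed [nz, nx] probability map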
Example #3
def salt_mask(img, vel, saltvel, thresh=0.95, rectx=30, rectz=30):
    """
  Masks the image based on the salt velocity

  Parameters
    img     - input image (can be extended or not) [nhy,nhx,nz,ny,nx]
    vel     - input migration velocity model [nz,ny,nx]
    saltvel - salt velocity for masking
    thresh  - mask threshold [0.95]
    rectx   - amount of smoothing to be applied to mask along x [30]
    rectz   - amount of smoothing to be applied to mask along z [30]

  Returns mask and masked image (msk,maskedimg)
  """
    # Create the mask
    idx = vel >= saltvel
    msk = np.ascontiguousarray(np.copy(vel)).astype('float32')
    msk[idx] = 0.0
    msk[~idx] = 1.0
    if (len(msk.shape) == 3):
        mskw = msk[:, 0, :]
    else:
        mskw = msk

    # Smooth and threshold the mask
    smmsk = smooth(mskw, rect1=rectx, rect2=rectz)
    idx2 = smmsk > thresh
    smmsk[idx2] = 1.0
    smmsk[~idx2] = 0.0
    smmsk2 = smooth(smmsk, rect1=2, rect2=2)

    # Apply the mask to the image
    imgo = np.zeros(img.shape, dtype='float32')
    if (len(img.shape) == 5):
        nhx = img.shape[1]
        for ihx in range(nhx):
            imgo[0, ihx, :, 0, :] = smmsk2 * img[0, ihx, :, 0, :]
    elif (len(img.shape) == 3):
        imgo = smmsk2 * img[:, 0, :]
    elif (len(img.shape) == 2):
        imgo = smmsk2 * img

    return smmsk2, imgo
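
A usage sketch for a 3-D image/velocity pair (the salt velocity and array sizes are illustrative):

import numpy as np

vel = np.full((512, 1, 1024), 2.5, dtype='float32')    # [nz, ny, nx] migration velocity
vel[300:, 0, :] = 4.5                                   # salt body in the deep section
img = np.random.rand(512, 1, 1024).astype('float32')   # [nz, ny, nx] image
msk, imgm = salt_mask(img, vel, saltvel=4.5)            # msk and imgm are [nz, nx]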
Example #4
def semblance_power(img, transp=False):
    """
  A semblance metric for measuring flatness of angle gathers.

  Parameters:
    img    - the input image [na,nz,nx]
    transp - transpose flag (unused in this implementation) [False]
  """
    if (len(img.shape) != 3):
        raise Exception("Input image must be 3D")

    stack = np.sum(img, axis=0)
    stacksq = stack * stack
    num = smooth(stacksq.astype('float32'), rect1=3, rect2=10)

    sqstack = np.sum(img * img, axis=0)
    denom = smooth(sqstack.astype('float32'), rect1=3, rect2=10)

    semb = num / denom

    return np.sum(semb)
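
A usage sketch (a random gather simply stands in for a real angle gather here):

import numpy as np

ang = np.random.rand(32, 256, 400).astype('float32')  # [na, nz, nx]
flatness = semblance_power(ang)                       # scalar; larger means flatter gathers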
Example #5
  def fpr_mask(self,lbltm,lbltn,dec=0.9,rectdecay=10,rectspread=3):
    """
    Creates a mask for creating the effect of a fault plane reflection
    along a fault

    Parameters:
      lbltm      - the normalized fault displacement
      lbltn      - the fault label (thresholded fault displacement)
      dec        - the fractional velocity decrease along the fault [0.9]
                   (typically chosen between 0.90 and 0.95)
      rectdecay  - smoothing length that controls the decay of
                   the reflection [10 points]
      rectspread - smoothing length that controls the spread of the reflection [3 points]

    Returns the smoothed fault-plane-reflection mask (same shape as lbltn)
    """
    # Create a mask that smoothly increases from dec to 1.0
    lbltmsm = smooth(lbltm,rect1=rectdecay,rect2=rectdecay)
    mcomp  = 1 - lbltmsm        # Mask complement
    mcomp += 1 - np.min(mcomp)  # Bring up to 1.0
    ampmask = mcomp*lbltn

    # Set the entire label to be velocity decrease percent
    fpmask = 1-lbltn
    zidx = fpmask == 0
    fpmask[zidx] = dec
    # Based on this value, change the max on the amplitude mask (avoids going over one)
    newmax = 1/dec
    midx = ampmask > newmax
    ampmask[midx] = newmax
    # Scale the constant velocity decrease by the amplitude mask
    fpmask *= ampmask
    # Set all zeros to ones
    zidx = fpmask == 0
    fpmask[zidx] = 1.0
    # Smooth to spread out over a few pixels
    fpmasksm = smooth(fpmask,rect1=rectspread,rect2=rectspread)

    return fpmasksm
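
A hedged sketch of how the mask might be used inside the owning velocity-model class (the lbltm/lbltn arrays are assumed to come from its fault-generation routines):

fpr = self.fpr_mask(lbltm, lbltn, dec=0.92, rectdecay=10, rectspread=3)
self.vel *= fpr  # imprint the fault-plane reflection onto the velocity model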
Example #6
def rho_semb(stormang,gagc=True,norm=True,rectz=10,rectro=3,nthreads=1):
  """
  Computes semblance from residually migrated angle gathers

  Parameters:
    stormang - Stolt residually migrated angle gathers [nro,nx,na,nz]
    gagc     - Apply AGC before computing semblance [True]
    norm     - Normalize the semblance by its maximum [True]
    rectz    - Smoothing window along z direction [10 points]
    rectro   - Smoothing window along rho direction [3 points]
    nthreads - Number of threads used for the AGC [1]

  Returns a semblance cube [nx,nro,nz]
  """
  # Get dimensions
  nro,nx,na,nz = stormang.shape
  # Compute agc
  if(gagc):
    angs = np.asarray(Parallel(n_jobs=nthreads)(delayed(agc)(stormang[iro]) for iro in range(nro)))
  else:
    angs = stormang

  # Compute semblance
  stackg  = np.sum(angs,axis=2)
  stacksq = stackg*stackg
  num = smooth(stacksq.astype('float32'),rect1=rectz,rect3=rectro)

  sqstack = np.sum(angs*angs,axis=2)
  den = smooth(sqstack.astype('float32'),rect1=rectz,rect3=rectro)

  semb = num/den

  sembt = np.transpose(semb,(1,0,2)) # [nro,nx,nz] -> [nx,nro,nz]

  if(norm):
    sembt /= np.max(sembt)

  return sembt
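
A usage sketch (the gather dimensions and the picking step are illustrative):

import numpy as np

stormang = np.random.rand(11, 200, 32, 256).astype('float32')  # [nro, nx, na, nz]
semb = rho_semb(stormang, gagc=True, nthreads=4)                # [nx, nro, nz]
rhoidx = np.argmax(semb, axis=1)                                # best rho index at each (x, z)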
Example #7
  def smooth_model(self,rect1=2,rect2=2,rect3=2,sigma=None):
    """
    Applies either a triangular or Gaussian smoother to the velocity model.
    The default is the triangular smoother.

    Parameters
      rect1 - Length of triangular filter along z-axis [2 gridpoints]
      rect2 - Length of triangular filter along x-axis [2 gridpoints]
      rect3 - Length of triangular filter along y-axis [2 gridpoints]
      sigma - standard deviation of the Gaussian filter; overrides the triangular smoother [None]
    """
    if(sigma is not None):
      self.vel = gaussian_filter(self.vel,sigma=sigma).astype('float32')
    else:
      self.vel = smooth(self.vel,rect1=rect1,rect2=rect2,rect3=rect3)
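
A hedged sketch (vmod stands for whichever model-building object defines smooth_model and holds a vel attribute):

vmod.smooth_model(rect1=4, rect2=4)  # triangular smoothing along z and x
vmod.smooth_model(sigma=2.0)         # or a Gaussian smoother instead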
Example #8
def detectfaultpatch(img,
                     mdl,
                     nzp=64,
                     nxp=64,
                     strdz=None,
                     strdx=None,
                     rectx=30,
                     rectz=30,
                     verb=False):
    """
  Detects if a fault is present within an image or not

  Parameters:
    img   - the input image [nz,nx]
    mdl   - the trained keras model
    nzp   - z-dimension of the patch provided to the CNN [64]
    nxp   - x-dimension of the patch provided to the CNN [64]
    strdz - z-dimension of the patch stride (50% overlap) [nzp/2]
    strdx - x-dimension of the patch stride (50% overlap) [nxp/2]
    rectz - number of points to smooth in z direction [30]
    rectx - number of points to smooth in x direction [30]

  Returns a smooth probability map of detected faults
  """
    # Resample to nearest power of 2
    rimg = resizepow2(img, kind='linear')
    # Perform the patch extraction
    if (strdz is None): strdz = int(nzp / 2)
    if (strdx is None): strdx = int(nxp / 2)
    pe = PatchExtractor((nzp, nxp), stride=(strdz, strdx))
    iptch = pe.extract(rimg)
    numpz = iptch.shape[0]
    numpx = iptch.shape[1]
    iptch = iptch.reshape([numpx * numpz, nzp, nxp, 1])
    # Normalize and predict for each patch
    iprd = np.zeros(iptch.shape)
    for ip in range(numpz * numpx):
        iprd[ip, :, :] = mdl.predict(
            np.expand_dims(normalize(iptch[ip, :, :]), axis=0))
    # Reconstruct the predictions
    ipra = iprd.reshape([numpz, numpx, nzp, nxp])
    iprb = pe.reconstruct(ipra)

    # Smooth the predictions
    smprb = smooth(iprb.astype('float32'), rect1=rectx, rect2=rectz)

    return smprb
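
A hedged usage sketch (the model file name is hypothetical; the image is resampled internally to the nearest power of 2):

import numpy as np
from tensorflow.keras.models import load_model

fltmdl = load_model('fault_detect.h5')             # hypothetical trained detector
img = np.random.rand(512, 1024).astype('float32')  # [nz, nx] migrated image
prb = detectfaultpatch(img, fltmdl)                # smoothed fault-probability map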
Example #9
def agc(dat, rect1=125, transp=False):
    """
  Applies an automatic gain control (AGC) to the data/image
  Applies it trace by trace (assumes t or z is the fast axis)

  Parameters:
    dat    - the input data/image [nx,nt/nz]
    rect1  - size of AGC window along the fast axis [125]
    transp - transpose a 2D image so that t/z is the fast axis

  Returns the gained data
  """
    if (transp):
        dat = np.ascontiguousarray(dat.T)
    # First compute the absolute value of the data
    databs = np.abs(dat)
    # Smooth the absolute value
    databssm = smooth(databs, rect1=rect1)
    # Divide by the smoothed amplitude
    idx = databssm <= 0
    databssm[idx] = 1
    return dat / databssm
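
A usage sketch (shapes are illustrative; t or z must be the fast axis, or pass transp=True):

import numpy as np

dat = np.random.rand(200, 1500).astype('float32')  # [nx, nt], t is the fast axis
gained = agc(dat, rect1=125)                       # trace-by-trace gain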
Example #10
def anglemask(nz,na,zpos=None,apos=None,rectz=10,recta=10,mode='slant',rand=True,verb=False):
  """
  Creates an angle mask that can be applied to an
  angle gather to limit the number of angles (illumination)

  Parameters:
    nz    - number of depth samples
    na    - number of angle samples
    zpos  - percentage in z where to begin the mask [0.3]
    apos  - percentage in a where to end the mask [0.6]
    rectz - number of points to smooth the mask in z [10]
    recta - number of points to smooth the mask in a [10]
    mode  - mode of creating mask. Either a vertical mask ('vert') or slanted ['slant']
    rand  - add smooth random variation to the mask [True]
    verb  - verbosity flag [False]

  Returns a single angle gather mask [nz,na]
  """
  # Create z(a) linear function
  if(zpos is None):
    z0 = int(0.3*nz)
  else:
    z0 = int(zpos*nz)
  if(apos is None):
    a0 = int(0.6*na)
  else:
    a0 = int(apos*na)

  if(mode == 'vert'):
    if(a0 > int(0.5*na)):
      raise Exception("Please choose an apos < 0.5 for vertical mask mode")
    abeg = a0; aend = na-a0
    mask = np.ones([nz,na])
    mask[:,0:abeg] = 0.0
    mask[:,aend:] = 0.0
    if(verb):
      print("z0=%d a0=%d af=%d"%(z0,a0,aend))

  elif(mode == 'slant'):
    zf = nz-1; af = na-1
    if(verb):
      print("z0=%d a0=%d zf=%d af=%d"%(z0,a0,zf,af))

    # Slope and intercept
    m = (zf - z0)/(a0 - af)
    b = (z0*a0 - zf*af)/(a0 - af)

    a = np.array(range(a0,na))
    zofa = (a*m + b).astype(int)

    if(rand):
      noise = 250*perlin(x=np.linspace(0,2,len(zofa)), octaves=2, persist=0.3, ncpu=1)
      noise = noise - np.mean(noise)
      zofa = np.clip(zofa + noise.astype(int), 0, nz-1)

    # Create mask
    liner = np.ones([nz,na])
    j = 0
    for ia in a:
      liner[zofa[j]:,ia] = 0
      j += 1

    # Flip for symmetry
    linel = np.fliplr(liner)
    mask = linel*liner

  masksm = smooth(mask.astype('float32'),rect1=recta,rect2=rectz)

  return masksm
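
A usage sketch (the gather is synthetic; the mask is applied by elementwise multiplication):

import numpy as np

mask = anglemask(nz=512, na=64, zpos=0.3, apos=0.6, mode='slant', rand=True)
gather = np.random.rand(512, 64).astype('float32')  # [nz, na] angle gather
limited = gather * mask                             # simulate limited illumination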
Example #11
def estro_varimax(rimgs,dro,oro,nzp=64,nxp=64,strdz=None,strdx=None,rectz=30,rectx=30,qcimgs=True):
  """
  Estimates rho by choosing the residually migrated patch that has
  the highest image entropy computed via the varimax norm

  Parameters
    rimgs      - residually migrated images [nro,nz,nx]
    dro        - residual migration sampling
    oro        - residual migration origin
    nzp        - size of patch in z dimension [64]
    nxp        - size of patch in x dimension [64]
    strdz      - size of stride in z dimension [nzp/2]
    strdx      - size of stride in x dimension [nxp/2]
    rectz      - length of smoother in z dimension [30]
    rectx      - length of smoother in x dimension [30]
    qcimgs     - flag for returning the smoothed varimax norm [nro,nz,nx]

  Returns an estimate of rho(x,z)
  """
  # Get image dimensions
  nro = rimgs.shape[0]; nz = rimgs.shape[1]; nx = rimgs.shape[2]

  # Get strides
  if(strdz is None): strdz = int(nzp/2)
  if(strdx is None): strdx = int(nxp/2)

  # Extract patches from residual migration image
  per = PatchExtractor((nro,nzp,nxp),stride=(nro,strdz,strdx))
  rptch = np.squeeze(per.extract(rimgs))
  # Flatten patches and make a prediction on each
  numpz = rptch.shape[0]; numpx = rptch.shape[1]
  nptch  = nro*numpz*numpx
  rptchf = rptch.reshape([nptch,nzp,nxp])
  norm = np.zeros(rptchf.shape)
  for iptch in range(nptch):
    norm[iptch,:,:] = varimax(rptchf[iptch])

  # Assign prediction to entire patch for QC
  normptch = norm.reshape([numpz,numpx,nro,nzp,nxp])

  # Output rho image
  rho = np.zeros([nz,nx])
  pe = PatchExtractor((nzp,nxp),stride=(strdz,strdx))
  rhop = pe.extract(rho)

  # Estimate rho from the varimax (image entropy) of each patch
  hlfz = int(nzp/2); hlfx = int(nxp/2)
  for izp in range(numpz):
    for ixp in range(numpx):
      # Find maximum entropy and compute rho
      ient = normptch[izp,ixp,:,hlfz,hlfx]
      rhop[izp,ixp,:,:] = np.argmax(ient)*dro + oro

  # Reconstruct the rho and varimax images
  rho     = pe.reconstruct(rhop)
  normimg = per.reconstruct(normptch.reshape([1,numpz,numpx,nro,nzp,nxp]))

  # Smooth rho and, if requested, the varimax QC image
  rhosm = smooth(rho.astype('float32'),rect1=rectx,rect2=rectz)
  if(qcimgs):
    normimgsm = np.zeros(normimg.shape)
    # Smooth the varimax norm for each rho
    for iro in range(nro):
      normimgsm[iro] = smooth(normimg[iro].astype('float32'),rect1=rectx,rect2=rectz)
    # Return images
    return rhosm,normimgsm
  else:
    return rhosm
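
A usage sketch (the residual-migration sampling dro/oro and the image sizes are illustrative; varimax and PatchExtractor come from the author's codebase):

import numpy as np

rimgs = np.random.rand(11, 256, 512).astype('float32')       # [nro, nz, nx]
rho, normqc = estro_varimax(rimgs, dro=0.00125, oro=0.99375)  # rho: [nz, nx]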
Example #12
def estro_fltangfocdefoc(rimgs,foccnn,dro,oro,nzp=64,nxp=64,strdz=None,strdx=None, # Patching parameters
                         rectz=30,rectx=30,fltthresh=75,fltlbls=None,qcimgs=True,verb=False,
                         fmwrk='torch',device=None):
  """
  Estimates rho by choosing the residually migrated patch that has
  highest angle gather and fault focus probability given by the neural network

  Parameters
    rimgs      - residually migrated angle gather images [nro,na,nz,nx]
    foccnn     - CNN for determining if angle gather/fault is focused or not
    dro        - residual migration sampling
    oro        - residual migration origin
    nzp        - size of patch in z dimension [64]
    nxp        - size of patch in x dimension [64]
    strdz      - size of stride in z dimension [nzp/2]
    strdx      - size of stride in x dimension [nxp/2]
    rectz      - length of smoother in z dimension [30]
    rectx      - length of smoother in x dimension [30]
    fltthresh  - minimum number of fault-label points in a patch for rho estimation [75]
    fltlbls    - input fault segmentation labels [None]
    qcimgs     - flag for returning the fault focusing probabilities [nro,nz,nx]
                 and fault patches [nz,nx]
    verb       - verbosity flag [False]
    fmwrk      - deep learning framework to be used for the prediction [torch]
    device     - device for pytorch networks

  Returns an estimate of rho(x,z)
  """
  # Get image dimensions
  nro = rimgs.shape[0]; na = rimgs.shape[1]; nz = rimgs.shape[2]; nx = rimgs.shape[3]

  # Get strides
  if(strdz is None): strdz = int(nzp/2)
  if(strdx is None): strdx = int(nxp/2)

  # Build the Patch Extractors
  pea = PatchExtractor((nro,na,nzp,nxp),stride=(nro,na,strdz,strdx))
  aptch = np.squeeze(pea.extract(rimgs))
  # Flatten patches and make a prediction on each
  numpz = aptch.shape[0]; numpx = aptch.shape[1]
  if(fmwrk == 'torch'):
    aptchf = normalize(aptch.reshape([nro*numpz*numpx,1,na,nzp,nxp]))
    with(torch.no_grad()):
      aptchft = torch.tensor(aptchf)
      focprdt = torch.zeros([nro*numpz*numpx,1])
      for iptch in progressbar(range(aptchf.shape[0]),verb=verb):
        gptch = aptchft[iptch].to(device)
        focprdt[iptch] = torch.sigmoid(foccnn(gptch.unsqueeze(0)))
      focprd = focprdt.cpu().numpy()
  elif(fmwrk == 'tf'):
    aptchf = np.expand_dims(normalize(aptch.reshape([nro*numpz*numpx,na,nzp,nxp])),axis=-1)
    focprd = foccnn.predict(aptchf,verbose=verb)
  elif(fmwrk is None):
    aptchf = normalize(aptch.reshape([nro*numpz*numpx,na,nzp,nxp]))
    focprd = np.zeros([nro*numpz*numpx,1])
    for iptch in progressbar(range(aptchf.shape[0]),verb=verb):
      focprd[iptch] = semblance_power(aptchf[iptch])

  # Assign prediction to entire patch for QC
  focprdptch = np.zeros([numpz*numpx*nro,nzp,nxp])
  for iptch in range(nro*numpz*numpx): focprdptch[iptch,:,:] = focprd[iptch]
  focprdptch = focprdptch.reshape([numpz,numpx,nro,nzp,nxp])

  # Save predictions as a function of rho only
  focprdr = focprd.reshape([numpz,numpx,nro])

  # Output rho image
  pe = PatchExtractor((nzp,nxp),stride=(strdz,strdx))
  rho = np.zeros([nz,nx])
  rhop = pe.extract(rho)

  # Output probabilities
  focprdnrm = np.zeros(focprdptch.shape)
  per = PatchExtractor((nro,nzp,nxp),stride=(nro,strdz,strdx))
  focprdimg = np.zeros([nro,nz,nx])
  _ = per.extract(focprdimg)

  if(fltlbls is None):
    fltptch = np.ones([numpz,numpx,nzp,nxp],dtype='int')
  else:
    pef = PatchExtractor((nzp,nxp),stride=(strdz,strdx))
    fltptch = pef.extract(fltlbls)

  # Estimate rho from angle-fault focus probabilities
  hlfz = int(nzp/2); hlfx = int(nxp/2)
  for izp in range(numpz):
    for ixp in range(numpx):
      if(np.sum(fltptch[izp,ixp]) > fltthresh):
        # Find maximum probability and compute rho
        iprb = focprdptch[izp,ixp,:,hlfz,hlfx]
        rhop[izp,ixp,:,:] = np.argmax(iprb)*dro + oro
        # Normalize across rho within a patch for QC
        if(np.max(focprdptch[izp,ixp,:,hlfz,hlfx]) == 0.0):
          focprdnrm[izp,ixp,:,:,:] = 0.0
        else:
          focprdnrm[izp,ixp,:,:,:] = focprdptch[izp,ixp,:,:,:]/np.max(focprdptch[izp,ixp,:,hlfz,hlfx])
      else:
        rhop[izp,ixp,:,:] = 1.0

  # Reconstruct rho and probabilities
  rho       = pe.reconstruct(rhop)
  focprdimg = per.reconstruct(focprdnrm.reshape([1,numpz,numpx,nro,nzp,nxp]))

  # Smooth and return rho, fault patches and fault probabilities
  rhosm = smooth(rho.astype('float32'),rect1=rectx,rect2=rectz)
  if(qcimgs):
    focprdimgsm = np.zeros(focprdimg.shape)
    # Smooth the fault focusing for each rho
    for iro in range(nro):
      focprdimgsm[iro] = smooth(focprdimg[iro].astype('float32'),rect1=rectx,rect2=rectz)
    # Return images
    return rhosm,focprdimgsm,focprdr
  else:
    return rhosm
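
A hedged usage sketch for the PyTorch path (foccnn is assumed to be a trained focusing network; the gather sizes and rho sampling are illustrative):

import torch
import numpy as np

rimgs = np.random.rand(11, 64, 256, 512).astype('float32')  # [nro, na, nz, nx]
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
rho, focprb, focrho = estro_fltangfocdefoc(rimgs, foccnn, dro=0.00125, oro=0.99375,
                                            fmwrk='torch', device=device, verb=True)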
Example #13
def estro_fltfocdefoc(rimgs,foccnn,dro,oro,nzp=64,nxp=64,strdz=None,strdx=None, # Patching parameters
                      hasfault=None,rectz=30,rectx=30,qcimgs=True):
  """
  Estimates rho by choosing the residually migrated patch that has
  highest fault focus probability given by the neural network

  Parameters
    rimgs      - residually migrated images [nro,nz,nx]
    foccnn     - CNN for determining if fault is focused or not
    dro        - residual migration sampling
    oro        - residual migration origin
    nzp        - size of patch in z dimension [64]
    nxp        - size of patch in x dimension [64]
    strdz      - size of stride in z dimension [nzp/2]
    strdx      - size of stride in x dimension [nxp/2]
    hasfault   - array indicating if a patch has faults or not [None]
                 If None, all patches are considered to have faults
    rectz      - length of smoother in z dimension [30]
    rectx      - length of smoother in x dimension [30]
    qcimgs     - flag for returning the fault focusing probabilities [nro,nz,nx]
                 and fault patches [nz,nx]

  Returns an estimate of rho(x,z)
  """
  # Get image dimensions
  nro = rimgs.shape[0]; nz = rimgs.shape[1]; nx = rimgs.shape[2]

  # Get strides
  if(strdz is None): strdz = int(nzp/2)
  if(strdx is None): strdx = int(nxp/2)

  # Extract patches from residual migration image
  per = PatchExtractor((nro,nzp,nxp),stride=(nro,strdz,strdx))
  rptch = np.squeeze(per.extract(rimgs))
  # Flatten patches and make a prediction on each
  numpz = rptch.shape[0]; numpx = rptch.shape[1]
  rptchf = np.expand_dims(normalize(rptch.reshape([nro*numpz*numpx,nzp,nxp])),axis=-1)
  focprd = foccnn.predict(rptchf)

  # Assign prediction to entire patch for QC
  focprdptch = np.zeros([nro*numpz*numpx,nzp,nxp])
  for iptch in range(nro*numpz*numpx): focprdptch[iptch,:,:] = focprd[iptch]
  focprdptch = focprdptch.reshape([numpz,numpx,nro,nzp,nxp])

  if(hasfault is None):
    hasfault = np.ones([numpz,numpx,nzp,nxp],dtype='int')

  # Output rho image
  rho = np.zeros([nz,nx])
  pe = PatchExtractor((nzp,nxp),stride=(strdz,strdx))
  rhop = pe.extract(rho)

  # Using hasfault array, estimate rho from fault focus probabilities
  hlfz = int(nzp/2); hlfx = int(nxp/2)
  for izp in range(numpz):
    for ixp in range(numpx):
      if(hasfault[izp,ixp,hlfz,hlfx]):
        # Find maximum probability and compute rho
        iprb = focprdptch[izp,ixp,:,hlfz,hlfx]
        rhop[izp,ixp,:,:] = np.argmax(iprb)*dro + oro
      else:
        rhop[izp,ixp,:,:] = 1.0

  # Reconstruct the rho and fault focusing probabilities
  rho       = pe.reconstruct(rhop)
  focprdimg = per.reconstruct(focprdptch.reshape([1,numpz,numpx,nro,nzp,nxp]))

  # Smooth and return rho, fault patches and fault probabilities
  rhosm = smooth(rho.astype('float32'),rect1=rectx,rect2=rectz)
  if(qcimgs):
    focprdimgsm = np.zeros(focprdimg.shape)
    # Smooth the fault focusing for each rho
    for iro in range(nro):
      focprdimgsm[iro] = smooth(focprdimg[iro].astype('float32'),rect1=rectx,rect2=rectz)
    # Return images
    return rhosm,focprdimgsm
  else:
    return rhosm
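
A hedged usage sketch for this image-domain variant (foccnn is assumed to be a trained Keras patch classifier; sizes and rho sampling are illustrative):

import numpy as np

rimgs = np.random.rand(11, 256, 512).astype('float32')  # [nro, nz, nx]
rho, focprb = estro_fltfocdefoc(rimgs, foccnn, dro=0.00125, oro=0.99375, qcimgs=True)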