Example #1
def DisplayPyPlot(A,title=''):
  isVec = min(A.Height(),A.Width()) == 1
  if A.tag == cTag or A.tag == zTag:
    AReal = Matrix(Base(A.tag))
    AImag = Matrix(Base(A.tag))
    RealPart(A,AReal)
    ImagPart(A,AImag)
    fig, (ax1,ax2) = plt.subplots(1,2)
    ax1.set_title('Real part')
    ax2.set_title('Imag part')
    if isVec:
      ax1.plot(np.squeeze(AReal.ToNumPy()),'bo-')
      ax2.plot(np.squeeze(AImag.ToNumPy()),'bo-')
    else:
      imReal = ax1.imshow(AReal.ToNumPy())
      cBarReal = fig.colorbar(imReal,ax=ax1)
      imImag = ax2.imshow(AImag.ToNumPy())
      cBarImag = fig.colorbar(imImag,ax=ax2)
    plt.suptitle(title)
    plt.tight_layout()
  else:
    fig = plt.figure()
    axis = fig.add_axes([0.1,0.1,0.8,0.8])
    if isVec:
      axis.plot(np.squeeze(A.ToNumPy()),'bo-')
    else:
      im = axis.imshow(A.ToNumPy())
      fig.colorbar(im,ax=axis)
    plt.title(title)
  plt.draw()
  if not isInlinePyPlot:
    plt.show(block=False)
  return fig
Example #2
def getAcc(model,words,f):
    f = open(f,'r')
    lines = f.readlines()
    preds = []
    golds = []
    seq1 = []
    seq2 = []
    ct = 0
    for i in lines:
        i = i.split("\t")
        p1 = i[0]; p2 = i[1]; score = i[2]
        X1, X2 = getSeqs(p1,p2,words)
        seq1.append(X1)
        seq2.append(X2)
        ct += 1
        if ct % 100 == 0:
            x1,m1 = utils.prepare_data(seq1)
            x2,m2 = utils.prepare_data(seq2)
            scores = model.scoring_function(x1,x2,m1,m2)
            scores = np.squeeze(scores)
            preds.extend(scores.tolist())
            seq1 = []
            seq2 = []
        golds.append(score)
    if len(seq1) > 0:
        x1,m1 = utils.prepare_data(seq1)
        x2,m2 = utils.prepare_data(seq2)
        scores = model.scoring_function(x1,x2,m1,m2)
        scores = np.squeeze(scores)
        preds.extend(scores.tolist())
    return acc(preds,golds)
Example #3
    def _plot(self,names,title,style,when=0,showLegend=True):
        if isinstance(names,str):
            names = [names]
        assert isinstance(names,list)

        legend = []
        for name in names:
            assert isinstance(name,str)
            legend.append(name)

            # if it's a differential state
            if name in self.xNames:
                index = self.xNames.index(name)
                ys = np.squeeze(self._log['x'])[:,index]
                ts = np.arange(len(ys))*self.Ts
                plt.plot(ts,ys,style)
                
            if name in self.outputNames:
                index = self.outputNames.index(name)
                ys = np.squeeze(self._log['outputs'][name])
                ts = np.arange(len(ys))*self.Ts
                plt.plot(ts,ys,style)

        if title is not None:
            assert isinstance(title,str), "title must be a string"
            plt.title(title)
        plt.xlabel('time [s]')
        if showLegend is True:
            plt.legend(legend)
        plt.grid()
Example #4
def surf(z,x=None,y=None,win=None,shade=0,edges=1,edge_color='fg',phi=-45.0,
         theta=30.0,zscale=1.0,palette=None,gnomon=0):
  '''Plot a three-dimensional wire-frame (surface): z=f(x,y)
  '''
  if win is None:
    pl3d.window3()
  else:
    pl3d.window3(win)
  pl3d.set_draw3_(0)
  phi0 = phi*numpy.pi/180.0
  theta0 = theta*numpy.pi/180.0
  pl3d.orient3(phi=phi0,theta=theta0)
  pl3d.light3()
  _change_palette(palette)
  sz = numpy.shape(z)
  if len(sz) != 2:
    raise ValueError('Input must be a 2-d array --- a surface.')
  N,M = sz
  if x is None:
    x = numpy.arange(0,N)
  if y is None:
    y = numpy.arange(0,M)
  x = numpy.squeeze(x)
  y = numpy.squeeze(y)
  if (len(numpy.shape(x)) == 1):
    x = x[:,newaxis]*numpy.ones((1,M))
  if (len(numpy.shape(y)) == 1):
    y = numpy.ones((N,1))*y[newaxis,:]
  plwf.plwf(z,y,x,shade=shade,edges=edges,ecolor=edge_color,scale=zscale)
  lims = pl3d.draw3(1)
  gist.limits(lims[0],lims[1],lims[2],lims[3])
  pl3d.gnomon(gnomon)
Example #5
def log_diff_exp(x, axis=0):
    """ Calculates the logarithm of the diffs of e to the power of input 'x'. The method tries to avoid
        overflows by using the relationship: log(diff(exp(x))) = alpha + log(diff(exp(x-alpha))).
        
    :Parameters:
        x:    data.
             -type: float or numpy array

        axis: Computes the diffs along the given axis.
             -type: int
        
    :Return:
        Logarithm of the diff of exp of x. 
       -type: float or numpy array.
        
    """
    alpha = x.max(axis) - numx.log(numx.finfo(numx.float64).max)/2.0
    if axis == 1:
        return numx.squeeze(alpha + numx.log(
                                             numx.diff(
                                                       numx.exp(x.T - alpha)
                                                       , n=1, axis=0)))
    else:
        return numx.squeeze(alpha + numx.log(
                                             numx.diff(
                                                       numx.exp(x - alpha)
                                                       , n=1, axis=0)))
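A quick sanity check for the routine above (a hedged sketch: it assumes `numx` is this module's alias for numpy, as in the surrounding library, and that `x` increases along the chosen axis so the diffs stay positive):

import numpy as np

x = np.array([[0.1, 0.4, 0.9],
              [0.5, 1.0, 2.0]])
naive = np.log(np.diff(np.exp(x), n=1, axis=0))   # direct, overflow-prone form
stable = log_diff_exp(x, axis=0)                  # alpha-shifted form from above
print(np.allclose(naive, stable))                 # expected: True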
Example #6
def CoAddFinal(frames, mode='mean', display=True):
    # co-add FINISHED, reduced spectra
    # only trick: resample onto the wavelength grid of the 1st frame
    files = np.loadtxt(frames, dtype=str, unpack=True)

    # read in first file
    wave_0, flux_0 = np.loadtxt(files[0],dtype='float',skiprows=1,
                                unpack=True,delimiter=',')

    for i in range(1,len(files)):
        wave_i, flux_i = np.loadtxt(files[i],dtype='float',skiprows=1,
                                    unpack=True,delimiter=',')

        # linear interp on to wavelength grid of 1st frame
        flux_i0 = np.interp(wave_0, wave_i, flux_i)

        flux_0 = np.dstack( (flux_0, flux_i0))

    if mode == 'mean':
        flux_out = np.squeeze(flux_0.sum(axis=2) / len(files))
    if mode == 'median':
        flux_out = np.squeeze(np.median(flux_0, axis=2))

    if display is True:
        plt.figure()
        plt.plot(wave_0, flux_out)
        plt.xlabel('Wavelength')
        plt.ylabel('Co-Added Flux')
        plt.show()

    return wave_0, flux_out
Example #7
def plotForce():
    figure(size=3,aspect=0.5)
    subplot(1,2,1)
    from EvalTraj import plotFF
    plotFF(vp=351,t=28,f=900,cm=0.6,foffset=8)
    subplot_annotate()
    
    subplot(1,2,2)
    for i in [1,2,3,4]:
        R=np.squeeze(np.load('Rdpse%d.npy'%i))
        R=stats.nanmedian(R,axis=2)[:,1:,:]
        dps=np.linspace(-1,1,201)[1:]
        plt.plot(dps,R[:,:,2].mean(0));
    plt.legend([0,0.1,0.2,0.3],loc=3) 
    i=2
    R=np.squeeze(np.load('Rdpse%d.npy'%i))
    R=stats.nanmedian(R,axis=2)[:,1:,:]
    mn=np.argmin(R,axis=1)
    y=np.random.randn(mn.shape[0])*0.00002+0.0438
    plt.plot(np.sort(dps[mn[:,2]]),y,'+',mew=1,ms=6,mec=[ 0.39  ,  0.76,  0.64])
    plt.xlabel('Displacement of Force Origin')
    plt.ylabel('Average Net Force Magnitude')
    hh=dps[mn[:,2]]
    err=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.975,hh.shape[0])
    err2=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.75,hh.shape[0])
    m=np.mean(hh)
    print(m, m-err, m+err)
    np.save('force',[m, m-err,m+err,m-err2,m+err2])
    plt.xlim([-0.5,0.5])
    plt.ylim([0.0435,0.046])
    plt.grid(b=True,axis='x')
    subplot_annotate()
    def __init__(self, endog, exog, sigma=None):
#TODO: add options igls, for iterative fgls if sigma is None
#TODO: default is sigma is none should be two-step GLS
        if sigma is not None:
            self.sigma = np.asarray(sigma)
        else:
            self.sigma = sigma
        if self.sigma is not None and not self.sigma.shape == (): #greedy logic
            nobs = int(endog.shape[0])
            if self.sigma.ndim == 1 or np.squeeze(self.sigma).ndim == 1:
                if self.sigma.shape[0] != nobs:
                    raise ValueError("sigma is not the correct dimension.  \
Should be of length %s, if sigma is a 1d array" % nobs)
            elif self.sigma.shape[0] != nobs and \
                    self.sigma.shape[1] != nobs:
                raise ValueError("expected an %s x %s array for sigma" % \
                        (nobs, nobs))
        if self.sigma is not None:
            nobs = int(endog.shape[0])
            if self.sigma.shape == ():
                self.sigma = np.diag(np.ones(nobs)*self.sigma)
            if np.squeeze(self.sigma).ndim == 1:
                self.sigma = np.diag(np.squeeze(self.sigma))
            self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(\
                    self.sigma)).T
        super(GLS, self).__init__(endog, exog)
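This constructor appears to come from statsmodels' GLS model; below is a hedged usage sketch through the public statsmodels API, with purely illustrative data (assumes statsmodels is installed).

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
n = 50
X = sm.add_constant(rng.normal(size=(n, 1)))                            # columns: [const, x]
beta = np.array([1.0, 2.0])
sigma = 0.5 ** np.abs(np.subtract.outer(np.arange(n), np.arange(n)))    # AR(1)-style error covariance
y = X @ beta + rng.multivariate_normal(np.zeros(n), sigma)

res = sm.GLS(y, X, sigma=sigma).fit()
print(res.params)   # should land roughly near [1.0, 2.0]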
Example #9
    def downsize(self, coefs, cut=None, verbose=True):
        """
        Given a set of coefs, sort the coefs and get rid of the bottom cut
        percent of variables with lowest cut coefs. Return the new coefs.
        """


        downsized_coefs = np.squeeze(np.array(coefs))

        if cut is None:
            cut = self.cut

        n_trash = int(floor(cut * self.n_features))

        if verbose:
            print("Downsampling...")
            print("Current shape:", self.Xview.shape)
            print("Removing {} columns... ".format(n_trash))


        self.tail_start -= n_trash

        if self.tail_start <= 0:
            raise ValueError("Trying to downsize more variables than present")

        # get sorted order of coefs
        csort = np.squeeze(np.argsort(np.argsort(np.absolute(coefs))))
        keep_feature = np.squeeze(csort >= n_trash)

        tail_start = self.tail_start

        # columns in the tail we want to keep
        keep_idx = np.squeeze(
            np.where(keep_feature[tail_start:tail_start+n_trash]))
        keep_idx += tail_start

        # columns we want to move to the tail
        trash_idx = np.squeeze(np.where(keep_feature[0:tail_start] == False))
        if len(trash_idx) != len(keep_idx):
            raise ValueError("trash_idx and keep_idx not the same length")

        # swap the columns
        for trash, keep in zip(trash_idx, keep_idx):
            #print(keep, trash)
            keep_col = self.X[:, keep].copy()
            self.X[:, keep] = self.X[:, trash]
            self.X[:, trash] = keep_col
            self.orig_feature_index[trash], self.orig_feature_index[keep] = self.orig_feature_index[keep], self.orig_feature_index[trash]
            downsized_coefs[trash], downsized_coefs[keep] = downsized_coefs[keep], downsized_coefs[trash]
            if self.test_subj is not None:
                self.X_test[:, (trash, keep)] = self.X_test[:, (keep, trash)]

        self.n_features -= n_trash
        self.Xview = self.X.view()[:, :self.n_features]
        if self.test_subj is not None:
            self.X_testview = self.X_test.view()[:, :self.n_features]

        print("New Xview shape:", self.Xview.shape)

        return downsized_coefs[:-n_trash]
Example #10
 def get_contents(self, height, width):
     """Returns the contents (pixel values) of both images of the pair as
     one numpy array.
     Args:
         height: Output height of each image.
         width: Output width of each image.
     Returns:
         Numpy array of shape (2, height, width) with dtype uint8.
     """
     img1 = self.image1.get_content()
     img2 = self.image2.get_content()
     if img1.shape[0] != height or img1.shape[1] != width:
         # imresize can only handle (height, width) or (height, width, 3),
         # not (height, width, 1), so squeeze away the last channel
         if IMAGE_CHANNELS == 1:
             img1 = misc.imresize(np.squeeze(img1), (height, width))
             img1 = img1[:, :, np.newaxis]
         else:
             img1 = misc.imresize(img1, (height, width))
     if img2.shape[0] != height or img2.shape[1] != width:
         if IMAGE_CHANNELS == 1:
             img2 = misc.imresize(np.squeeze(img2), (height, width))
             img2 = img2[:, :, np.newaxis]
         else:
             img2 = misc.imresize(img2, (height, width))
     return np.array([img1, img2], dtype=np.uint8)
def _field_gradient_jac(ref, target):
    """
    Given a reference field ref and a target field target
    compute the jacobian of the target with respect to ref

    Parameters
    ----------
    ref: Field instance that yields the topology of the space
    target: array of shape (ref.V, dim)

    Returns
    -------
    fgj: array of shape (ref.V) that gives the jacobian
         implied by the ref.field->target transformation.
    """
    import numpy.linalg as nl
    n = ref.V
    xyz = ref.field
    dim = xyz.shape[1]
    fgj = []
    ln = ref.list_of_neighbors()
    for i in range(n):
        j = ln[i]
        if np.size(j) > dim - 1:
            dx = np.squeeze(xyz[j] - xyz[i])
            df = np.squeeze(target[j] - target[i])
            FG = np.dot(nl.pinv(dx), df)
            fgj.append(nl.det(FG))
        else:
            fgj.append(1)

    fgj = np.array(fgj)
    return fgj
Example #12
def main():
    x = np.loadtxt(sys.argv[1], skiprows=1, delimiter=",")
    x = np.array(x)
    # separating the class 1 rows from class 2 rows
    indic = np.where(x[:,1] == 1)
    x_one = np.squeeze(x[indic,:])
    indic = np.where(x[:,1] == 2)
    x_two = np.squeeze(x[indic,:])
    
    m = np.loadtxt(sys.argv[2])
    m = np.array(m)
    var = np.loadtxt(sys.argv[3])
    var = np.array(var)
    w = np.loadtxt(sys.argv[4])
    w = np.array(w)
    its = sys.argv[5]
    its = np.int32(its)
    
    ll = gmmest(x_two[:,0], m, var, w, its)
    
    plt.plot(ll[3])
    plt.ylabel('log likelihood')
    plt.xlabel('iterations')
    plt.show()
    print "mu: ", ll[0], " sigmasq: ", ll[1], " wt: ", ll[2], " ll: ", ll[3]
Example #13
 def cos_distance(self, strike1, dip1, strike2, dip2):
     """Angular distance betwen the poles of two planes."""
     xyz1 = sph2cart(*mplstereonet.pole(strike1, dip1))
     xyz2 = sph2cart(*mplstereonet.pole(strike2, dip2))
     r1, r2 = np.linalg.norm(xyz1), np.linalg.norm(xyz2)
     dot = np.dot(np.squeeze(xyz1), np.squeeze(xyz2)) / r1 / r2
     return np.abs(np.degrees(np.arccos(dot)))
Example #14
    def reparametrization_LS_assembler(self):
        """In this function we compute the arclength reparametrization by mean of a Least Square
        problem."""

        s_array = np.linspace(0, self.points_s[-1,1], self.arcfactor * self.n_dofs)
        #self.point_ls = list()
        self.point_ls = np.asmatrix(np.zeros([s_array.shape[0],self.dim + 2]))
        tval = np.zeros(s_array.shape[0])
        sval = np.linspace(0,1,s_array.shape[0])
        for i in range(0, s_array.shape[0]):
            tval[i] = self.find_s(s_array[i])
            #print tval
            # The curve should have a value (or __call__ if preferred) method that we can query to know its value in space
        self.point_ls[:,0:self.dim] = np.squeeze(self.curve(tval).transpose())
        self.rhsinit = np.squeeze(self.curve(tval).transpose())
        #self.point_ls[:,self.dim] = s_array[:,np.newaxis]
        self.point_ls[:,self.dim] = tval[:,np.newaxis]
            # In point_ls we store the value in space, the s_array and the tval obtained
            #self.point_ls.append([cval, s_array[i], tval])

        #self.point_ls = np.array(self.point_ls)
        # We compute the number of elements in the system rectangular matrix (Bmatrix); it will have dim*s_array.shape[0] rows and dim*nknot columns.
        # We want it to be rectangular because we are approximating its resolution, so we search for something that solves the reparametrization in a
        # least-squares sense.
        #Bmatrixnumelem = self.dim * s_array.shape[0] * self.n_dofs * self.dim
        #self.matrixB = np.zeros(Bmatrixnumelem).reshape(self.dim * s_array.shape[0], self.n_dofs * self.dim)
        #self.rhsinit = np.zeros(self.dim * s_array.shape[0])
        self.matrixB = interpolation_matrix(self.vector_space, sval)
Example #15
 def visualize_ns_old(self, term, points=200):
     """
     Use randomly selected coordinates instead of most active
     """
     if term in self.no.term:
         term_index = self.no._ns['features_df'].columns.get_loc(term)
         rand_point_inds = np.random.random_integers(0, len(np.squeeze(zip(self.no._ns['mni_coords'].data))), points)
         rand_points = np.squeeze(zip(self.no._ns['mni_coords'].data))[rand_point_inds]
         weights = []
         inds_of_real_points_with_no_fucking_missing_study_ids = []
         for rand_point in range(len(rand_points)):
             if len(self.no.coord_to_ns_act(rand_points[rand_point].astype(list))) > 0:
                 inds_of_real_points_with_no_fucking_missing_study_ids.append(rand_point_inds[rand_point])
                 weights.append(self.no.coord_to_ns_act(rand_points[rand_point].astype(list))[term_index])
         fig = plt.figure()
         ax = fig.add_subplot(111, projection='3d')
         colors = cm.jet(weights/max(weights))
         color_map = cm.ScalarMappable(cmap=cm.jet)
         color_map.set_array(weights)
         fig.colorbar(color_map)
         x = self.no._ns['mni_coords'].data[inds_of_real_points_with_no_fucking_missing_study_ids, 0]
         y = self.no._ns['mni_coords'].data[inds_of_real_points_with_no_fucking_missing_study_ids, 1]
         z = self.no._ns['mni_coords'].data[inds_of_real_points_with_no_fucking_missing_study_ids, 2]
     else:
         raise ValueError('Term '+term + ' has not been initialized. '
                                         'Use get_ns_act(' + term + ')')
     ax.scatter(x, y, z, c=colors, alpha=0.4)
     ax.set_title('Estimation of ' + term)
Example #16
 def getEPSFourierCoeffs(self, wl, n, anisotropic=True):
     """Return the Fourier coefficients of eps and eps**-1, orders [-n,n]."""
     nood = 2 * n + 1
     hmax = nood - 1
     if not anisotropic:
         # isotropic
         rix1 = self.mat1.n(wl)
         rix2 = self.mat2.n(wl)
         f = self.dc
         h = numpy.arange(-hmax, hmax + 1)
         EPS = (rix1 ** 2 - rix2 ** 2) * f * \
             numpy.sinc(h * f) + rix2 ** 2 * (h == 0)
         EPS1 = (rix1 ** -2 - rix2 ** -2) * f * \
             numpy.sinc(h * f) + rix2 ** -2 * (h == 0)
         return EPS, EPS1
     else:
         # anisotropic
         EPS = numpy.zeros((3, 3, 2 * hmax + 1), dtype=complex)
         EPS1 = numpy.zeros_like(EPS)
         eps1 = numpy.squeeze(
             self.mat1.epsilonTensor(wl)) / EMpy.constants.eps0
         eps2 = numpy.squeeze(
             self.mat2.epsilonTensor(wl)) / EMpy.constants.eps0
         f = self.dc
         h = numpy.arange(-hmax, hmax + 1)
         for ih, hh in enumerate(h):
             EPS[:, :, ih] = (eps1 - eps2) * f * \
                 numpy.sinc(hh * f) + eps2 * (hh == 0)
             EPS1[:, :, ih] = (
                 scipy.linalg.inv(eps1) - scipy.linalg.inv(eps2)
             ) * f * numpy.sinc(hh * f) + scipy.linalg.inv(eps2) * (hh == 0)
         return EPS, EPS1
Example #17
def generate_ic_grid(dR=0.1*u.kpc, dRdot=5.*u.km/u.s):
    # spacing between IC's in R and Rdot
    dR = dR.decompose(usys).value
    dRdot = dRdot.decompose(usys).value
    max_Rdot = (50*10*u.km/u.s).decompose(usys).value
    max_R = (15*u.kpc).decompose(usys).value

    # from the paper
    E = (600*100*(u.km/u.s)**2).decompose(usys).value
    Lz = (10.*10.*u.km*u.kpc/u.s).decompose(usys).value # typo in paper? km/kpc instead of km*kpc
    z = 0.
    params = oblate_params

    w0s = []
    for R in np.arange(0, max_R, dR):
        # zero velocity curve
        V = zotos_potential(R, z, *params)
        ZVC_Rdot = np.squeeze(np.sqrt(2*(E-V) - Lz**2/R**2))
        for Rdot in np.arange(0, max_Rdot, dRdot):
            if Rdot > ZVC_Rdot or R < 0.2 or R >= 13: continue
            zdot = np.squeeze(np.sqrt(2*(E - V) - Lz**2/R**2 - Rdot**2))
            w0 = [R,z,Rdot,zdot]
            w0s.append(w0)
    w0s = np.array(w0s)
    return w0s, Lz
Example #18
def weightMatrix(mtx3d,bgq):
    """
    Calculation of the weight tensor, which replaces unity values in
    mtx3d with the position's relative entropy.

    Helper function of Rama Ranganathan MATLAB sca5 function. 
    """
    nseq,npos,naa = mtx3d.shape
    
    mtx3d_mat = np.reshape(mtx3d.transpose(2,1,0),(naa*npos,nseq),order='F')
    f1_v =np.sum(mtx3d_mat.T,axis=0)/nseq
    w_v = np.squeeze(np.ones((1,naa*npos)))
    q1_v = np.squeeze(np.tile(bgq,(1,npos)))

    for x in range(naa*npos):
        q = q1_v[x]
        f = f1_v[x]
        # here I hard coded their metric DerivEntropy
        if f > 0 and f < 1:
            w_v[x] = np.abs(np.log(f*(1-q)/(q*(1-f))))
        else: 
            w_v[x] = 0.

    W = np.zeros((npos,naa))
    for i in range(npos):
        for j in range(naa):
            W[i,j] = w_v[naa*i+j]
            
    Wx = np.tile(np.reshape(W,(1, npos, naa),order='F'),(nseq,1,1))*mtx3d

    return Wx,W
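A small synthetic sanity check (the shapes below are assumptions for illustration, not real alignment data): a random one-hot "alignment" of 5 sequences, 4 positions, and 3 residue types with a uniform background distribution.

import numpy as np

rng = np.random.default_rng(0)
nseq, npos, naa = 5, 4, 3
mtx3d = np.zeros((nseq, npos, naa))
aa = rng.integers(0, naa, size=(nseq, npos))
mtx3d[np.arange(nseq)[:, None], np.arange(npos)[None, :], aa] = 1.0   # one-hot encode

bgq = np.full(naa, 1.0 / naa)        # uniform background frequencies
Wx, W = weightMatrix(mtx3d, bgq)
print(Wx.shape, W.shape)             # (5, 4, 3) (4, 3)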
 def Load_Answer(self, Str_DataName, Int_DataNum):
     if Str_DataName == "PPG_KW_long":
         Str_AnnoName = "../Data/" + str(Int_DataNum) + "_Anno.txt"
         List_Anno = open(Str_AnnoName,'r').read()
         List_Anno = List_Anno.split("\n")
         List_Anno = [int(x) for x in List_Anno]
         Array_Anno = np.array(List_Anno)
         Array_Anno = np.unique(Array_Anno)
     elif Str_DataName == "PPG_Walk":
         Str_AnnoName = "../Data/" + Str_DataName + str(Int_DataNum)+ "_Anno.txt"
         List_Anno = open(Str_AnnoName,'r').read()
         List_Anno = List_Anno.split("\n")
         List_Anno = [int(x) for x in List_Anno]
         Array_Anno = np.array(List_Anno)
         Array_Anno = np.unique(Array_Anno)
     elif Str_DataName == "PPG_Label":
         Str_DataPathABP = "../Data/BeatDetection/ABP"
         Str_DataPathICP = "../Data/BeatDetection/ICP"
         MatFile_ABP = scipy.io.loadmat(Str_DataPathABP)
         Int_CutIdx = 125*3600
         if Int_DataNum == 1:
             Array_Anno = np.squeeze(np.array(MatFile_ABP['dDT1']))
             Array_Anno = np.array([int(val) for val in Array_Anno if val < Int_CutIdx])
         elif Int_DataNum == 2:
             Array_Anno = np.squeeze(np.array(MatFile_ABP['dDT2']))
             Array_Anno = np.array([int(val) for val in Array_Anno if val < Int_CutIdx])
     return Array_Anno
Example #20
 def ll2ij(self, lon, lat, mask=None, cutoff=None, nei=1, all_nei=False,
           return_dist=False):
     """Reproject a lat-lon vector to i-j grid coordinates"""
     self.add_ij()
     if mask is not None:
         self.add_kd(mask)
     elif not hasattr(self, 'kd'):
         self.add_kd()
     dist,ij = self.kd.query(list(np.vstack((lon,lat)).T), nei)
     #if cutoff is not None:
     #    ij[dist>cutoff] = 0
     if nei == 1 :
         ivec = self.kdijvec[ij[:] - 1][:, 0]
         jvec = self.kdijvec[ij[:] - 1][:, 1]
         if cutoff is not None:
             ivec[dist>cutoff] = -999
             jvec[dist>cutoff] = -999
     elif all_nei == False:
         ivec = np.squeeze(self.kdijvec[ij[:,:]-1])[:, nei-1, 0]
         jvec = np.squeeze(self.kdijvec[ij[:,:]-1])[:, nei-1, 1]
         dist = np.squeeze(dist[:, nei-1])
     else:
         ivec = np.squeeze(self.kdijvec[ij[:,:]-1])[:, :, 0]
         jvec = np.squeeze(self.kdijvec[ij[:,:]-1])[:, :, 1]
     if return_dist == True:
         return ivec,jvec,dist
     else:
         return ivec,jvec
Example #21
def value_for_all(estimator,N):
    from scipy.sparse import csr_matrix
    ch_left = estimator.tree_.children_left
    ch_right = estimator.tree_.children_right
    (cl,) = np.where(ch_left!=-1)
    (cr,) = np.where(ch_right!=-1)
    cap = estimator.tree_.capacity
    dis_node = np.zeros((cap,estimator.tree_.n_classes))
    A = np.zeros([cap,cap])
    D = A
    A = csr_matrix(A)
    A[cl,ch_left[cl]] = 1
    A[cr,ch_right[cr]] = 1
    B = A
    C = B
    while(C.sum()!=0):
        C = A*C
        B = B + C
    I,J = B.nonzero()
    D[I,J] = 1
    (I,) = np.where(ch_left==-1)
    dis_node[I,:] = np.squeeze(estimator.tree_.value[I])
    for i in I:
        dis_node[i,:] = dis_node[i,:]/dis_node[i,:].sum()
    (remain1,) = np.where(ch_left!=-1)
    for i in remain1:
        (I,) = np.where(D[i,:]==1)
        dis_node[i,:] = np.sum(np.squeeze(estimator.tree_.value[I]),axis = 0)
        dis_node[i,:] = dis_node[i,:]/dis_node[i,:].sum()
    Dis_node = np.zeros((cap,N))
    Dis_node[:,estimator.classes_.astype(int)] = dis_node
    return Dis_node
Example #22
def sdss_source_props_ota(img,ota):
    """
    Use photutils to get the elongation of all of the sdss sources
    can maybe use for point source filter
    Also fit a gaussian along a row and col of pixels passing
    through the center of the star
    """

    image = odi.reprojpath+'reproj_'+ota+'.'+str(img[16:])
    hdulist = odi.fits.open(image)
    data = hdulist[0].data

    sdss_source_file = odi.coordspath+'reproj_'+ota+'.'+str(img[16:-5])+'.sdssxy'

    x,y,ra,dec,g,g_err,r,r_err = np.loadtxt(sdss_source_file,usecols=(0,1,2,3,
                                                                      6,7,8,9),unpack=True)

    box_centers = list(zip(y,x))
    box_centers = np.reshape(box_centers,(len(box_centers),2))
    source_dict = {}
    total_fwhm = []
    for i,center in enumerate(box_centers):
        x1 = center[0]-50
        x2 = center[0]+50
        y1 = center[1]-50
        y2 = center[1]+50

        #print x1,x2,y1,y2,center
        box = data[x1:x2,y1:y2]
        col = data[x1:x2,int(center[1]-0.5):int(center[1]+0.5)]
        row = data[int(center[0]-0.5):int(center[0]+0.5),y1:y2]
        row = np.squeeze(row) - np.median(row)
        col = np.squeeze(col) - np.median(col)
        g_init = models.Gaussian1D(amplitude=250., mean=50, stddev=2.)
        fit_g = fitting.LevMarLSQFitter()
        pix = np.linspace(0,100,num=100)
        g_row = fit_g(g_init, pix, row)
        g_col = fit_g(g_init, pix, col)
        mean_fwhm = 0.5*(g_row.stddev*2.355+g_col.stddev*2.355)
        total_fwhm.append(mean_fwhm)
        #odi.plt.imshow(box)
        #odi.plt.plot(row)
        #odi.plt.plot(pix,g(pix))
        #plt.imshow(row2)
        #plt.show()
        mean, median, std = odi.sigma_clipped_stats(box, sigma=3.0)
        threshold = median + (std * 2.)
        segm_img = odi.detect_sources(box, threshold, npixels=20)
        source_props = odi.source_properties(box,segm_img)
        if len(source_props) > 0:
            columns = ['xcentroid', 'ycentroid','elongation','semimajor_axis_sigma','semiminor_axis_sigma']
            if i == 0:
                source_tbl = odi.properties_table(source_props,columns=columns)
            else:
                source_tbl.add_row((source_props[0].xcentroid,source_props[0].ycentroid,
                                    source_props[0].elongation,source_props[0].semimajor_axis_sigma,
                                    source_props[0].semiminor_axis_sigma))
    elong_med,elong_std = np.median(source_tbl['elongation']),np.std(source_tbl['elongation'])
    hdulist.close()
    return elong_med,elong_std,np.mean(total_fwhm),np.std(total_fwhm)
Example #23
    def __init__(self, timber_variable_bbq, beam=0):

        if not (beam == 1 or beam == 2):
            raise ValueError('You need to specify which beam! (1 or 2)')
        

        if type(timber_variable_bbq) is dict:
            dict_timber = timber_variable_bbq
        
        self.beam = beam
        
        self.amp_1 = np.squeeze(np.array(
            dict_timber['LHC.BQBBQ.CONTINUOUS_HS.B{:d}:EIGEN_AMPL_1'.format(beam)][1]))
        self.amp_2  = np.squeeze(np.array(
            dict_timber['LHC.BQBBQ.CONTINUOUS_HS.B{:d}:EIGEN_AMPL_2'.format(beam)][1]))
        
        self.xamp_1 = np.squeeze(np.array(
            dict_timber['LHC.BQBBQ.CONTINUOUS_HS.B{:d}:EIGEN_X_AMPL_1'.format(beam)][1]))
        self.xamp_2 = np.squeeze(np.array(
            dict_timber['LHC.BQBBQ.CONTINUOUS_HS.B{:d}:EIGEN_X_AMPL_2'.format(beam)][1]))
        
        self.qh  = dict_timber['LHC.BQBBQ.CONTINUOUS_HS.B{:d}:TUNE_H'.format(beam)][1]
        self.qv  = dict_timber['LHC.BQBBQ.CONTINUOUS_HS.B{:d}:TUNE_V'.format(beam)][1]
        
        self.q1  = dict_timber['LHC.BQBBQ.CONTINUOUS_HS.B{:d}:EIGEN_FREQ_1'.format(beam)][1]
        self.q2  = dict_timber['LHC.BQBBQ.CONTINUOUS_HS.B{:d}:EIGEN_FREQ_2'.format(beam)][1]
        
        self.t_stamps = np.ravel(np.squeeze(np.array(
            dict_timber['LHC.BQBBQ.CONTINUOUS_HS.B{:d}:EIGEN_AMPL_1'.format(beam)][0])))
        
        self.t_str=[datetime.datetime.fromtimestamp(self.t_stamps[ii]) for ii in np.arange(len(self.t_stamps))]
Example #24
    def __init__(self, complete_path):

        if complete_path.endswith('.mat.gz'):
            temp_filename = complete_path.split('.gz')[0]
            with open(temp_filename, "wb") as tmp:
                shutil.copyfileobj(gzip.open(complete_path), tmp)
            dict_mr = sio.loadmat(temp_filename)
            os.remove(temp_filename)
        elif complete_path.endswith('.mat'):
            dict_mr = sio.loadmat(complete_path)
        else:
            print('Unknown file extension for MountainRange file. Should be ' +
                  '.mat or .mat.gz')
        self.value = dict_mr['value']
        self.trigger_stamp = dict_mr['triggerStamp']
        self.SC_numb = np.int(np.squeeze(dict_mr['superCycleNb']))
        self.first_trigger_t_stamp_unix = dict_mr['first_trigger_t_stamp_unix']
        self.sample_interval = float(np.squeeze(dict_mr['sampleInterval']))
        self.first_sample_time = dict_mr['firstSampleTime']
        self.sensitivity = dict_mr['sensitivity']
        self.offset = dict_mr['offset']
        self.SPSuser = dict_mr['SPSuser']
        self.t_stamp_unix = dict_mr['t_stamp_unix']

        self.time_axis = np.float_(range(self.value.shape[1]))*self.sample_interval-self.value.shape[1]*self.sample_interval/2.
Example #25
def kalman(x, u, P, A, B, C, W, V, z=np.NaN):
    """
    This function returns an optimal expected value of the state and covariance
    error matrix given an update and system parameters.

    x:   Estimate of state at time t-1.
    u:   Input at time t-1.
    P:   Estimate of error covariance matrix at time t-1.
    A:   Discrete time state transition matrix at time t-1.
    B:   Input to state model matrix at time t-1.
    C:   Observation model matrix at time t.
    W:   Process noise covariance at time t-1.
    V:   Measurement noise covariance at time t.
    z:   Measurement at time t.

    returns: (x,P) tuple
    x: Updated estimate of state at time t.
    P: Updated estimate of error covariance matrix at time t.

    """

    x = np.atleast_2d(x)
    u = np.atleast_2d(u)
    P = np.atleast_2d(P)
    A = np.atleast_2d(A)
    B = np.atleast_2d(B)
    x_p = np.dot(A, x) + np.dot(B, u)  # Prediction of estimated state vector
    P_p = np.dot(A, np.dot(P, A.T)) + W  # Prediction of error covariance matrix

    if np.any(np.isnan(z)):
        return (x_p, P_p)
    else:
        C = np.atleast_2d(C)
        W = np.atleast_2d(W)
        V = np.atleast_2d(V)
        z = np.atleast_2d(z)

        [M, N] = np.shape(C)

        if (W.shape[0] == 1 or W.shape[1] == 1) and not (W.shape[0] == 1 and W.shape[1] == 1):
            W = np.diag(np.squeeze(W))

        if (V.shape[0] == 1 or V.shape[1] == 1) and not (V.shape[0] == 1 and V.shape[1] == 1):
            V = np.diag(np.squeeze(V))

        I = np.eye(N)  # N x N identity matrix

        S = np.dot(C, np.dot(P_p, C.T)) + V  # Sum of error variances
        S_inv = np.linalg.inv(S)  # Inverse of sum of error variances
        K = np.dot(P_p, np.dot(C.T, S_inv))  # Kalman gain
        r = z - np.dot(C, x_p)  # Prediction residual
        w = np.dot(-K, r)  # Process error
        x = x_p - w  # Update estimated state vector
        # v = z - np.dot(C, x)  # Measurement error
        if np.any(np.isnan(np.dot(K, V))):
            P = P_p
        else:
            # Updated error covariance matrix
            P = np.dot((I - np.dot(K, C)), np.dot(P_p, (I - np.dot(K, C)).T)) + np.dot(K, np.dot(V, K.T))
        return (x, P)
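A minimal smoke test with illustrative, assumed matrices (not taken from the original source): a constant-velocity model in which only position is measured.

import numpy as np

dt = 1.0
A = np.array([[1.0, dt], [0.0, 1.0]])   # state transition for [position, velocity]
B = np.zeros((2, 1))                    # no control input
C = np.array([[1.0, 0.0]])              # observe position only
W = 1e-4 * np.eye(2)                    # process noise covariance
V = np.array([[0.25]])                  # measurement noise covariance

x = np.zeros((2, 1))                    # initial state estimate
P = np.eye(2)                           # initial error covariance
u = np.zeros((1, 1))

rng = np.random.default_rng(0)
for t in range(20):
    z = 0.5 * t + rng.normal(scale=0.5)  # noisy position of an object moving at 0.5 per step
    x, P = kalman(x, u, P, A, B, C, W, V, z)
print(x.ravel())  # estimated [position, velocity]; the velocity should settle near 0.5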
Example #26
def gray2rgb(image):
    """Create an RGB representation of a gray-level image.

    Parameters
    ----------
    image : array_like
        Input image of shape ``(M, N [, P])``.

    Returns
    -------
    rgb : ndarray
        RGB image of shape ``(M, N [, P], 3)``.

    Raises
    ------
    ValueError
        If the input is not a 2- or 3-dimensional image.

    """
    if np.squeeze(image).ndim == 3 and image.shape[2] in (3, 4):
        return image
    elif image.ndim != 1 and np.squeeze(image).ndim in (1, 2, 3):
        image = image[..., np.newaxis]
        return np.concatenate(3 * (image,), axis=-1)
    else:
        raise ValueError("Input image expected to be RGB, RGBA or gray.")
  def testTrainNetwork(self, distribution, optimizer_fn,
                       use_callable_loss=True):
    with distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn, use_bias=True, use_callable_loss=use_callable_loss)
      iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn())

      def run_step():
        return control_flow_ops.group(
            distribution.experimental_local_results(
                distribution.extended.call_for_each_replica(
                    model_fn, args=(iterator.get_next(),))))

      if not context.executing_eagerly():
        with self.cached_session() as sess:
          sess.run(iterator.initialize())
          run_step = sess.make_callable(run_step())
        self.evaluate(variables.global_variables_initializer())

      weights, biases = [], []
      for _ in range(10):
        run_step()

        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)
Example #28
 def action_value(self, obs):
     # executes call() under the hood
     logits, value = self.predict(obs)
     action = self.dist.predict(logits)
     # a simpler option, will become clear later why we don't use it
     # action = tf.random.categorical(logits, 1)
     return np.squeeze(action, axis=-1), np.squeeze(value, axis=-1)
Example #29
def plsurf(z,x=None,y=None,win=None,shade=0,edges=1,edge_color='fg',phi=-45.0,
         theta=30.0,zscale=1.0,palette=None,gnomon=0,animate=False,limits=True, ireg=None):
  '''Plot a 3-D wire-frame surface z=f(x,y)
  '''
  if win is None:
    pass
    #pl3d.window3()
  else:
    pl3d.window3(win)
  pl3d.set_draw3_(0)
  phi0 = phi*numpy.pi/180.0
  theta0 = theta*numpy.pi/180.0
  pl3d.orient3(phi=phi0,theta=theta0)
  pl3d.light3()
  _change_palette(palette)
  sz = numpy.shape(z)
  if len(sz) != 2:
    raise ValueError('Input must be a 2-d array --- a surface.')
  N,M = sz
  if x is None:
    x = numpy.arange(0,N)
  if y is None:
    y = numpy.arange(0,M)
  x = numpy.squeeze(x)
  y = numpy.squeeze(y)
  if (len(numpy.shape(x)) == 1):
    x = x[:,newaxis]*numpy.ones((1,M))
  if (len(numpy.shape(y)) == 1):
    y = numpy.ones((N,1))*y[newaxis,:]
  plwf.plwf(z,y,x,shade=shade,edges=edges,ecolor=edge_color,scale=zscale, ireg=ireg)
  # if animate, the application is responsible to fma
  lims = pl3d.draw3(not animate)
  if limits:
    gist.limits(lims[0],lims[1],lims[2],lims[3])
  pl3d.gnomon(gnomon)
Example #30
def plot_animation_each_neuron(name_s, save_name, print_loss=False):
    """Plot the movie for all the networks in the information plane"""
    # If we want to print the loss function also
    # The bins by which we extend the x axis of the accuracy each time
    epochs_bins = [0, 500, 1500, 3000, 6000, 10000, 20000]
    data_array = utils.get_data(name_s[0][0])
    data = np.squeeze(data_array['information'])

    f, (axes) = plt.subplots(1, 1)
    axes = [axes]
    f.subplots_adjust(left=0.14, bottom=0.1, right=.928, top=0.94, wspace=0.13, hspace=0.55)
    colors = LAYERS_COLORS
    #new/old version
    Ix = np.squeeze(data[0,:, :, :])
    Iy = np.squeeze(data[1,:, :, :])
    # Interpolation of the samplings (because we don't calculate the information in each epoch)
    #interp_data_x = interp1d(epochsInds,  Ix, axis=1)
    #interp_data_y = interp1d(epochsInds,  Iy, axis=1)
    #new_x = np.arange(0,epochsInds[-1])
    #new_data  = np.array([interp_data_x(new_x), interp_data_y(new_x)])
    """"
    train_data = interp1d(epochsInds,  np.squeeze(train_data), axis=1)(new_x)
    test_data = interp1d(epochsInds,  np.squeeze(test_data), axis=1)(new_x)

    if print_loss:
        loss_train_data =  interp1d(epochsInds,  np.squeeze(loss_train_data), axis=1)(new_x)
        loss_test_data=interp1d(epochsInds,  np.squeeze(loss_test_data), axis=1)(new_x)
    """
    line_ani = animation.FuncAnimation(f, update_line_each_neuron, Ix.shape[1], repeat=False,
                                       interval=1, blit=False, fargs=(print_loss, Ix, axes,Iy,train_data,test_data,epochs_bins, loss_train_data,loss_test_data, colors,epochsInds))
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=100)
    #Save the movie
    line_ani.save(save_name+'_movie.mp4',writer=writer,dpi=250)
    plt.show()
Example #31
def normalize_weight_shape(w: np.ndarray, n_samples: int,
                           n_tasks: int) -> np.ndarray:
    """A utility function to correct the shape of the weight array.

  This utility function is used to normalize the shapes of a given
  weight array.

  Parameters
  ----------
  w: np.ndarray
    `w` can be `None` or a scalar or a `np.ndarray` of shape
    `(n_samples,)` or of shape `(n_samples, n_tasks)`. If `w` is a
    scalar, it's assumed to be the same weight for all samples/tasks.
  n_samples: int
    The number of samples in the dataset. If `w` is not None, we should
    have `n_samples = w.shape[0]` if `w` is a ndarray
  n_tasks: int
    The number of tasks. If `w` is 2d ndarray, then we should have
    `w.shape[1] == n_tasks`.

  Examples
  --------
  >>> import numpy as np
  >>> w_out = normalize_weight_shape(None, n_samples=10, n_tasks=1)
  >>> (w_out == np.ones((10, 1))).all()
  True

  Returns
  -------
  w_out: np.ndarray
    Array of shape `(n_samples, n_tasks)`
  """
    if w is None:
        w_out = np.ones((n_samples, n_tasks))
    elif isinstance(w, np.ndarray):
        if len(w.shape) == 0:
            # scalar case
            w_out = w * np.ones((n_samples, n_tasks))
        elif len(w.shape) == 1:
            if len(w) != n_samples:
                raise ValueError("Length of w isn't n_samples")
            # per-example case
            # This is a little arcane but it repeats w across tasks.
            w_out = np.tile(w, (n_tasks, 1)).T
        elif len(w.shape) == 2:
            if w.shape == (n_samples, 1):
                # If w.shape == (n_samples, 1) handle it as 1D
                w = np.squeeze(w, axis=1)
                w_out = np.tile(w, (n_tasks, 1)).T
            elif w.shape != (n_samples, n_tasks):
                raise ValueError(
                    "Shape for w doesn't match (n_samples, n_tasks)")
            else:
                # w.shape == (n_samples, n_tasks)
                w_out = w
        else:
            raise ValueError("w must be of dimension 1, 2, or 3")
    else:
        # scalar case
        w_out = w * np.ones((n_samples, n_tasks))
    return w_out
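A further usage sketch for the 1-D case: a per-sample weight vector is tiled across tasks.

import numpy as np

w = np.arange(4, dtype=float)                       # one weight per sample
w_out = normalize_weight_shape(w, n_samples=4, n_tasks=3)
print(w_out.shape)                                  # (4, 3)
print(np.allclose(w_out[:, 0], w))                  # True: each task shares the same weights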
Example #32
# load packages
import numpy as np
import scipy.stats as st
from matplotlib import pyplot as plt
import matplotlib as mpl
mpl.rc('font', size=16,
       weight='bold')  #set default font size and weight for plots

# We will load in the NINO3.4 ENSO index from Jan. 1958 - Dec. 2019.

# In[2]:

# load data
filename = 'NINO34_monthly_Jan1958_Dec2019.csv'
ENSO = np.squeeze(np.genfromtxt(filename, delimiter=','))

# let's also reshape ENSO into one long time series
Ny, Nm = ENSO.shape
ENSO = np.reshape(ENSO, Ny * Nm)

# What does this time series look like? Can we see different time scales of variability in the data?

# In[3]:

# plot ENSO time series
plt.figure(figsize=(12, 5))
plt.plot(ENSO)
plt.title('ENSO Time Series (1958-2019)')
plt.xlabel('Months')
plt.xlim(0, len(ENSO))
Example #33
        skiprows=1)
    if True:
        from lib.pair_matching import RT_transform

        print("trans: {}".format(pose_src[:, -1]))
        print("euler: {}".format(RT_transform.mat2euler(pose_src[:, :3])))

    pose_tgt = np.loadtxt(
        "/home/yili/PoseEst/render_pangolin/synthesize/train/002_master_chef_can/{}_pose.txt"
        .format(idx2),
        skiprows=1)
    K = np.array([[1066.778, 0, 312.9869], [0, 1067.487, 241.3109], [0, 0, 1]])
    t = time()
    flow, visible = calc_flow(depth_src, pose_src, pose_tgt, K, depth_tgt)
    print(time() - t)
    a = np.where(np.squeeze(visible[:, :]))
    print(a[0][:20])
    print(a[1][:20])
    import matplotlib.pyplot as plt

    fig = plt.figure()
    plt.axis("off")
    fig.add_subplot(2, 4, 1)
    plt.imshow(im_src)
    fig.add_subplot(2, 4, 2)
    plt.imshow(im_tgt)
    fig.add_subplot(2, 4, 3)
    plt.imshow(depth_src)
    fig.add_subplot(2, 4, 4)
    plt.imshow(depth_tgt)
Example #34
st.title('Neural Network Visualizer')
st.sidebar.markdown('# Input Image')

if st.button('Get random predictions'):
    response = requests.post(URL, data={})
    # print(response.text)
    response = json.loads(response.text)
    preds = response.get('prediction')
    image = response.get('image')
    image = np.reshape(image, (28, 28))

    st.sidebar.image(image, width=150)

    for layer, p in enumerate(preds):
        numbers = np.squeeze(np.array(p))

        plt.figure(figsize=(32, 4))

        if layer == 2:
            row = 1
            col = 10
        else:
            row = 2
            col = 16

        for i, number in enumerate(numbers):
            plt.subplot(row, col, i + 1)
            plt.imshow((number * np.ones((8, 8, 3))).astype('float32'), cmap='binary')
            plt.xticks([])
            plt.yticks([])
chkeps=1

#define periodic dof by single cell
dof=np.zeros(ny*nx*numlayers)
for iy in range(ny):
    for ix in range(nx):
        for il in range(numlayers):
            i=il + numlayers*ix + numlayers*nx*iy
            if (ix-nx/2)**2 + (iy-ny/2)**2 <= 1000**2:
                #dof[i]=round(np.random.rand())
                #dof[i]=np.random.rand()
                dof[i]=0.5
fid=hp.File('dof_singlecell.h5','w')
tmp=dof.reshape((ny,nx,numlayers))
for il in range(numlayers):
    fid.create_dataset('layer'+str(il),data=np.squeeze(tmp[:,:,il]))
fid.close()
tmp=dof
dof=np.array([])
for icy in range(numcells_y):
    for icx in range(numcells_x):
        if dof.size==0:
            dof=tmp
        else:
            dof=np.concatenate((dof,tmp))
np.savetxt(init_filename,dof)

#############################################################################################################

iz=[0,mpmlz[0]+pml2src+src2stk]
for i in range(numlayers-1):
    ## ======= Evaluate  ======= ##
    ## ========================= ##

    if evaluate:

        raw_dataset = load_nodule_raw_dataset(size=size,
                                              res=res,
                                              sample=sample)[DataSubSet]

        failed_same, failed_diff = [], []
        K = 12

        p = np.zeros(K)
        r = np.zeros(K)
        for t in range(K):
            p[t] = precision(labels_test, np.squeeze(pred), 0.1 * t)
            r[t] = recall(labels_test, np.squeeze(pred), 0.1 * t)
        plt.figure()
        plt.title('PR Curve')
        plt.plot(r, p)
        plt.ylabel('Precision')
        plt.xlabel('Recall')
        plt.axis([0, 1, 0, 1])

        diff_len = np.count_nonzero(labels_test)
        same_len = len(labels_test) - diff_len

        d, s = [], []
        for l, p, m1, m2 in zip(labels_test, pred, meta[0],
                                meta[1]):  # similarity labels
            if l:
if __name__ == "__main__":
    start = time.time()

    # Define the perceptron
    # n_iter_no_change is the number of iterations, eta0 is the learning rate, shuffle controls whether the data are shuffled
    clf = Perceptron(n_iter_no_change=30, eta0=0.0001, shuffle=False)
    # Train on the training data
    train_data_matrix, train_label_matrix = loadData(
        "../MnistData/mnist_train.csv")
    test_data_matrix, test_label_matrix = loadData(
        "../MnistData/mnist_test.csv")

    print(train_data_matrix.shape)
    print(test_data_matrix.shape)

    train_label_matrix = np.squeeze(train_label_matrix)
    test_label_matrix = np.squeeze(test_label_matrix)

    print(train_label_matrix.shape)
    print(test_label_matrix.shape)

    # Train the model
    clf.fit(train_data_matrix, train_label_matrix)

    # Validate on the test set to obtain the model's accuracy
    accuracy = clf.score(test_data_matrix, test_label_matrix)

    end = time.time()
    print(f"accuracy is {accuracy}.")
    print(f"the total time is {end - start}.")
Example #38
def normalize_labels_shape(y: np.ndarray,
                           mode: Optional[str] = None,
                           n_tasks: Optional[int] = None,
                           n_classes: Optional[int] = None) -> np.ndarray:
    """A utility function to correct the shape of the labels.

  Parameters
  ----------
  y: np.ndarray
    `y` is an array of shape `(N,)` or `(N, n_tasks)` or `(N, n_tasks, 1)`.
  mode: str, default None
    If `mode` is "classification" or "regression", attempts to apply
    data transformations.
  n_tasks: int, default None
    The number of tasks this class is expected to handle.
  n_classes: int, default None
    If specified use this as the number of classes. Else will try to
    impute it as `n_classes = max(y) + 1` for arrays and as
    `n_classes=2` for the case of scalars. Note this parameter only
    has value if `mode=="classification"`

  Returns
  -------
  y_out: np.ndarray
    If `mode=="classification"`, `y_out` is an array of shape `(N,
    n_tasks, n_classes)`. If `mode=="regression"`, `y_out` is an array
    of shape `(N, n_tasks)`.
  """
    if n_tasks is None:
        raise ValueError("n_tasks must be specified")
    if mode not in ["classification", "regression"]:
        raise ValueError("mode must be either classification or regression.")
    if mode == "classification" and n_classes is None:
        raise ValueError("n_classes must be specified")
    if not isinstance(y, np.ndarray):
        raise ValueError("y must be a np.ndarray")
    # Handle n_classes/n_task shape ambiguity
    if mode == "classification" and len(y.shape) == 2:
        if n_classes == y.shape[1] and n_tasks != 1 and n_classes != n_tasks:
            raise ValueError("Shape of input doesn't match expected n_tasks=1")
        elif n_classes == y.shape[1] and n_tasks == 1:
            # Add in task dimension
            y = np.expand_dims(y, 1)
    if len(y.shape) == 1 and n_tasks != 1:
        raise ValueError("n_tasks must equal 1 for a 1D set of labels.")
    if (len(y.shape) == 2 or len(y.shape) == 3) and n_tasks != y.shape[1]:
        raise ValueError("Shape of input doesn't match expected n_tasks=%d" %
                         n_tasks)
    if len(y.shape) >= 4:
        raise ValueError(
            "Labels y must be a float scalar or a ndarray of shape `(N,)` or "
            "`(N, n_tasks)` or `(N, n_tasks, 1)` for regression problems and "
            "of shape `(N,)` or `(N, n_tasks)` or `(N, n_tasks, 1)` for classification problems"
        )
    if len(y.shape) == 1:
        # Insert a task dimension (we know n_tasks=1 from above)
        y_out = np.expand_dims(y, 1)
    elif len(y.shape) == 2:
        y_out = y
    elif len(y.shape) == 3:
        # If 3D and last dimension isn't 1, assume this is one-hot encoded and return as-is.
        if y.shape[-1] != 1:
            return y
        y_out = np.squeeze(y, axis=-1)
    # Handle classification. We need to convert labels into one-hot representation.
    if mode == "classification":
        all_y_task = []
        for task in range(n_tasks):
            y_task = y_out[:, task]
            # check whether n_classes is int or not
            assert isinstance(n_classes, int)
            y_hot = to_one_hot(y_task, n_classes=n_classes)
            y_hot = np.expand_dims(y_hot, 1)
            all_y_task.append(y_hot)
        y_out = np.concatenate(all_y_task, axis=1)
    return y_out
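A hedged usage sketch for the regression branch: labels stored as `(N, n_tasks, 1)` come back squeezed to the expected `(N, n_tasks)`.

import numpy as np

y = np.random.rand(10, 2, 1)       # 10 samples, 2 tasks, trailing singleton axis
y_out = normalize_labels_shape(y, mode="regression", n_tasks=2)
print(y_out.shape)                 # (10, 2)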
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *

%matplotlib inline
np.random.seed(1)

# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Example of a picture
index = 6
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))

X_train = X_train_orig/255.
X_test = X_test_orig/255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
conv_layers = {}

# GRADED FUNCTION: create_placeholders
Example #40
def normalize_prediction_shape(y: np.ndarray,
                               mode: Optional[str] = None,
                               n_tasks: Optional[int] = None,
                               n_classes: Optional[int] = None):
    """A utility function to correct the shape of provided predictions.

  The metric computation classes expect that inputs for classification
  have the uniform shape `(N, n_tasks, n_classes)` and inputs for
  regression have the uniform shape `(N, n_tasks)`. This function
  normalizes the provided input array to have the desired shape.

  Examples
  --------
  >>> import numpy as np
  >>> y = np.random.rand(10)
  >>> y_out = normalize_prediction_shape(y, "regression", n_tasks=1)
  >>> y_out.shape
  (10, 1)

  Parameters
  ----------
  y: np.ndarray
    If `mode=="classification"`, `y` is an array of shape `(N,)` or
    `(N, n_tasks)` or `(N, n_tasks, n_classes)`. If
    `mode=="regression"`, `y` is an array of shape `(N,)` or `(N,
    n_tasks)`or `(N, n_tasks, 1)`.
  mode: str, default None
    If `mode` is "classification" or "regression", attempts to apply
    data transformations.
  n_tasks: int, default None
    The number of tasks this class is expected to handle.
  n_classes: int, default None
    If specified use this as the number of classes. Else will try to
    impute it as `n_classes = max(y) + 1` for arrays and as
    `n_classes=2` for the case of scalars. Note this parameter only
    has value if `mode=="classification"`

  Returns
  -------
  y_out: np.ndarray
    If `mode=="classification"`, `y_out` is an array of shape `(N,
    n_tasks, n_classes)`. If `mode=="regression"`, `y_out` is an array
    of shape `(N, n_tasks)`.
  """
    if n_tasks is None:
        raise ValueError("n_tasks must be specified")
    if mode == "classification" and n_classes is None:
        raise ValueError("n_classes must be specified")
    if not isinstance(y, np.ndarray):
        raise ValueError("y must be a np.ndarray")
    # Handle n_classes/n_task shape ambiguity
    if mode == "classification" and len(y.shape) == 2:
        if n_classes == y.shape[1] and n_tasks != 1 and n_classes != n_tasks:
            raise ValueError("Shape of input doesn't match expected n_tasks=1")
        elif n_classes == y.shape[1] and n_tasks == 1:
            # Add in task dimension
            y = np.expand_dims(y, 1)
    if (len(y.shape) == 2 or len(y.shape) == 3) and n_tasks != y.shape[1]:
        raise ValueError("Shape of input doesn't match expected n_tasks=%d" %
                         n_tasks)
    if len(y.shape) >= 4:
        raise ValueError(
            "Predictions y must be a float scalar or a ndarray of shape `(N,)` or "
            "`(N, n_tasks)` or `(N, n_tasks, 1)` for regression problems and "
            "of shape `(N,)` or `(N, n_tasks)` or `(N, n_tasks, n_classes)` for classification problems"
        )
    if mode == "classification":
        if n_classes is None:
            raise ValueError("n_classes must be specified.")
        if len(y.shape) == 1 or len(y.shape) == 2:
            # Make everything 2D so easy to handle
            if len(y.shape) == 1:
                y = y[:, np.newaxis]
            # Handle each task separately.
            all_y_task = []
            for task in range(n_tasks):
                y_task = y[:, task]
                if len(np.unique(y_task)) > n_classes:
                    # Handle continuous class probabilities of the positive class for binary
                    if n_classes > 2:
                        raise ValueError(
                            "Cannot handle continuous probabilities for multiclass problems."
                            "Need a per-class probability")
                    # Fill in class 0 probabilities
                    y_task = np.array([1 - y_task, y_task]).T
                    # Add a task dimension to concatenate on
                    y_task = np.expand_dims(y_task, 1)
                    all_y_task.append(y_task)
                else:
                    # Handle binary labels
                    # make y_hot of shape (N, n_classes)
                    y_task = to_one_hot(y_task, n_classes=n_classes)
                    # Add a task dimension to concatenate on
                    y_task = np.expand_dims(y_task, 1)
                    all_y_task.append(y_task)
            y_out = np.concatenate(all_y_task, axis=1)
        elif len(y.shape) == 3:
            y_out = y
    elif mode == "regression":
        if len(y.shape) == 1:
            # Insert a task dimension
            y_out = np.expand_dims(y, 1)
        elif len(y.shape) == 2:
            y_out = y
        elif len(y.shape) == 3:
            if y.shape[-1] != 1:
                raise ValueError(
                    "y must be a float scalar or a ndarray of shape `(N,)` or "
                    "`(N, n_tasks)` or `(N, n_tasks, 1)` for regression problems."
                )
            y_out = np.squeeze(y, axis=-1)
    else:
        raise ValueError("mode must be either classification or regression.")
    return y_out
				tmp_eval_loss, logits = outputs[:2]

				eval_loss += tmp_eval_loss.mean().item()
			nb_eval_steps += 1
			if preds is None:
				preds = logits.detach().cpu().numpy()
				out_label_ids = inputs["labels"].detach().cpu().numpy()
			else:
				preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
				out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)

		eval_loss = eval_loss / nb_eval_steps
		if args.output_mode == "classification":
			preds = np.argmax(preds, axis=1)
		elif args.output_mode == "regression":
			preds = np.squeeze(preds)
		result = compute_metrics(eval_task, preds, out_label_ids)
		results.update(result)

		output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
		with open(output_eval_file, "w") as writer:
			logger.info("***** Eval results {} *****".format(prefix))
			print(result)
			accuracy_matrix[train_task_num][current_task_num] = format(result['acc'],".2f")
			#for key in sorted(result.keys()):
			#	logger.info("  %s = %s", key, str(result[key]))
			#	writer.write("%s = %s\n" % (key, str(result[key])))

	return results, accuracy_matrix

def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.009,
          num_epochs = 100, minibatch_size = 64, print_cost = True):
    """
    Implements a three-layer ConvNet in Tensorflow:
    CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED

    Arguments:
    X_train -- training set, of shape (None, 64, 64, 3)
    Y_train -- test set, of shape (None, n_y = 6)
    X_test -- training set, of shape (None, 64, 64, 3)
    Y_test -- test set, of shape (None, n_y = 6)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    train_accuracy -- real number, accuracy on the train set (X_train)
    test_accuracy -- real number, testing accuracy on the test set (X_test)
    parameters -- parameters learnt by the model. They can then be used to predict.
    """

    ops.reset_default_graph()                         # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)                             # to keep results consistent (tensorflow seed)
    seed = 3                                          # to keep results consistent (numpy seed)
    (m, n_H0, n_W0, n_C0) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []                                        # To keep track of the cost

    # Create Placeholders of the correct shape
    ### START CODE HERE ### (1 line)
    X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
    ### END CODE HERE ###

    # Initialize parameters
    ### START CODE HERE ### (1 line)
    parameters = initialize_parameters()
    ### END CODE HERE ###

    # Forward propagation: Build the forward propagation in the tensorflow graph
    ### START CODE HERE ### (1 line)
    Z3 = forward_propagation(X, parameters)
    ### END CODE HERE ###

    # Cost function: Add cost function to tensorflow graph
    ### START CODE HERE ### (1 line)
    cost = compute_cost(Z3, Y)
    ### END CODE HERE ###

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
    ### START CODE HERE ### (1 line)
    optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
    ### END CODE HERE ###

    # Initialize all the variables globally
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:

        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):

            minibatch_cost = 0.
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the optimizer and the cost; the feed_dict should contain a minibatch for (X,Y).
                ### START CODE HERE ### (1 line)
                _ , temp_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
                ### END CODE HERE ###

                minibatch_cost += temp_cost / num_minibatches


            # Print the cost every 5 epochs and record it every epoch
            if print_cost == True and epoch % 5 == 0:
                print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost == True and epoch % 1 == 0:
                costs.append(minibatch_cost)


        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('epochs')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # Calculate the correct predictions
        predict_op = tf.argmax(Z3, 1)
        correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(accuracy)
        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)

        return train_accuracy, test_accuracy, parameters
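A possible way to drive the function, assuming the SIGNS-style data used in the original exercise has already been loaded, normalized, and one-hot encoded elsewhere:

# Usage sketch only; data loading is assumed to happen elsewhere.
_, _, parameters = model(X_train, Y_train, X_test, Y_test,
                         learning_rate=0.009, num_epochs=100, minibatch_size=64)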
Example #43
0
 def __init__(self, model, params, est_llr, llf_el):
     self.model = model
     self.params = np.squeeze(params)
     self.llr = est_llr
     self.llf_el = llf_el
Example #44
0
def main(meshfile,file,iexpt=10,iversn=22,yrflag=3,bio_path=None) :
    
    #
    # Trim input netcdf file name being appropriate for reading
    #
    meshfile=str(meshfile)[2:-2]
    logger.info("Reading mesh information from %s."%(meshfile))
    #
    # Read mesh file containing grid and coordinate information.
    # Note that for now, we are using T-grid in vertical which may need
    # to be improved by utilizing W-point along the vertical axis.
    #
    hdept,gdept,mbathy,mbathy_u,mbathy_v,mask,e3t,plon,plat=read_grid(meshfile)
    logger.warning("Reading grid information from regional.grid.[ab] (not completed)")
    #
    # Convert from P-point (i.e. NEMO grid) to U and V HYCOM grids
    #
    mask_u=p2u_2d(mask)
    mask_v=p2v_2d(mask)
    #
    # Read regional.grid.[ab]
    # Grid angle is not used for this product because all quantities are
    # on regular rectangular grid points.
    #
    angle=numpy.zeros(plon.shape)
    #
    # Number vertical layers in T-point.
    #
    nlev=gdept.size
    #
    # layer thickness in the absence of layer partial steps.
    #
    dt = gdept[1:] - gdept[:-1]
    #
    # Prepare/read input data file (in netcdf format). Reference time is 1950-01-01
    #
    logger.info("Reading data files.")
    file=str(file).strip()[2:-2]
    dirname=os.path.dirname(file)
    logger.debug("file name is {}".format(file))
    logger.debug("dirname is {}".format(dirname))
    logger.debug("basename is {}".format(os.path.basename(file)))
    m=re.match("(MERCATOR-PHY-24-)(.*\.nc)",os.path.basename(file))
    logger.debug("file prefix is {}".format(file_pre))
###    m=re.match(file_pre,os.path.basename(file))
    if not m:
        msg="File %s is not a grid2D file, aborting"%file
        logger.error(msg)
        raise ValueError(msg)
    
    #fileinput0=os.path.join(dirname+"/"+"MERCATOR-PHY-24-"+m.group(2))
    file_date=file[-16:-6]
    fileinput0=file
    print((file_date,file))
    next_day=datetime.datetime.strptime(file_date, '%Y-%m-%d')+datetime.timedelta(days=1)
    fileinput1=datetime.datetime.strftime(next_day,'%Y%m%d')
    fileinput1=os.path.join(dirname+"/"+file_pre+fileinput1+'.nc')
    
    logger.info("Reading from %s"%(fileinput0))
    ncid0=netCDF4.Dataset(fileinput0,"r")
    if timeavg_method==1 and os.path.isfile(fileinput1) :
        
        logger.info("timeavg_method=1, Reading from %s"%(fileinput1))
        ncid1=netCDF4.Dataset(fileinput1,"r")
        #
        # Calculate temporal averaged temperature, salinity, and velocity
        #
        uo =   0.5*(ncid0.variables["uo"][0,:,:,:]+    ncid1.variables["uo"][0,:,:,:])
        vo =   0.5*(ncid0.variables["vo"][0,:,:,:]+    ncid1.variables["vo"][0,:,:,:])
        salt = 0.5*(ncid0.variables["so"][0,:,:,:]+    ncid1.variables["so"][0,:,:,:])
        temp = 0.5*(ncid0.variables["thetao"][0,:,:,:]+ncid1.variables["thetao"][0,:,:,:])
        ssh = numpy.squeeze(0.5*(ncid0.variables["zos"][0,:,:]+ncid1.variables["zos"][0,:,:]))
    
    else:
        #
        # Set variables based on the current file when timeavg_method != 1 or the next netcdf file is not available
        logger.debug("time average method set to {}".format(timeavg_method))
        uo =   ncid0.variables["uo"][0,:,:,:]
        vo =   ncid0.variables["vo"][0,:,:,:]
        salt = ncid0.variables["so"][0,:,:,:]
        temp = ncid0.variables["thetao"][0,:,:,:]
        ssh = numpy.squeeze(ncid0.variables["zos"][0,:,:])
    #
    # These fill values are noted here and accounted for afterward; in the current version, missing values are handled with a gap-filling methodology.
    #
    logger.debug("getting _FillValue")
    uofill=ncid0.variables["uo"]._FillValue
    vofill=ncid0.variables["vo"]._FillValue
    slfill=ncid0.variables["so"]._FillValue
    tlfill=ncid0.variables["thetao"]._FillValue
    shfill=ncid0.variables["zos"]._FillValue

    # Set time
    logger.info("Set time.")
    time=ncid0.variables["time"][0]
    unit=ncid0.variables["time"].units
    tmp=cfunits.Units(unit)
    refy,refm,refd=(1950,1,1)
    tmp2=cfunits.Units("hours since %d-%d-%d 00:00:00"%(refy,refm,refd))
    tmp3=int(numpy.round(cfunits.Units.conform(time,tmp,tmp2)))
    mydt = datetime.datetime(refy,refm,refd,0,0,0) + datetime.timedelta(hours=tmp3) # Then calculate dt. Phew!

    if timeavg_method==1 and os.path.isfile(fileinput1)  :
        fnametemplate="archv.%Y_%j_%H"
        deltat=datetime.datetime(refy,refm,refd,0,0,0) + \
              datetime.timedelta(hours=tmp3) + \
              datetime.timedelta(hours=12)
        oname=deltat.strftime(fnametemplate)
    else:
        #
        # I am assuming that daily mean can be set at 00 instead of 12
        # for cases that there is no information of next day.
        #
        fnametemplate="archv.%Y_%j"
        deltat=datetime.datetime(refy,refm,refd,0,0,0) + \
              datetime.timedelta(hours=tmp3)
        oname=deltat.strftime(fnametemplate) + '_00'

    # model day
    refy, refm, refd=(1900,12,31)
    model_day= deltat-datetime.datetime(refy,refm,refd,0,0,0)
    model_day=model_day.days
    logger.info("Model day in HYCOM is %s"%str(model_day))
    if bio_path:
       jdm,idm=numpy.shape(plon)
       points = numpy.transpose(((plat.flatten(),plon.flatten())))
       delta = mydt.strftime( '%Y-%m-%d')
       # filename format MERCATOR-BIO-14-2013-01-05-00
       print((bio_path,delta))
       idx,biofname=search_biofile(bio_path,delta)
       if idx >7: 
          msg="No available BIO file within a week difference with PHY"
          logger.error(msg)
          raise ValueError(msg)
       logger.info("BIO file %s reading & interpolating to 1/12 deg grid cells ..."%biofname)
       ncidb=netCDF4.Dataset(biofname,"r")
       blon=ncidb.variables["longitude"][:];
       blat=ncidb.variables["latitude"][:]
       minblat=blat.min()
       no3=ncidb.variables["NO3"][0,:,:,:];
       no3[numpy.abs(no3)>1e+10]=numpy.nan
       po4=ncidb.variables["PO4"][0,:,:,:]
       si=ncidb.variables["Si"][0,:,:,:]
       po4[numpy.abs(po4)>1e+10]=numpy.nan
       si[numpy.abs(si)>1e+10]=numpy.nan
       # TODO: I need to improve this part
       nz,ny,nx=no3.shape
       dummy=numpy.zeros((nz,ny,nx+1))
       dummy[:,:,:nx]=no3;dummy[:,:,-1]=no3[:,:,-1]
       no3=dummy
       dummy=numpy.zeros((nz,ny,nx+1))
       dummy[:,:,:nx]=po4;dummy[:,:,-1]=po4[:,:,-1]
       po4=dummy
       dummy=numpy.zeros((nz,ny,nx+1))
       dummy[:,:,:nx]=si;dummy[:,:,-1]=si[:,:,-1]
       si=dummy
       dummy=numpy.zeros((nx+1))
       dummy[:nx]=blon
       blon=dummy
       blon[-1]=-blon[0]
# TODO:  Note that the coordinate files are for the global configuration while
#        the data file is saved for latitudes larger than 30. If you change your data file coordinate
#        configuration you need to modify the following lines
       bio_coordfile=bio_path[:-4]+"/GLOBAL_ANALYSIS_FORECAST_BIO_001_014_COORD/GLO-MFC_001_014_mask.nc"
       biocrd=netCDF4.Dataset(bio_coordfile,"r")
       blat2 = biocrd.variables['latitude'][:]
       index=numpy.where(blat2>=minblat)[0]
       depth_lev = biocrd.variables['deptho_lev'][index[0]:,:]
#
#
#
       dummy=numpy.zeros((ny,nx+1))
       dummy[:,:nx]=depth_lev;dummy[:,-1]=depth_lev[:,-1]
       depth_lev=dummy
       depth_lev[depth_lev>50]=0
       depth_lev=depth_lev.astype('i')
       dummy_no3=no3
       dummy_po4=po4
       dummy_si=si
       for j in range(ny):
          for i in range(nx):
             dummy_no3[depth_lev[j,i]:nz-2,j,i]=no3[depth_lev[j,i]-1,j,i]
             dummy_po4[depth_lev[j,i]:nz-2,j,i]=po4[depth_lev[j,i]-1,j,i]
             dummy_si[depth_lev[j,i]:nz-2,j,i]=si[depth_lev[j,i]-1,j,i]
       no3=dummy_no3
       po4=dummy_po4
       si=dummy_si

#
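       # (Assumed interpretation: the scaling below converts nutrient concentrations from
       #  mmol/m3 to mg C/m3 using Redfield-type ratios, C:P = 106 and C:N = 6.625 (the same
       #  6.625 factor is applied to Si), times 12.01 g of carbon per mole.)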
       po4 = po4 * 106.0 * 12.01
       si = si   * 6.625 * 12.01
       no3 = no3 * 6.625 * 12.01


    logger.info("Read, trim, rotate NEMO velocities.")
    u=numpy.zeros((nlev,mbathy.shape[0],mbathy.shape[1]))
    v=numpy.zeros((nlev,mbathy.shape[0],mbathy.shape[1]))
    utmp=numpy.zeros((mbathy.shape))
    vtmp=numpy.zeros((mbathy.shape))
    #
    # Methods to carefully detect the bottom at p-, u-, and v-grid points. While 3D mask data could be used, the following methods are good enough for now.
    #
    if mbathy_method  ==  1 :
        ip = mbathy   == -1
        iu = mbathy_u == -1
        iv = mbathy_v == -1
    else:
        ip = mask   == 0
        iu = mask_u == 0
        iv = mask_v == 0
    #
    # Read 3D velocity field to calculate barotropic velocity
    #
    # Estimate barotropic velocities using partial steps along the vertical axis. Note that an early version of this code
    # used dt = gdept[1:] - gdept[:-1] on the NEMO t-grid; this part may be re-calculated on vertical grid cells in the future.
    #
    logger.info("Calculate barotropic velocities.")
    ubaro,vbaro=calc_uvbaro(uo,vo,e3t,iu,iv)
    #
    # Save 2D fields (here only ubaro & vbaro)
    #
    zeros=numpy.zeros(mbathy.shape)
    #flnm = open(oname+'.txt', 'w')
    #flnm.write(oname)
    #flnm.close()
    ssh = numpy.where(numpy.abs(ssh)>1000,0.,ssh*9.81) # NB: HYCOM srfhgt is in geopotential ...
    #
    outfile = abf.ABFileArchv("./data/"+oname,"w",iexpt=iexpt,iversn=iversn,yrflag=yrflag,)
    outfile.write_field(zeros,                   ip,"montg1"  ,0,model_day,1,0)
    outfile.write_field(ssh,                     ip,"srfhgt"  ,0,model_day,0,0)
    outfile.write_field(zeros,                   ip,"surflx"  ,0,model_day,0,0) # Not used
    outfile.write_field(zeros,                   ip,"salflx"  ,0,model_day,0,0) # Not used
    outfile.write_field(zeros,                   ip,"bl_dpth" ,0,model_day,0,0) # Not used
    outfile.write_field(zeros,                   ip,"mix_dpth",0,model_day,0,0) # Not used
    outfile.write_field(ubaro,                   iu,"u_btrop" ,0,model_day,0,0)
    outfile.write_field(vbaro,                   iv,"v_btrop" ,0,model_day,0,0)
    #
    if bio_path:
       logger.info("Calculate baroclinic velocities, temperature, and salinity data as well as BIO field.")
    else:
       logger.info("Calculate baroclinic velocities, temperature, and salinity data.")
    for k in numpy.arange(u.shape[0]) :
        if bio_path:
           no3k=interpolate2d(blat, blon, no3[k,:,:], points).reshape((jdm,idm))
           no3k = maplev(no3k)
           po4k=interpolate2d(blat, blon, po4[k,:,:], points).reshape((jdm,idm))
           po4k = maplev(po4k)
           si_k=interpolate2d(blat, blon, si[k,:,:], points).reshape((jdm,idm))
           si_k = maplev(si_k)
           if k%10==0 : logger.info("Writing 3D variables including BIO, level %d of %d"%(k+1,u.shape[0]))
        else:
           if k%10==0 : logger.info("Writing 3D variables, level %d of %d"%(k+1,u.shape[0]))
        #

        #
        uo[k,:,:]=numpy.where(numpy.abs(uo[k,:,:])<10,uo[k,:,:],0)
        vo[k,:,:]=numpy.where(numpy.abs(vo[k,:,:])<10,vo[k,:,:],0)

        # Baroclinic velocity (in HYCOM U- and V-grid)
        ul = p2u_2d(numpy.squeeze(uo[k,:,:])) - ubaro
        vl = p2v_2d(numpy.squeeze(vo[k,:,:])) - vbaro
        ul[iu]=spval
        vl[iv]=spval
        
        # Layer thickness
        
        dtl=numpy.zeros(mbathy.shape)
        # Use dt for the water column except the nearest cell to bottom 
        if thickness_method==1:
            if k < u.shape[0]-1 :
                J,I = numpy.where(mbathy>k)
                e3=(e3t[k,:,:])
                dtl[J,I]=dt[k]
                J,I = numpy.where(mbathy==k)
                dtl[J,I]=e3[J,I]
            else:
                e3=(e3t[k,:,:])
                J,I = numpy.where(mbathy==k)
                dtl[J,I]=e3[J,I]
        # Use partial cells for the whole water column.
        else :
            J,I = numpy.where(mbathy>=k)
            dtl[J,I]=e3t[k,J,I]

        # Salinity
        sl = salt[k,:,:]

        # Temperature
        tl = temp[k,:,:]
        # These need to be treated carefully in order to minimize artifacts in the resulting [ab] files.
        if fillgap_method==1:
            J,I= numpy.where(mbathy<k)
            sl = maplev(numpy.where(numpy.abs(sl)<1e2,sl,numpy.nan))
            sl[J,I]=spval
            J,I= numpy.where(mbathy<k)
            tl = maplev(numpy.where(numpy.abs(tl)<1e2,tl,numpy.nan))
            tl[J,I]=spval
        else:
            sl = numpy.where(numpy.abs(sl)<1e2,sl,numpy.nan)
            sl = numpy.minimum(numpy.maximum(maplev(sl),25),80.)
            tl = numpy.where(numpy.abs(tl)<=5e2,tl,numpy.nan)
            tl = numpy.minimum(numpy.maximum(maplev(tl),-5.),50.)

        # Thickness
        dtl = maplev(dtl)
        if k > 0 :
            with numpy.errstate(invalid='ignore'):
                K= numpy.where(dtl < 1e-4)
            sl[K] = sl_above[K]
            tl[K] = tl_above[K]
        #
        sl[ip]=spval
        tl[ip]=spval

        # Save 3D fields
        outfile.write_field(ul      ,iu,"u-vel.",0,model_day,k+1,0)
        outfile.write_field(vl      ,iv,"v-vel.",0,model_day,k+1,0)
        outfile.write_field(dtl*onem,ip,"thknss",0,model_day,k+1,0)
        outfile.write_field(tl      ,ip,"temp" , 0,model_day,k+1,0)
        outfile.write_field(sl      ,ip,"salin" ,0,model_day,k+1,0)
        if bio_path :
           outfile.write_field(no3k      ,ip,"ECO_no3" ,0,model_day,k+1,0)
           outfile.write_field(po4k      ,ip,"ECO_pho" ,0,model_day,k+1,0)
           outfile.write_field(si_k      ,ip,"ECO_sil" ,0,model_day,k+1,0)
                
        tl_above=numpy.copy(tl)
        sl_above=numpy.copy(sl)
    
    outfile.close()
    ncid0.close()
    if timeavg_method==1 and os.path.isfile(fileinput1)  :
        ncid1.close()
    if bio_path :
       ncidb.close()
Example #45
0
# define species
plasma.b_field = ConstantVector3D(Vector3D(1.0, 1.0, 1.0))
plasma.electron_distribution = e_distribution
plasma.composition = [d0_species, d1_species]

# Add a balmer alpha line for visualisation purposes
d_alpha_excit = ExcitationLine(Line(deuterium, 0, (3, 2)))
plasma.models = [d_alpha_excit]


####################
# Visualise Plasma #

# Run some plots to check the distribution functions and emission profile are as expected
r, _, z, t_samples = sample3d(d1_temperature, (0, 4, 200), (0, 0, 1), (-2, 2, 200))
plt.imshow(np.transpose(np.squeeze(t_samples)), extent=[0, 4, -2, 2])
plt.colorbar()
plt.axis('equal')
plt.xlabel('r axis')
plt.ylabel('z axis')
plt.title("Ion temperature profile in r-z plane")

plt.figure()
r, _, z, t_samples = sample3d(d1_temperature, (-4, 4, 400), (-4, 4, 400), (0, 0, 1))
plt.imshow(np.transpose(np.squeeze(t_samples)), extent=[-4, 4, -4, 4])
plt.colorbar()
plt.axis('equal')
plt.xlabel('x axis')
plt.ylabel('y axis')
plt.title("Ion temperature profile in x-y plane")
Example #46
0
        # Each box represents a part of the image where a particular object was detected.
        detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
       
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        # Actual detection.
        (boxes, scores, classes, num) = sess.run(
              [detection_boxes, detection_scores, detection_classes, num_detections],
              feed_dict={image_tensor: image_np_expanded})
          # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
              image_np,
              np.squeeze(boxes),
              np.squeeze(classes).astype(np.int32),
              np.squeeze(scores),
              category_index,
              use_normalized_coordinates=True,
              line_thickness=8)
  
        VideoFileOutput.write(image_np)
        cv2.imshow('live_detection',image_np)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            cap.release()
            break
Example #47
0
def variable_to_numpy(variable):
    return np.squeeze(variable.cpu().detach().numpy())
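A quick usage sketch for the helper above (assumes PyTorch is installed; the tensor here is arbitrary and not from the original project):

# Usage sketch only:
import torch
t = torch.randn(1, 3, requires_grad=True)
arr = variable_to_numpy(t)   # detached, moved to CPU, squeezed to shape (3,)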
Example #48
0
roi_paths = glob.glob(ROI_DIR + '/*.nii')
roi_names = [r.split(os.sep)[-1].split('.nii')[0] for r in roi_paths]
tmp_img = nib.load('colin.nii')

roi_coords = []
for i_roi, roi in enumerate(roi_paths):
    roi_nii = nib.load(roi)
    roi_th = nib.Nifti1Image(np.array(roi_nii.get_data() > 0, dtype=np.int16),
                             roi_nii.get_affine(),
                             header=roi_nii.get_header())
    rroi = resample_img(roi_th,
                        target_affine=tmp_img.get_affine(),
                        target_shape=tmp_img.shape[:3],
                        interpolation='nearest')

    cur_roi_img = nib.Nifti1Image(np.array(np.squeeze(rroi.get_data()) > 0,
                                           dtype=np.int32),
                                  affine=tmp_img.get_affine())
    roi_coords.append(plotting.find_xyz_cut_coords(cur_roi_img))

RES_NAME = 'net_pred_combined_clf_' + ROI_DIR
WRITE_DIR = op.join(os.getcwd(), RES_NAME)
if not op.exists(WRITE_DIR):
    os.mkdir(WRITE_DIR)

##############################################################################
# load+preprocess data
##############################################################################

print('Loading ADHD data (1=ADHD)...')
rs_files = glob.glob('/Volumes/porsche/adhd_niak/fmri*/*.nii.gz')
Example #49
0
File: cmp.py Project: afcarl/StarAE
def main():
    """test for Sparse AE"""
    os.system('rm -rf log')
    T = []
    T1 = SparseAE(64,
                  49,
                  optimize_method='sgd',
                  max_iter=10,
                  debug=0,
                  verbose=True,
                  tol=1e-8,
                  mini_batch=64,
                  momentum=0,
                  momen_beta=.95,
                  alpha=.01,
                  adastep=1,
                  logger='sgd_good.csv')
    T2 = SparseAE(64,
                  49,
                  optimize_method='sgd',
                  max_iter=10,
                  debug=0,
                  verbose=True,
                  tol=1e-8,
                  mini_batch=64,
                  momentum=0,
                  momen_beta=.95,
                  alpha=.01,
                  adastep=1,
                  logger='sgd_ill.csv')
    T3 = SparseAE(64,
                  49,
                  optimize_method='sgd',
                  max_iter=10,
                  debug=0,
                  verbose=True,
                  tol=1e-8,
                  mini_batch=64,
                  momentum=0,
                  momen_beta=.95,
                  alpha=.01,
                  adastep=1,
                  logger='sgd_worse.csv')
    T4 = SparseAE(64,
                  49,
                  optimize_method='sgd',
                  max_iter=10,
                  debug=0,
                  verbose=True,
                  tol=1e-8,
                  mini_batch=64,
                  momentum=0,
                  momen_beta=.95,
                  alpha=.01,
                  adastep=1,
                  logger='sgd_worst.csv')
    T.append(T1)
    T.append(T2)
    T.append(T3)
    T.append(T4)

    X = []
    X1 = vs.load_sample('IMAGES.mat', patch_size=8, n_patches=20480)
    X2 = vs.load_sample('IMAGES.mat', patch_size=8, n_patches=20480)
    X3 = vs.load_sample('IMAGES.mat', patch_size=8, n_patches=20480)
    X4 = vs.load_sample('IMAGES.mat', patch_size=8, n_patches=20480)
    idx_jitter = np.random.permutation(64)
    idx_jitter_pos = idx_jitter[:16]
    idx_jitter_neg = idx_jitter[-16:]
    for i, j in zip(idx_jitter_pos[:3], idx_jitter_neg[:3]):
        X2[i, :] = np.squeeze(np.random.normal(0.89, 0.01, [
            20480,
        ]))
        X2[j, :] = np.squeeze(np.random.normal(0.11, 0.01, [
            20480,
        ]))
    for i, j in zip(idx_jitter_pos[:8], idx_jitter_neg[:8]):
        X3[i, :] = np.squeeze(np.random.normal(0.899, 0.0001, [
            20480,
        ]))
        X3[j, :] = np.squeeze(np.random.normal(0.101, 0.0001, [
            20480,
        ]))
    for i, j in zip(idx_jitter_pos, idx_jitter_neg):
        X4[i, :] = np.squeeze(np.random.normal(0.89999, 0.000001, [
            20480,
        ]))
        X4[j, :] = np.squeeze(np.random.normal(0.10001, 0.000001, [
            20480,
        ]))
    X.extend([X1, X2, X3, X4])
    print "cond(X1): ", np.linalg.cond(X1)
    print "cond(X2): ", np.linalg.cond(X2)
    print "cond(X3): ", np.linalg.cond(X3)
    print "cond(X4): ", np.linalg.cond(X4)

    for i in range(len(T)):
        try:
            T[i].train(X[i])
        except:
            print 'Training shut down\n'
            pass
    def test_model(self, path, model_name, type_to_restore="SavedModel", show_incorrect=True):
        """

        :param path:
        :param model_name:
        :param type_to_restore: String, "SavedModel" or "SavedSession"
        :param show_incorrect:  show incorrectly guessed images
        :return:
        """

        print("\n{}".format("#" * 50))

        # self.test_data = self.create_test_data("dataset/test", "test_data", typ="np")

        test_imgs, test_lbls = self.test_data[0], self.test_data[1]
        test_batch = 5

        length = len(test_lbls)

        n_correct = 0
        total_n = length

        with tf.Session(graph=tf.Graph()) as sess:

            graph = tf.get_default_graph()

            if type_to_restore == "SavedModel":
                tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.TRAINING], path + model_name)
            else:
                saver = tf.train.import_meta_graph(path + model_name)
                saver.restore(sess, tf.train.latest_checkpoint(path))

            x_img = graph.get_tensor_by_name("x_img:0")
            y_lbl = graph.get_tensor_by_name("y_lbl:0")
            predictor = graph.get_tensor_by_name("accuracy/prediction:0")
            keep_prob = graph.get_tensor_by_name("dropout/keep_prob:0")

            i = 0
            lngth = len(test_lbls)

            while i < lngth:
                # ensure that all images in test set are tested, even if final set has size less than `test_batch`
                if (lngth - test_batch) >= i:
                    imgs = test_imgs[i: (i + test_batch)]
                    lbls = test_lbls[i: (i + test_batch)]
                else:
                    imgs = test_imgs[i:lngth]
                    lbls = test_lbls[i:lngth]

                print("{}\nTest batch {}".format("-" * 15, i / test_batch))

                preds = sess.run(predictor, feed_dict={x_img: imgs, keep_prob: 1.0})

                truths = np.asarray([t[1] for t in lbls])               # TODO:  These indices should match

                print("predictions: \t", preds)
                print("truths: \t\t", truths)

                correct = np.asarray([1 if p == t else 0 for p, t in zip(preds, truths)])
                n_correct += sum(correct)

                acc = float(sum(correct)) / len(correct)
                print("accuracy: {:.3f}".format(acc))

                acc_thresh = 0.8
                if show_incorrect and acc < acc_thresh:

                    print("Accuracy less than {:.0%}, displaying incorrect guesses...".format(acc_thresh))

                    for idx in range(len(correct)):
                        if correct[idx]:
                            continue

                        incorrect_img = imgs[idx]
                        guess = preds[idx]

                        plt.imshow(np.squeeze(incorrect_img, axis=2), cmap='gray')

                        plt.title(
                            (TadpoleConvNet.CLASS_ONE, TadpoleConvNet.CLASS_TWO)[guess] + " (false)"
                        )
                        plt.show()

                i += test_batch     # increment i with test_batch step size

            accuracy = graph.get_tensor_by_name("accuracy/accuracy:0")
            print("test final accuracy: ", sess.run(accuracy, feed_dict={x_img: test_imgs, y_lbl: test_lbls, keep_prob: 1.0}))

            print("-" * 30)
            print('Final Test Accuracy: {:.6f}'.format(
                float(n_correct) / total_n)
            )
            print("-" * 30)

        return float(n_correct) / total_n
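A hypothetical way to call this method (the class name is taken from the constants referenced above; paths, model name, and the data pipeline are placeholders):

# Usage sketch only; all names below are placeholders.
# net = TadpoleConvNet()
# net.test_data = net.create_test_data("dataset/test", "test_data", typ="np")
# accuracy = net.test_model("saved_models/", "tadpole_model", type_to_restore="SavedModel")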
def main(args):
    sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)

    print('Creating networks and loading parameters')

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)

    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three steps' threshold
    factor = 0.709  # scale factor

    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)
    bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)

    with open(bounding_boxes_filename, "w") as text_file:
        nrof_images_total = 0
        nrof_successfully_aligned = 0
        if args.random_order:
            random.shuffle(dataset)
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
                if args.random_order:
                    random.shuffle(cls.image_paths)
            for image_path in cls.image_paths:
                nrof_images_total += 1
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                output_filename = os.path.join(output_class_dir, filename + '.png')
                print(image_path)
                if not os.path.exists(output_filename):
                    try:
                        img = imageio.imread(image_path)
                    except (IOError, ValueError, IndexError) as e:
                        errorMessage = '{}: {}'.format(image_path, e)
                        print(errorMessage)
                    else:
                        if img.ndim < 2:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
                            continue
                        if img.ndim == 2:
                            img = facenet.to_rgb(img)
                        img = img[:, :, 0:3]

                        bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
                        nrof_faces = bounding_boxes.shape[0]
                        if nrof_faces > 0:
                            det = bounding_boxes[:, 0:4]
                            img_size = np.asarray(img.shape)[0:2]
                            if nrof_faces > 1:
                                bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
                                img_center = img_size / 2
                                offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
                                                     (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
                                offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
                                index = np.argmax(
                                    bounding_box_size - offset_dist_squared * 2.0)  # some extra weight on the centering
                                det = det[index, :]
                            det = np.squeeze(det)
                            bb = np.zeros(4, dtype=np.int32)
                            # print(det[0], det[1], det[2], det[3])
                            bb[0] = np.maximum(det[0] - args.margin / 2, 0)
                            bb[1] = np.maximum(det[1] - args.margin / 2, 0)
                            bb[2] = np.minimum(det[2] + args.margin / 2, img_size[1])
                            bb[3] = np.minimum(det[3] + args.margin / 2, img_size[0])
                            # print(bb)
                            cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                            # scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
                            scaled = np.array(Image.fromarray(cropped).resize((args.image_size, args.image_size)))
                            '''
                            img = scipy.misc.imresize(myImage, size=(num_px, num_px))
                            img = np.array(Image.fromarray(myImage).resize((num_px, num_px)))'''
                            nrof_successfully_aligned += 1
                            imageio.imsave(output_filename, scaled)
                            text_file.write('%s %d %d %d %d\n' % (output_filename, bb[0], bb[1], bb[2], bb[3]))
                        else:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))

    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
def model(X, Y, word_to_vec_map, learning_rate=0.01, num_iterations=400):
    """
    Model to train word vector representations in numpy.

    Arguments:
    X -- input data, numpy array of sentences as strings, of shape (m, 1)
    Y -- labels, numpy array of integers between 0 and 4, numpy-array of shape (m, 1)
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
    learning_rate -- learning_rate for the stochastic gradient descent algorithm
    num_iterations -- number of iterations

    Returns:
    pred -- vector of predictions, numpy-array of shape (m, 1)
    W -- weight matrix of the softmax layer, of shape (n_y, n_h)
    b -- bias of the softmax layer, of shape (n_y,)
    """

    np.random.seed(1)

    # Define number of training examples
    m = Y.shape[0]  # number of training examples
    n_y = 5  # number of classes
    n_h = 50  # dimensions of the GloVe vectors

    # Initialize parameters using Xavier initialization
    W = np.random.randn(n_y, n_h) / np.sqrt(n_h)
    b = np.zeros((n_y,))

    # Convert Y to Y_onehot with n_y classes
    Y_oh = convert_to_one_hot(Y, C=n_y)

    # Optimization loop
    for t in range(num_iterations):  # Loop over the number of iterations
        for i in range(m):  # Loop over the training examples

            ### START CODE HERE ### (≈ 4 lines of code)
            # Average the word vectors of the words from the i'th training example
            avg = sentence_to_avg(X[i], word_to_vec_map)

            # Forward propagate the avg through the softmax layer
            z = np.dot(W, avg) + b
            a = softmax(z)

            # Compute cost using the i'th training label's one hot representation and "A" (the output of the softmax)
            cost = -np.squeeze(np.sum(Y_oh[i] * np.log(a)))
            ### END CODE HERE ###

            # Compute gradients
            dz = a - Y_oh[i]
            dW = np.dot(dz.reshape(n_y, 1), avg.reshape(1, n_h))
            db = dz

            # Update parameters with Stochastic Gradient Descent
            W = W - learning_rate * dW
            b = b - learning_rate * db

        if t % 100 == 0:
            print("Epoch: " + str(t) + " --- cost = " + str(cost))
            pred = predict(X, Y, W, b, word_to_vec_map)

    return pred, W, b
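The loop above relies on helpers such as `sentence_to_avg` and `softmax` that are defined elsewhere in the exercise. A minimal sketch consistent with how `sentence_to_avg` is called (the real implementation may differ):

# Sketch of the assumed helper, not the original implementation:
import numpy as np

def sentence_to_avg_sketch(sentence, word_to_vec_map):
    """Average the 50-d GloVe vectors of the words in a whitespace-separated sentence."""
    words = sentence.lower().split()
    avg = np.zeros((50,))
    for w in words:
        avg += word_to_vec_map[w]
    return avg / len(words)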
Example #53
0
c_cover = list(np.reshape(cover, np.size(cover)))
tf_cover = np.expand_dims(cover, axis=2)

# f = open(message_file, "rb")
# message = f.read()
# f.close()
# c_message = str2bitlist(message)

covers = tf.placeholder(tf.float32, [None, Height, Width, 1], name="covers")

generator = GeneratorModel(covers, is_training=False)
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint(gan_savedir))
    tf_probmap = sess.run(generator.generator_prediction, {covers: [tf_cover]})
probmap = np.squeeze(tf_probmap)
c_probmap = list(np.reshape(probmap, np.size(probmap)))

for bpp in range(1, 2):
    c_message = list(np.random.randint(0, 2, int(512 * 512 * bpp / 100)))
    (success, c_stego, c_lsb) = STC.embed(c_cover, c_probmap, c_message)
    if success:
        stego = np.reshape(c_stego, (Height, Width)).astype(int)
        diff = np.abs(cover - stego).astype(int)
        scipy.misc.imsave('test/steg10/{0:05d}.png'.format(e.step), diff)

# write_pgm(stego_file, stego)
# np.save('probmap.npy',probmap)

# print(key)
Example #54
0
 def extent(self):
     bc = np.squeeze(self.bin_centers())
     bmin = bc.min(axis=0)
     bmax = bc.max(axis=0)
     return np.asarray([bmin, bmax])
Example #55
0
def generate_SeqFile_SpiralDiffusion(gx, gy, tr, n_shots, mg, ms, fA, n_slices,
                                     reps, st, tPlot, tReport, b_values,
                                     n_dirs, fov, Nx):

    #%% --- 1 - Create new Sequence Object + Parameters
    seq = Sequence()

    # =========
    # Parameters
    # =========
    i_raster_time = 100000
    assert 1 / i_raster_time == seq.grad_raster_time, "Manually entered inverse raster time does not match the actual value."

    # =========
    # Code parameters
    # =========
    fatsat_enable = 0  # Fat saturation
    kplot = 0

    # =========
    # Acquisition Parameters
    # =========
    TR = tr  # Spin-Echo parameters - TR in [s]
    n_TR = math.ceil(
        TR * i_raster_time)  # Spin-Echo parameters - number of points TR
    bvalue = b_values  # b-value [s/mm2]
    nbvals = np.shape(bvalue)[0]  # b-value parameters
    ndirs = n_dirs  # b-value parameters
    Ny = Nx
    slice_thickness = st  # Acquisition Parameters in [m]
    Nshots = n_shots

    # =========
    # Gradient Scaling
    # =========
    gscl = np.zeros(nbvals + 1)
    gscl[1:] = np.sqrt(bvalue / np.max(bvalue))
    gdir, nb0s = difunc.get_dirs(ndirs)

    # =========
    # Create system
    # =========
    system = Opts(max_grad=mg,
                  grad_unit='mT/m',
                  max_slew=ms,
                  slew_unit='T/m/s',
                  rf_ringdown_time=20e-6,
                  rf_dead_time=100e-6,
                  adc_dead_time=10e-6)

    #%% --- 2 - Fat saturation
    if fatsat_enable:
        fatsat_str = "_fatsat"
        b0 = 1.494
        sat_ppm = -3.45
        sat_freq = sat_ppm * 1e-6 * b0 * system.gamma
        rf_fs, _, _ = make_gauss_pulse(flip_angle=110 * math.pi / 180,
                                       system=system,
                                       duration=8e-3,
                                       bandwidth=abs(sat_freq),
                                       freq_offset=sat_freq)
        gz_fs = make_trapezoid(channel='z',
                               system=system,
                               delay=calc_duration(rf_fs),
                               area=1 / 1e-4)
    else:
        fatsat_str = ""

    #%% --- 3 - Slice Selection
    # =========
    # Create 90 degree slice selection pulse and gradient
    # =========
    flip90 = fA * pi / 180
    rf, gz, _ = make_sinc_pulse(flip_angle=flip90,
                                system=system,
                                duration=3e-3,
                                slice_thickness=slice_thickness,
                                apodization=0.5,
                                time_bw_product=4)

    # =========
    # Refocusing pulse with spoiling gradients
    # =========
    rf180, gz180, _ = make_sinc_pulse(flip_angle=math.pi,
                                      system=system,
                                      duration=5e-3,
                                      slice_thickness=slice_thickness,
                                      apodization=0.5,
                                      time_bw_product=4)
    rf180.phase_offset = math.pi / 2
    gz_spoil = make_trapezoid(channel='z',
                              system=system,
                              area=6 / slice_thickness,
                              duration=3e-3)

    #%% --- 4 - Gradients
    # =========
    # Spiral trajectory
    # =========
    G = gx + 1J * gy

    #%% --- 5 - ADCs / Readouts
    delta_k = 1 / fov
    adc_samples = math.floor(
        len(G) / 4
    ) * 4 - 2  # Apparently, on Siemens the number of samples needs to be divisible by 4...
    adc = make_adc(num_samples=adc_samples,
                   system=system,
                   duration=adc_samples / i_raster_time)

    # =========
    # Pre-phasing gradients
    # =========
    pre_time = 1e-3
    n_pre_time = math.ceil(pre_time * i_raster_time)
    gz_reph = make_trapezoid(channel='z',
                             system=system,
                             area=-gz.area / 2,
                             duration=pre_time)

    #%% --- 6 - Obtain TE and diffusion-weighting gradient waveform
    # For S&T monopolar waveforms
    # From an initial TE, check we satisfy all constraints -> otherwise increase TE.
    # Once all constraints are okay -> check b-value, if it is lower than the target one -> increase TE
    # Looks time-inefficient but it is fast enough to make it user-friendly.
    # TODO: Re-scale the waveform to the exact b-value because increasing the TE might produce slightly higher ones.

    # Calculate some times constant throughout the process
    # We need to compute the exact time sequence. For the normal SE-MONO-EPI sequence micro second differences
    # are not important, however, if we wanna import external gradients the allocated time for them needs to
    # be the same, and thus exact timing is mandatory. With this in mind, we establish the following rounding rules:
    # Duration of RFs + spoiling, and EPI time to the center of the k-space is always math.ceil().

    # The time(gy) refers to the number of blips, thus we subtract 0.5 since the number of lines is always even.
    # The time(gx) refers to the time needed to read each line of the k-space. Thus, if Ny is even, it would take half of the lines plus another half.
    n_duration_center = 0  # The spiral starts right in 0 -- or ADC_dead_time??
    rf_center_with_delay = rf.delay + calc_rf_center(rf)[0]

    n_rf90r = math.ceil((calc_duration(gz) - rf_center_with_delay + pre_time) /
                        seq.grad_raster_time)
    n_rf180r = math.ceil((calc_duration(rf180) + 2 * calc_duration(gz_spoil)) /
                         2 / seq.grad_raster_time)
    n_rf180l = math.floor(
        (calc_duration(rf180) + 2 * calc_duration(gz_spoil)) / 2 /
        seq.grad_raster_time)

    # =========
    # Find minimum TE considering the readout times.
    # =========
    n_TE = math.ceil(20e-3 / seq.grad_raster_time)
    n_delay_te1 = -1
    while n_delay_te1 <= 0:
        n_TE = n_TE + 2

        n_tINV = math.floor(n_TE / 2)
        n_delay_te1 = n_tINV - n_rf90r - n_rf180l

    # =========
    # Find minimum TE for the target b-value
    # =========
    bvalue_tmp = 0
    while bvalue_tmp < np.max(bvalue):
        n_TE = n_TE + 2

        n_tINV = math.floor(n_TE / 2)
        n_delay_te1 = n_tINV - n_rf90r - n_rf180l
        delay_te1 = n_delay_te1 / i_raster_time
        n_delay_te2 = n_tINV - n_rf180r - n_duration_center
        delay_te2 = n_delay_te2 / i_raster_time

        # Waveform Ramp time
        n_gdiff_rt = math.ceil(system.max_grad / system.max_slew /
                               seq.grad_raster_time)

        # Select the shortest available time
        n_gdiff_delta = min(n_delay_te1, n_delay_te2)
        n_gdiff_Delta = n_delay_te1 + 2 * math.ceil(
            calc_duration(gz_spoil) / seq.grad_raster_time) + math.ceil(
                calc_duration(gz180) / seq.grad_raster_time)

        gdiff = make_trapezoid(channel='x',
                               system=system,
                               amplitude=system.max_grad,
                               duration=n_gdiff_delta / i_raster_time)

        # delta only corresponds to the rectangle.
        n_gdiff_delta = n_gdiff_delta - 2 * n_gdiff_rt

        bv = difunc.calc_bval(system.max_grad, n_gdiff_delta / i_raster_time,
                              n_gdiff_Delta / i_raster_time,
                              n_gdiff_rt / i_raster_time)
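        # (Assumption, not verified against difunc: calc_bval presumably implements the standard
        #  trapezoidal PGSE expression b = gamma^2 * G^2 * [delta^2*(Delta - delta/3) + ramp^3/30 - delta*ramp^2/6],
        #  with the arguments above being G, delta, Delta, and the ramp time.)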
        bvalue_tmp = bv * 1e-6

    # =========
    # Show final TE and b-values:
    # =========
    print("TE:", round(n_TE / i_raster_time * 1e3, 2), "ms")
    for bv in range(1, nbvals + 1):
        print(
            round(
                difunc.calc_bval(system.max_grad * gscl[bv], n_gdiff_delta /
                                 i_raster_time, n_gdiff_Delta / i_raster_time,
                                 n_gdiff_rt / i_raster_time) * 1e-6, 2),
            "s/mm2")

    TE = n_TE / i_raster_time
    TR = n_TR / i_raster_time

    #%% --- 7 - Crusher gradients
    gx_crush = make_trapezoid(channel='x',
                              area=2 * Nx * delta_k,
                              system=system)
    gz_crush = make_trapezoid(channel='z',
                              area=4 / slice_thickness,
                              system=system)

    # TR delay - Takes everything into account
    # Distance between the center of the RF90s must be TR
    # The n_pre_time here is the time used to drive the Gx, and Gy spiral gradients to zero.
    n_spiral_time = adc_samples
    n_tr_per_slice = math.ceil(TR / n_slices * i_raster_time)
    if fatsat_enable:
        n_tr_delay = n_tr_per_slice - (n_TE - n_duration_center + n_spiral_time) \
                            - math.ceil(rf_center_with_delay * i_raster_time) \
                            - n_pre_time \
                            - math.ceil(calc_duration(gx_crush, gz_crush) * i_raster_time) \
                            - math.ceil(calc_duration(rf_fs, gz_fs) * i_raster_time)
    else:
        n_tr_delay = n_tr_per_slice - (n_TE - n_duration_center + n_spiral_time) \
                        - math.ceil(rf_center_with_delay * i_raster_time) \
                        - n_pre_time \
                        - math.ceil(calc_duration(gx_crush, gz_crush) * i_raster_time)
    tr_delay = n_tr_delay / i_raster_time

    #%% --- 8 - Checks
    # =========
    # Check TR delay time
    # =========
    assert n_tr_delay > 0, "Such parameter configuration needs longer TR."

    # =========
    # Delay time,
    # =========
    # Time between the gradient and the RF180. This time might be zero some times, although it is not normal.
    if n_delay_te1 > n_delay_te2:
        n_gap_te1 = n_delay_te1 - n_delay_te2
        gap_te1 = n_gap_te1 / i_raster_time
        gap_te2 = 0
    else:
        n_gap_te2 = n_delay_te2 - n_delay_te1
        gap_te2 = n_gap_te2 / i_raster_time
        gap_te1 = 0

    #%% --- 9 - b-zero acquisition
    for r in range(reps):
        for d in range(nb0s):
            for nshot in range(Nshots):
                for s in range(n_slices):
                    # Fat saturation
                    if fatsat_enable:
                        seq.add_block(rf_fs, gz_fs)

                    # RF90
                    rf.freq_offset = gz.amplitude * slice_thickness * (
                        s - (n_slices - 1) / 2)
                    seq.add_block(rf, gz)
                    seq.add_block(gz_reph)

                    # Delay for RF180
                    seq.add_block(make_delay(delay_te1))

                    # RF180
                    seq.add_block(gz_spoil)
                    rf180.freq_offset = gz180.amplitude * slice_thickness * (
                        s - (n_slices - 1) / 2)
                    seq.add_block(rf180, gz180)
                    seq.add_block(gz_spoil)

                    # Delay for spiral
                    seq.add_block(make_delay(delay_te2))

                    # Read k-space
                    # Imaging Gradient waveforms
                    gx = make_arbitrary_grad(channel='x',
                                             waveform=np.squeeze(
                                                 G[:, nshot].real),
                                             system=system)
                    gy = make_arbitrary_grad(channel='y',
                                             waveform=np.squeeze(
                                                 G[:, nshot].imag),
                                             system=system)
                    seq.add_block(gx, gy, adc)

                    # Make the spiral finish in zero - I use pre_time because I know for sure it's long enough.
                    # Furthermore, this is after readout and TR is supposed to be long.
                    amp_x = [G[:, nshot].real[-1], 0]
                    amp_y = [G[:, nshot].imag[-1], 0]
                    gx_to_zero = make_extended_trapezoid(channel='x',
                                                         amplitudes=amp_x,
                                                         times=[0, pre_time],
                                                         system=system)
                    gy_to_zero = make_extended_trapezoid(channel='y',
                                                         amplitudes=amp_y,
                                                         times=[0, pre_time],
                                                         system=system)
                    seq.add_block(gx_to_zero, gy_to_zero)

                    seq.add_block(gx_crush, gz_crush)

                    # Wait TR
                    if tr_delay > 0:
                        seq.add_block(make_delay(tr_delay))

    #%% --- 10 - DWI acquisition
    for r in range(reps):
        for bv in range(1, nbvals + 1):
            for d in range(ndirs):
                for nshot in range(Nshots):
                    for s in range(n_slices):
                        # Fat saturation
                        if fatsat_enable:
                            seq.add_block(rf_fs, gz_fs)

                        # RF90
                        rf.freq_offset = gz.amplitude * slice_thickness * (
                            s - (n_slices - 1) / 2)
                        seq.add_block(rf, gz)
                        seq.add_block(gz_reph)

                        # Diffusion-weighting gradient
                        gdiffx = make_trapezoid(channel='x',
                                                system=system,
                                                amplitude=system.max_grad *
                                                gscl[bv] * gdir[d, 0],
                                                duration=calc_duration(gdiff))
                        gdiffy = make_trapezoid(channel='y',
                                                system=system,
                                                amplitude=system.max_grad *
                                                gscl[bv] * gdir[d, 1],
                                                duration=calc_duration(gdiff))
                        gdiffz = make_trapezoid(channel='z',
                                                system=system,
                                                amplitude=system.max_grad *
                                                gscl[bv] * gdir[d, 2],
                                                duration=calc_duration(gdiff))

                        seq.add_block(gdiffx, gdiffy, gdiffz)

                        # Delay for RF180
                        seq.add_block(make_delay(gap_te1))

                        # RF180
                        seq.add_block(gz_spoil)
                        rf180.freq_offset = gz180.amplitude * slice_thickness * (
                            s - (n_slices - 1) / 2)
                        seq.add_block(rf180, gz180)
                        seq.add_block(gz_spoil)

                        # Diffusion-weighting gradient
                        seq.add_block(gdiffx, gdiffy, gdiffz)

                        # Delay for spiral
                        seq.add_block(make_delay(gap_te2))

                        # Read k-space
                        # Imaging Gradient waveforms
                        gx = make_arbitrary_grad(channel='x',
                                                 waveform=np.squeeze(
                                                     G[:, nshot].real),
                                                 system=system)
                        gy = make_arbitrary_grad(channel='y',
                                                 waveform=np.squeeze(
                                                     G[:, nshot].imag),
                                                 system=system)
                        seq.add_block(gx, gy, adc)

                        # Make the spiral finish in zero - I use pre_time because I know for sure it's long enough.
                        # Furthermore, this is after readout and TR is supposed to be long.
                        amp_x = [G[:, nshot].real[-1], 0]
                        amp_y = [G[:, nshot].imag[-1], 0]
                        gx_to_zero = make_extended_trapezoid(
                            channel='x',
                            amplitudes=amp_x,
                            times=[0, pre_time],
                            system=system)
                        gy_to_zero = make_extended_trapezoid(
                            channel='y',
                            amplitudes=amp_y,
                            times=[0, pre_time],
                            system=system)
                        seq.add_block(gx_to_zero, gy_to_zero)

                        seq.add_block(gx_crush, gz_crush)

                        # Wait TR
                        if tr_delay > 0:
                            seq.add_block(make_delay(tr_delay))

    if tPlot:
        seq.plot()

    if tReport:
        print(seq.test_report())
        seq.check_timing()

    return seq, TE, TR, fatsat_str
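A brief export sketch for the generated sequence (argument values and the output file name are placeholders; `seq.write` is the usual pypulseq export call):

# Usage sketch only; all inputs below must be prepared by the caller.
# seq, TE, TR, fatsat_str = generate_SeqFile_SpiralDiffusion(gx, gy, tr, n_shots, mg, ms, fA,
#                                                            n_slices, reps, st, tPlot, tReport,
#                                                            b_values, n_dirs, fov, Nx)
# seq.write('spiral_diffusion' + fatsat_str + '.seq')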
Example #56
0
def test_loading_weights_by_name_and_reshape():
    """
    test loading model weights by name on:
        - sequential model
    """

    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = losses.mse

    # sequential model
    model = Sequential()
    model.add(Conv2D(2, (1, 1), input_shape=(1, 1, 1), name='rick'))
    model.add(Flatten())
    model.add(Dense(3, name='morty'))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 1, 1, 1))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    old_weights = [layer.get_weights() for layer in model.layers]
    _, fname = tempfile.mkstemp('.h5')

    model.save_weights(fname)

    # delete and recreate model
    del (model)
    model = Sequential()
    model.add(Conv2D(2, (1, 1), input_shape=(1, 1, 1), name='rick'))
    model.add(Conv2D(3, (1, 1), name='morty'))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    # load weights from first model
    with pytest.raises(ValueError):
        model.load_weights(fname, by_name=True, reshape=False)
    with pytest.raises(ValueError):
        model.load_weights(fname, by_name=False, reshape=False)
    model.load_weights(fname, by_name=False, reshape=True)
    model.load_weights(fname, by_name=True, reshape=True)

    out2 = model.predict(x)
    assert_allclose(np.squeeze(out), np.squeeze(out2), atol=1e-05)
    for i in range(len(model.layers)):
        new_weights = model.layers[i].get_weights()
        for j in range(len(new_weights)):
            # only compare layers that have weights, skipping Flatten()
            if old_weights[i]:
                assert_allclose(old_weights[i][j], new_weights[j], atol=1e-05)

    # delete and recreate model with `use_bias=False`
    del (model)
    model = Sequential()
    model.add(
        Conv2D(2, (1, 1), input_shape=(1, 1, 1), use_bias=False, name='rick'))
    model.add(Flatten())
    model.add(Dense(3, name='morty'))
    with pytest.raises(
            ValueError,
            match=r'.* expects [0-9]+ .* but the saved .* [0-9]+ .*'):
        model.load_weights(fname)
    with pytest.raises(
            ValueError,
            match=r'.* expects [0-9]+ .* but the saved .* [0-9]+ .*'):
        model.load_weights(fname, by_name=True)
    with pytest.warns(UserWarning,
                      match=r'Skipping loading .* due to mismatch .*'):
        model.load_weights(fname, by_name=True, skip_mismatch=True)

    # delete and recreate model with `filters=10`
    del (model)
    model = Sequential()
    model.add(Conv2D(10, (1, 1), input_shape=(1, 1, 1), name='rick'))
    with pytest.raises(ValueError,
                       match=r'.* has shape .* but the saved .* shape .*'):
        model.load_weights(fname, by_name=True)
    with pytest.raises(
            ValueError,
            match=r'.* load .* [0-9]+ layers into .* [0-9]+ layers.'):
        model.load_weights(fname)

    os.remove(fname)
Example #57
0
File: fig_7.py Project: PV-Lab/FTCP
font = {
        'family': 'Avenir',
        'weight': 'normal',
        'size': 26
    }
math_font = 'stixsans'
plt.rc('font', **font)
plt.rcParams['mathtext.fontset'] = math_font
plt.rcParams['axes.labelsize'] = font['size']
plt.rcParams['xtick.labelsize'] = font['size']-2
plt.rcParams['ytick.labelsize'] = font['size']-2
plt.rcParams['legend.fontsize'] = font['size']-2

i = 2

fig, ax = plt.subplots(1, 2, figsize=(13,5.3))
s0 = ax[0].scatter(train_latent[:,0],train_latent[:,i],s=7,c=np.squeeze(real_y_train_un.iloc[:,1]), cmap=cmap) # real_y_train_un[:,1]
cbar = plt.colorbar(s0, ax=ax[0], ticks=[0.15, 0.85])
cbar.ax.set_yticklabels(['0', '1'])
ax[0].set_xticks([-6,-2,2,6,10])
ax[0].set_yticks([-2, 2, 6, 10, 14])
x, y = 4, 8
ax[0].scatter(x, y, s=150, facecolors='none', edgecolors='#d62728', linewidths=2.5, linestyle='-')
s1 = ax[1].scatter(train_latent[:,0],train_latent[:,i],s=7,c=np.squeeze(real_y_train_un.iloc[:,0])) # real_y_train_un[:,0]
plt.colorbar(s1, ax=ax[1], ticks=[-1, -3, -5, -7])
ax[1].set_xticks([-6,-2,2,6,10])
ax[1].set_yticks([-2, 2, 6, 10, 14])
ax[1].scatter(x, y, s=150, facecolors='none', edgecolors='#d62728', linewidths=2.5, linestyle='-')

plt.tight_layout()
plt.subplots_adjust(wspace=0.2)
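Both panels above colour the same two latent dimensions by a property column of the training set; np.squeeze flattens the column slice into the 1-D array that matplotlib's c= argument expects. A stripped-down, hedged sketch of the same pattern with synthetic stand-ins for train_latent and real_y_train_un (the column names are illustrative):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Synthetic stand-ins for the latent coordinates and the property table.
latent = np.random.randn(500, 3)
props = pd.DataFrame({'formation_energy': np.random.uniform(-7.0, -1.0, 500),
                      'band_gap_label': np.random.randint(0, 2, 500)})

fig, ax = plt.subplots(figsize=(6, 5))
# squeeze() turns the (500, 1) column slice into the flat array c= expects.
sc = ax.scatter(latent[:, 0], latent[:, 2], s=7,
                c=np.squeeze(props.iloc[:, [0]].values))
fig.colorbar(sc, ax=ax)
plt.tight_layout()
plt.show()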
Example #58
0
   def detect_face(self, image, *, save: Any = False):
      """Primary method to detect a face from an image.

      Detects faces in an image and draws bounding boxes around them. The annotated
      image is returned, and can also be saved to a file via the `save` parameter.

      Usage:

      >>> model = FacialDetector()
      >>> image = model.detect_face(cv2.imread('image/file/path'), save = 'image/save/path')

      Arguments:
         - image: A numpy array representing the image, or an image file path.
         - save: A filepath to save the annotated image to, or a falsy value (the default) to skip saving.
      Returns:
         - The annotated image, with bounding boxes around faces.
      """
      # Validate image.
      if not isinstance(image, np.ndarray):
         if isinstance(image, str):
            if not os.path.exists(image):
               raise FileNotFoundError(f"Received image path string, but the path {image} does not exist.")
            image = cv2.imread(image)
         else:
            raise TypeError(f"Expected a numpy array representing the image, got {type(image)}.")
      if len(image.shape) != 3:
         if len(image.shape) == 4:
            if image.shape[0] == 1 or image.shape[3] == 1:
               logging.warning("Received a 4-dimensional image with a singleton first or last "
                               "dimension (e.g. a batch axis). The extra dimension has been "
                               "squeezed out, but this may affect the output; for best results, "
                               "pass images with exactly three dimensions.")
               image = np.squeeze(image)
         else:
            raise ValueError(f"Image should have three dimensions: width, height, and channels, "
                             f"got {len(image.shape)} dims.")

      # Detect faces from image.
      image_coords = []
      (h, w) = image.shape[:2]

      # Set input blob and forward pass through network.
      blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), swapRB = False, crop = False)
      self.net.setInput(blob)
      faces = self.net.forward()

      # Iterate over faces.
      for dim in range(0, faces.shape[2]):
         # Determine prediction confidence.
         confidence = faces[0, 0, dim, 2]
         if confidence < 0.5:
            continue

         # Determine actual face coordinates.
         box = faces[0, 0, dim, 3:7] * np.array([w, h, w, h])
         (x, y, xe, ye) = box.astype(int)
         image_coords.append((x, y, xe, ye))

         # Annotate image with bounding box.
         cv2.rectangle(image, (x, y), (xe, ye), (0, 255, 255), 3)

      # Save image if requested to.
      if save:
         if not isinstance(save, str):
            raise TypeError(f"The save argument should be a path where the image is going "
                            f"to be saved, got {type(save)}.")
         if os.path.dirname(save) and not os.path.exists(os.path.dirname(save)):
            raise NotADirectoryError("The directory of the save path provided does not exist. Check your paths.")
         cv2.imwrite(save, image)

      # Return annotated image.
      return image
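detect_face assumes self.net is already an OpenCV DNN face detector; the 300x300 blob, the forward pass, and the 0.5 confidence cut follow the usual SSD face-detection recipe. A hedged sketch of loading such a network and calling the method (the model file paths are placeholders that must be obtained separately, and the way the class receives the network is an assumption):
import cv2

# Placeholder paths to OpenCV's Caffe-based SSD face detector files.
net = cv2.dnn.readNetFromCaffe('deploy.prototxt',
                               'res10_300x300_ssd_iter_140000.caffemodel')

detector = FacialDetector()   # constructor shown in the docstring above
detector.net = net            # assumption: the class exposes/accepts .net directly
annotated = detector.detect_face(cv2.imread('group_photo.jpg'),
                                 save='annotated/group_photo.jpg')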
Example #59
0
    print shape(Magvec)
    sm=shape(Magvec)[0]
    sy=shape(data)
    s=(sm, sy[0], sy[2]) 
    print s
    Magcom=Magvec[:,0,:]+1j*Magvec[:,1,:]
    Magcom=reshape(Magcom, s, order="F")
    freq=linspace(fstart, fstart+fstep*(sm-1), sm)
#    
    #freq=linspace(4.0e9, 5.0e9, 1001)
    #print Magcom.dtype, pwr.dtype, yoko.dtype, freq.dtype
    #print rept
    #print yoko
    #print time
    print shape(Magcom)
    Magcom=squeeze(Magcom)
#    print shape(Magcom)
#    print shape(yoko)
#    print yoko
    #print freq
    #Magcom=mean(Magcom, axis=1)
powind=4
print pwr[powind]    
Magabs=Magcom[:, :, :]-mean(Magcom[:, 197:200, :], axis=1, keepdims=True)

fridge_att=87.0+20.0+5.0
pwrlin=0*0.001*10.0**((pwr[powind]-fridge_att)/10.0)

if 0:
    pcolormesh(dB(Magcom[:, :, powind]))   
    show() 
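The fragment above rebuilds complex transmission traces from stored real/imaginary parts, reshapes them into a (frequency, sweep, power) cube with Fortran ordering, subtracts a reference band of sweeps as background, and plots one power slice in dB. A self-contained, hedged sketch of the same reconstruction on synthetic data (the array shapes and the dB helper are assumptions):
import numpy as np

# Synthetic stand-in for Magvec: (n_freq, 2, n_sweeps * n_powers), where the
# middle axis holds the real and imaginary parts of each trace.
sm, n_sweeps, n_powers = 1001, 300, 8
Magvec = np.random.randn(sm, 2, n_sweeps * n_powers)

# Recombine into complex traces and reshape to (freq, sweep, power).
Magcom = Magvec[:, 0, :] + 1j * Magvec[:, 1, :]
Magcom = np.reshape(Magcom, (sm, n_sweeps, n_powers), order="F")

# Background-subtract using a quiet band of sweeps, as in the fragment.
Magabs = Magcom - np.mean(Magcom[:, 197:200, :], axis=1, keepdims=True)

def dB(x):
    # Assumed form of the dB() helper used in the fragment.
    return 20.0 * np.log10(np.abs(x))

# Pick one power index and squeeze the singleton axis before plotting.
powind = 4
slice_db = dB(np.squeeze(Magabs[:, :, [powind]]))
print(slice_db.shape)  # (1001, 300)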
Example #60
0
def nlmeans_proxy(in_file,
                  settings,
                  snr=None,
                  smask=None,
                  nmask=None,
                  out_file=None):
    """
    Uses non-local means to denoise 4D datasets
    """
    from dipy.denoise.nlmeans import nlmeans
    from scipy.ndimage.morphology import binary_erosion
    from scipy import ndimage
    import os.path as op
    import numpy as np
    import nibabel as nb
    # NUMPY_MMAP is a module-level memory-mapping flag in the enclosing file.

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == ".gz":
            fname, fext2 = op.splitext(fname)
            fext = fext2 + fext
        out_file = op.abspath("./%s_denoise%s" % (fname, fext))

    img = nb.load(in_file, mmap=NUMPY_MMAP)
    hdr = img.header
    data = img.get_data()
    aff = img.affine

    if data.ndim < 4:
        data = data[..., np.newaxis]

    data = np.nan_to_num(data)

    if data.max() < 1.0e-4:
        raise RuntimeError("There is no signal in the image")

    df = 1.0
    if data.max() < 1000.0:
        df = 1000.0 / data.max()
        data *= df

    b0 = data[..., 0]

    if smask is None:
        smask = np.zeros_like(b0)
        smask[b0 > np.percentile(b0, 85.0)] = 1

    smask = binary_erosion(smask.astype(np.uint8),
                           iterations=2).astype(np.uint8)

    if nmask is None:
        nmask = np.ones_like(b0, dtype=np.uint8)
        bmask = settings["mask"]
        if bmask is None:
            bmask = np.zeros_like(b0)
            bmask[b0 > np.percentile(b0[b0 > 0], 10)] = 1
            label_im, nb_labels = ndimage.label(bmask)
            sizes = ndimage.sum(bmask, label_im, range(nb_labels + 1))
            maxidx = np.argmax(sizes)
            bmask = np.zeros_like(b0, dtype=np.uint8)
            bmask[label_im == maxidx] = 1
        nmask[bmask > 0] = 0
    else:
        nmask = np.squeeze(nmask)
        nmask[nmask > 0.0] = 1
        nmask[nmask < 1] = 0
        nmask = nmask.astype(bool)

    nmask = binary_erosion(nmask, iterations=1).astype(np.uint8)

    den = np.zeros_like(data)

    est_snr = True
    if snr is not None:
        snr = [snr] * data.shape[-1]
        est_snr = False
    else:
        snr = []

    for i in range(data.shape[-1]):
        d = data[..., i]
        if est_snr:
            s = np.mean(d[smask > 0])
            n = np.std(d[nmask > 0])
            snr.append(s / n)

        den[..., i] = nlmeans(d, snr[i], **settings)

    den = np.squeeze(den)
    den /= df

    nb.Nifti1Image(den.astype(hdr.get_data_dtype()), aff,
                   hdr).to_filename(out_file)
    return out_file, snr
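nlmeans_proxy estimates a per-volume SNR from a signal mask and a noise mask and hands each 3-D volume to dipy's nlmeans. A hedged sketch of the underlying dipy call on its own, using dipy's estimate_sigma in place of the mask-based SNR above (the input file name is a placeholder and the coil count N is an assumption):
import numpy as np
import nibabel as nb
from dipy.denoise.nlmeans import nlmeans
from dipy.denoise.noise_estimate import estimate_sigma

img = nb.load('dwi.nii.gz')                       # placeholder input file
data = np.nan_to_num(np.asanyarray(img.dataobj)).astype(np.float32)

# One noise estimate per volume, then denoise volume by volume.
sigma = estimate_sigma(data, N=4)                 # N: assumed receiver-coil count
den = np.zeros_like(data)
for i in range(data.shape[-1]):
    den[..., i] = nlmeans(data[..., i], sigma=sigma[i],
                          patch_radius=1, block_radius=5, rician=True)

nb.Nifti1Image(den, img.affine).to_filename('dwi_denoised.nii.gz')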