Example no. 1
import numpy as np
from scipy.signal import cheby1, lfilter
# NPKData refers to SPIKE's NPKData module, imported at module level in the original source

def downsample2D(data, n1=2, n2=2):
    """
    takes data (a 2D) and generates a smaller dataset downsampled by factor (n1,n2) on each axis
    the returned data-set is n1*n2 times smaller
    - low-pass filters (Chebyshev) along F2 and averages groups of n1 rows along F1

    ** Not tested on non powers of 2 **
    """
    b2, a2 = cheby1(4, 0.05, 0.8 / n2)  # 4th order Chebyshev type I low-pass, used as anti-aliasing filter along F2
    cls = type(data)
    outp = cls(buffer=np.zeros((data.size1 // n1, data.size2 // n2)))  # create a data-set of the same type as the input
    for i in range(0, data.size1, n1):
        temp = np.zeros(data.size2 // n2)
        for j in range(n1):
            yy = lfilter(b2, a2, data.row(i + j).buffer)  # filter along F2
            temp += yy[n2 // 2::n2]                       # decimate along F2
        outp.buffer[i // n1, :] = (1.0 / n1) * temp       # average the n1 rows
    NPKData.copyaxes(data, outp)
    outp.adapt_size()
    return outp
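The filter-then-decimate step above can be tried outside of SPIKE on a plain NumPy array. The sketch below is illustrative only (the helper name downsample_axis and the test shape are not part of SPIKE); it uses the same scipy.signal.cheby1 / lfilter parameters as the function above.

import numpy as np
from scipy.signal import cheby1, lfilter

def downsample_axis(arr, n, axis=-1):
    """Low-pass filter (Chebyshev type I, order 4, 0.05 dB ripple), then keep every n-th point along `axis`."""
    if n == 1:
        return arr.copy()
    b, a = cheby1(4, 0.05, 0.8 / n)          # anti-aliasing filter
    filtered = lfilter(b, a, arr, axis=axis)
    sl = [slice(None)] * arr.ndim
    sl[axis] = slice(n // 2, None, n)        # decimate
    return filtered[tuple(sl)]

data = np.random.rand(128, 256)
small = downsample_axis(downsample_axis(data, 2, axis=1), 2, axis=0)
print(small.shape)   # (64, 128)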
Example no. 2
import numpy as np
from mayavi import mlab
# npk refers to SPIKE's NPKData module, imported at module level in the original plugin

def zoom3D(npkd,
           zoom,
           fontsize=0.7,
           font='times',
           colormap='blue-red',
           showaxes=True):
    """
    use the zoom region and display it in interactive 3D
    
    zoom : f1lo, f1up, f2lo, f2up - expressed in current unit

    note: use a small zoom region!

    x = axis 2
    y = axis 1
    z = intensity
    """
    z1lo, z1up, z2lo, z2up = npk.parsezoom(npkd, zoom)  # convert the zoom window from current units to indices
    # extract() would modify the data and get_buffer() loads everything into memory,
    # so the zoom region is copied column by column instead
    d5 = np.zeros((z2up - z2lo + 1, z1up - z1lo + 1))  # transposed matrix compared to npkd
    for i in range(z2lo, z2up + 1):
        cc = npkd.col(i)  # going column-wise is probably faster ...
        d5[i - z2lo, :] = cc[z1lo:z1up + 1]  # slicing an NPKData returns a np.array
    zmax = np.amax(d5)
    zmin = np.amin(d5)  # not simply 0 - some data-sets are negative
    xmin = zoom[2]
    xmax = zoom[3]
    ymin = zoom[0]
    ymax = zoom[1]
    mlab.figure(bgcolor=(1., 1., 1.), fgcolor=(0., 0., 0.))
    mlab.surf(d5,
              extent=[0, 1000, 0, 1000, 0, 1000],
              warp_scale='auto',
              colormap=colormap)
    ax = mlab.axes(x_axis_visibility=showaxes,
                   y_axis_visibility=showaxes,
                   z_axis_visibility=showaxes,
                   xlabel="F2 " + npkd.axis2.currentunit,
                   ylabel="F1 " + npkd.axis1.currentunit,
                   zlabel='Intensity',
                   ranges=[xmin, xmax, ymin, ymax, zmin, zmax],
                   nb_labels=5)
    ax.label_text_property.font_family = font
    ax.title_text_property.font_family = font
    ax.axes.font_factor = fontsize
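For readers without a SPIKE data-set at hand, the Mayavi calls used above can be exercised on a synthetic surface. This is a minimal sketch, assuming mayavi is installed; the array and the axis labels are placeholders rather than real NPKData content.

import numpy as np
from mayavi import mlab

x, y = np.mgrid[-3:3:60j, -3:3:80j]
z = np.exp(-(x ** 2 + y ** 2))               # stand-in for the extracted zoom region
mlab.figure(bgcolor=(1., 1., 1.), fgcolor=(0., 0., 0.))
mlab.surf(z, warp_scale='auto', colormap='blue-red')
ax = mlab.axes(xlabel="F2", ylabel="F1", zlabel="Intensity", nb_labels=5)
ax.label_text_property.font_family = 'times'
ax.title_text_property.font_family = 'times'
ax.axes.font_factor = 0.7
mlab.show()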
Example no. 3
from scipy.signal import medfilt2d
# NPKData refers to SPIKE's NPKData module, imported at module level in the original source

def downsample2D_med(data, n1=2, n2=2):
    """
    takes data (a 2D) and generates a smaller dataset downsampled by factor (n1,n2) on each axis
    the returned data-set is n1*n2 times smaller
    - uses a 2D median filter before decimation

    ** Not tested on non powers of 2 **
    """
    cls = type(data)
    outp = cls(dim=2)  # create a data-set of the same type as the input
    if n1 > 1:
        yy = medfilt2d(data.buffer, (n1 + 1, n2 + 1))  # median filter on both axes
        outp.buffer = yy[n1 // 2::n1, n2 // 2::n2]     # then decimate
    else:
        yy = medfilt2d(data.buffer, (1, n2 + 1))       # median filter along F2 only
        outp.buffer = yy[:, n2 // 2::n2]
    NPKData.copyaxes(data, outp)
    outp.adapt_size()
    return outp
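The median-filter variant can likewise be sketched on a bare NumPy array; the helper name downsample_median and the test shape are illustrative, and n1, n2 are assumed even so that the (n1+1, n2+1) kernel stays odd, as medfilt2d requires.

import numpy as np
from scipy.signal import medfilt2d

def downsample_median(arr, n1=2, n2=2):
    """Median-filter with an odd (n1+1, n2+1) kernel, then keep every n-th point on each axis."""
    filtered = medfilt2d(arr, (n1 + 1, n2 + 1))
    return filtered[n1 // 2::n1, n2 // 2::n2]

small = downsample_median(np.random.rand(128, 256))
print(small.shape)   # (64, 128)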
Example no. 4
def process_DOSY(fid, ppm_offset, lazy=False):
    "Performs all processing of DOSY "
    import spike.plugins.PALMA as PALMA
    global POOL  # POOL, PALMA_ITER and TMS are defined at module level in the original script
    d = PALMA.Import_DOSY(fid)
    print('PULPROG', d.params['acqu']['$PULPROG'], '   dfactor', d.axis1.dfactor)
    # process in F2
    processed = op.join(op.dirname(fid), 'processed.gs2')  # op is os.path, npkd is SPIKE's NPKData module
    if op.exists(processed) and lazy:
        dd = npkd.NPKData(name=processed)
        npkd.copyaxes(d, dd)
        dd.axis1.itype = 0
        dd.axis2.itype = 0
        dd.adapt_size()
    else:
        d.chsize(sz2=min(16 * 1024, d.axis2.size))
        d.apod_em(1, axis=2).ft_sim().bruker_corr()
        # automatic phase correction
        r = d.row(2)
        r.apmin()
        d.phase(r.axis1.P0, r.axis1.P1, axis=2).real()
        # correct the F2 chemical-shift offset
        d.axis2.offset += ppm_offset * d.axis2.frequency
        # save
        fiddir = op.dirname(fid)
        d.save(op.join(fiddir, "preprocessed.gs2"))
        # ILT
        NN = 256
        d.prepare_palma(NN, 10.0, 10000.0)
        mppool = POOL
        dd = d.do_palma(miniSNR=20,
                        nbiter=PALMA_ITER,
                        lamda=0.05,
                        mppool=mppool)
        if TMS:
            r = autozero(r)  # calibrate only F2 axis !
            dd.axis2.offset = r.axis1.offset
    dd.axis2.currentunit = 'ppm'
    return dd
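The F2 processing step above (exponential apodization followed by Fourier transform, done here with apod_em and ft_sim) can be sketched with plain NumPy. The function name em_ft, the line-broadening value and the dwell time below are illustrative assumptions, not SPIKE's actual implementation.

import numpy as np

def em_ft(fid_row, lb=1.0, dwell=1e-4):
    """Exponential line broadening (lb in Hz) followed by FT of one FID row."""
    t = np.arange(fid_row.size) * dwell
    apodized = fid_row * np.exp(-np.pi * lb * t)
    return np.fft.fftshift(np.fft.fft(apodized))

fid = np.random.rand(4096) + 1j * np.random.rand(4096)
print(em_ft(fid).shape)   # (4096,)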
Example no. 5
# __init__ method of the OrbiData class; OrbiAxis, HDF5File and NPKData come from the enclosing SPIKE module
def __init__(self,
             dim=1,
             shape=None,
             mode="memory",
             buffer=None,
             name=None,
             debug=0):
    self.axis1 = OrbiAxis()  # creates an OrbiAxis so that pylint does not complain - will be overwritten
    if dim == 2:
        raise Exception("2D Orbitrap is not physically defined (yet ?)")
    if name:
        if name.endswith(".msh5"):  # try loading a .msh5 file
            if debug > 0:
                print("reading msh5")
            H = HDF5File(name, "r")
            H.load(mode=mode)  # load into memory by default !
            super(OrbiData, self).__init__(buffer=H.data.buffer, debug=debug)
            NPKData.copyaxes(H.data, self)  # and deep copy all axes from the file
            self.name = name
            self.hdf5file = H
        else:
            raise Exception("Filename should have a .msh5 extension")
    else:
        if debug > 0:
            print("calling super")
        super(OrbiData, self).__init__(dim=dim,
                                       shape=shape,
                                       buffer=buffer,
                                       name=name,
                                       debug=debug)
        for i in range(self.dim):
            axis = self.axes(i + 1)
            setattr(self, "axis%d" % (i + 1), OrbiAxis(size=axis.size, itype=0))
    if debug > 1:
        print(self.report())
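A hedged usage sketch of this constructor follows; the import path spike.Orbitrap and the file name are assumptions and may differ in an actual SPIKE installation.

import numpy as np
from spike.Orbitrap import OrbiData   # import path assumed, adapt to your install

# in-memory 1D Orbitrap data-set built from a plain numpy buffer
d = OrbiData(buffer=np.zeros(16384))
print(d.axis1.size)

# loading a stored data-set (the constructor requires a .msh5 extension)
# d = OrbiData(name="my_dataset.msh5", mode="memory")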