Example #1
    def fit(self, pts, polys, recache=False):
        self._n = len(pts)
        self._surface = Surface(pts, polys)

        # check to see if the full K is cached
        # Use a stable content hash: the built-in hash() is salted per process,
        # so cache files would never be reused across runs (requires hashlib).
        dataset_hash = hashlib.sha1(
            pts.tobytes() + polys.tobytes() + str(self._m).encode()).hexdigest()
        cache_path = os.path.join(self.CACHE_DIR, dataset_hash+'_geodesic_K_cache.npz')
        if not os.path.exists(cache_path) or recache:
            print('Cache not found, computing K..')
            K = np.vstack([self._surface.geodesic_distance([i], m=self._m) for i in counter(range(self._n))])
            self._K = (K + K.T) / 2.0
            np.savez(cache_path, geodesic_K=self._K)
        else:
            print('Loading K from cache..')
            self._K = np.load(cache_path)['geodesic_K']
Example #2
class MeshKLazy(SymMatrixApprox):
    def __init__(self, func=None, m=1.0):
        """
        m : float, default 1.0
            Time step for the geodesic heat approximation. Should be 1.0 normally,
            but if there's numerical instability, set it higher (e.g. 100.0)
        """
        if func is not None:
            self._func = func
        else:
            self._func = lambda x: x

        self._m = m

    def fit(self, pts, polys):
        self._n = len(pts)
        self._surface = Surface(pts, polys)

    def get_row(self, i):
        return self._func(self._surface.geodesic_distance([i], m=self._m))

    def get_rows(self, rows):
        return np.vstack([
            self._func(self._surface.geodesic_distance([i], m=self._m))
            for i in rows
        ])

    def get_size(self):
        return self._n

    def get_memory(self):
        return (
            self._surface.pts.shape[0] * self._surface.pts.shape[1] +
            self._surface.polys.shape[0] * self._surface.polys.shape[1]) * 8

    def get_name(self):
        return 'MeshKLazy'

    @property
    def shape(self):
        return self._n, self._n
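
A minimal usage sketch, assuming a toy mesh in the (pts, polys) format accepted by cortex.polyutils.Surface; the coordinates and the exponential transform below are illustrative only:

import numpy as np

# A single tetrahedron as a toy mesh (coordinates are illustrative).
pts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
polys = np.array([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]])

K = MeshKLazy(func=lambda d: np.exp(-d), m=1.0)  # heat-kernel-style transform
K.fit(pts, polys)
print(K.shape)       # (4, 4)
print(K.get_row(0))  # transformed geodesic distances from vertex 0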
Example #3
def perpendicular_scalar_func(sfunc, surf, origin, antipode, mask):
    sfunc_grad = surf.surface_gradient(sfunc, at_verts=False)
    normals = surf.face_normals
    grad_perps = np.cross(sfunc_grad, normals)
    # normalize each face gradient, mapping zero-length gradients to 0 instead of nan
    norm_grad_perps = np.nan_to_num(grad_perps.T / np.sqrt(
        (grad_perps**2).sum(1))).T

    # compute integrated divergence
    X = norm_grad_perps
    c32, c13, c21 = surf._cot_edge
    x1 = 0.5 * (c32 * X).sum(1)
    x2 = 0.5 * (c13 * X).sum(1)
    x3 = 0.5 * (c21 * X).sum(1)

    conn1, conn2, conn3 = surf._polyconn
    divx = conn1.dot(x1) + conn2.dot(x2) + conn3.dot(x3)

    # create a surface with a cut along the geodesic path from origin to antipode
    cut_path = np.array(surf.geodesic_path(origin, antipode, m=5))
    cut_path = np.union1d(cut_path, mask)
    good_polys = ~np.any(
        np.in1d(surf.polys, cut_path).reshape(surf.polys.shape), 1)

    cut_surf = Surface(surf.pts, surf.polys[good_polys])
    # run the geodesic distance code once so the surface builds its Laplace solver
    cut_surf.geodesic_distance([0])
    cconn1, cconn2, cconn3 = cut_surf._polyconn
    divx_cut = cconn1.dot(x1[good_polys]) + cconn2.dot(
        x2[good_polys]) + cconn3.dot(x3[good_polys])

    # integrate to find the function whose gradient is norm_grad_perps
    part_perp_func = cut_surf._nLC_solvers[1.0](divx_cut[cut_surf._goodrows])
    perp_func = np.zeros_like(sfunc)
    perp_func[cut_surf._goodrows] = part_perp_func

    return perp_func
Example #4
    def preprocessing(self, pts, polys):
        self._pts = pts
        self._polys = polys
        self._surface = Surface(pts, polys)

        B, D, lapW, lapV = self._surface.laplace_operator
        npt = len(D)
        Dinv = sparse.dia_matrix((D ** -1, [0]), (npt, npt)).tocsr()  # construct Dinv

        self._M = (lapV - lapW).dot(Dinv.dot(lapV - lapW))
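        # Note (inferred from the code, not stated in the source): lapV - lapW is
        # the symmetric stiffness matrix S, so M = S * Dinv * S is a discrete
        # biharmonic (squared-Laplacian) operator.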

        self._lapW = lapW
        self._lapV = lapV
        self._Dinv = Dinv
Example #5
class MeshK(SymMatrixApprox):
    CACHE_DIR = "/tmp"

    def __init__(self, func=None, m=1.0):
        self._m = m
        if func is not None:
            self._func = func
        else:
            self._func = lambda x: x

    def fit(self, pts, polys, recache=False):
        self._n = len(pts)
        self._surface = Surface(pts, polys)

        # check to see if the full K is cached; use a stable content hash
        # (the built-in hash() is salted per process, so cache files would
        # never be reused across runs; requires hashlib)
        dataset_hash = hashlib.sha1(
            pts.tobytes() + polys.tobytes() + str(self._m).encode()).hexdigest()
        cache_path = os.path.join(self.CACHE_DIR,
                                  dataset_hash + '_geodesic_K_cache.npz')
        if not os.path.exists(cache_path) or recache:
            print('Cache not found, computing K..')
            K = np.vstack([
                self._surface.geodesic_distance([i], m=self._m)
                for i in counter(range(self._n))
            ])
            self._K = (K + K.T) / 2.0
            np.savez(cache_path, geodesic_K=self._K)
        else:
            print('Loading K from cache..')
            self._K = np.load(cache_path)['geodesic_K']

    def get_row(self, i):
        return self._K[i, :]

    def get_rows(self, rows):
        return self._K[rows, :]

    def get_size(self):
        return self._n

    def get_memory(self):
        return self._K.shape[0] * self._K.shape[1] * 8

    def get_name(self):
        return 'MeshK'

    @property
    def shape(self):
        return self._n, self._n
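
A sketch of the caching behavior, reusing the toy pts/polys arrays from the MeshKLazy sketch above (counter is assumed to be a progress-reporting iterator defined elsewhere in the source module):

K = MeshK(m=1.0)
K.fit(pts, polys)   # first call: computes the full matrix and writes the .npz cache
K2 = MeshK(m=1.0)
K2.fit(pts, polys)  # same data and m: loads geodesic_K from /tmp instead of recomputing
print(np.allclose(K.get_rows([0, 1]), K2.get_rows([0, 1])))  # True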
Example #6
    def preprocessing(self, pts, polys):
        # Compute Laplacian
        self._pts = pts
        self._polys = polys
        self._surface = Surface(pts, polys)

        B, D, lapW, lapV = self._surface.laplace_operator
        npt = len(D)
        Dinv = sparse.dia_matrix((D**-1, [0]), (npt, npt))  # construct Dinv
        self._L = Dinv.dot(lapV - lapW).tocsc()
        self._L.sort_indices()  # tocsc() returns a new matrix, so convert before sorting in place

        # extract the self._me smallest-magnitude eigenpairs of the Laplacian
        self.Lambda, self.Phi = sla.eigs(self._L, k=self._me, which='SM')
        self.Lambda = np.real(self.Lambda)
        self.Phi = np.real(self.Phi)
Example #7
    def fit(self, pts, polys):
        self._n = len(pts)
        self._surface = Surface(pts, polys)
Example #8
    def _run_interface(self, runtime):
        import h5py
        import surfer.utils as surfutils
        import scipy.sparse
        import scipy.signal
        from cortex.polyutils import Surface

        in_ts = h5py.File(self.inputs.in_file, 'r')
        in_data = in_ts[self.inputs.data_field]
        out_file = self._list_outputs()['out_file']
        out_ts = h5py.File(out_file, 'a')  # explicit mode; recent h5py no longer defaults to 'a'

        in_ts.copy('COORDINATES', out_ts)
        in_ts.copy('STRUCTURES', out_ts)  # TODO: fix; copying object references currently fails

        structs = in_ts['STRUCTURES']

        fmri_group = out_ts.create_group('FMRI')
        nfeatures, nsamples = in_data.shape
        out_data = fmri_group.create_dataset('DATA',
                                             dtype=np.float32,
                                             shape=(nfeatures, nsamples),
                                             maxshape=(nfeatures, None))

        for k, v in in_data.attrs.items():
            out_data.attrs[k] = v

        stdmap = np.std(in_data, -1)
        finite_mean = stdmap[np.logical_not(np.isnan(stdmap))].mean()
        good_voxels = stdmap < self.inputs.std_threshold * finite_mean
        good_voxels[np.isnan(stdmap)] = False  # np.isnan on the boolean array itself would raise

        for st in structs:
            attrs = structs[st].attrs
            if attrs['ModelType'] == 'SURFACE':
                sl = slice(attrs['IndexOffset'],
                           attrs['IndexOffset'] + attrs['IndexCount'])
                # TODO, move to real heat kernel on surfaces
                """
                adj_mat = surfutils.mesh_edges(
                    np.asarray(structs[st]['TRIANGLES']))
                smooth_mat = surfutils.smoothing_matrix(
                    np.where(good_voxels[sl])[0],
                    adj_mat,
                    self.inputs.smoothing_steps)
                del adj_mat
                # TODO: see if it buffers in memory or not, if so iterate
                # over slabs of data (find optimal)

                sdata =  scipy.signal.detrend(
                    smooth_mat.dot(in_data[sl][good_voxels[sl]]),-1)
                del smooth_mat
                sdata -= sdata.mean(-1)[:,np.newaxis]
                sdata /= sdata.std(-1)[:,np.newaxis]
                sdata[np.isnan(sdata)] = 0
                """
                surf = Surface(np.asarray(in_ts['COORDINATES'][sl]),
                               np.asarray(structs[st]['TRIANGLES']))
                sdata = np.empty((attrs['IndexCount'], in_data.shape[1]))
                frame = np.empty(attrs['IndexCount'], dtype=in_data.dtype)
                for fr in range(in_data.shape[1]):
                    frame[:] = in_data[sl, fr]
                    frame[np.isnan(frame)] = 0
                    sdata[:, fr] = surf.smooth(frame,
                                               self.inputs.smoothing_factor)
                del surf, frame
                #sdata[:] = scipy.signal.detrend(sdata, -1)
                #sdata -= sdata.mean(-1)[:, np.newaxis]
                #sdata /= sdata.std(-1)[:, np.newaxis]
                sdata[np.isnan(sdata)] = 0
                out_data[sl] = sdata
                del sdata
            elif attrs['ModelType'] == 'VOXELS':
                # voxsize should be stored at sampling for convenience
                voxsize = 2.0  # could be anisotropic if necessary, see below
                for roi_name, label, ofst, cnt in structs[st]['ROIS']:
                    if cnt == 0:
                        continue
                    sl = slice(ofst, ofst + cnt)
                    coords = in_ts['COORDINATES'][sl]
                    adj_mat = scipy.sparse.coo_matrix(
                        np.all(
                            np.abs(coords[np.newaxis] - coords[:, np.newaxis])
                            < voxsize * 1.5, -1))
                    smooth_mat = surfutils.smoothing_matrix(
                        np.where(good_voxels[sl])[0], adj_mat,
                        self.inputs.smoothing_steps)
                    del adj_mat
                    # TODO: see if it buffers in memory or not, if so iterate
                    # over slabs of data (find optimal)
                    #sdata =  scipy.signal.detrend(
                    #    smooth_mat.dot(in_data[sl][good_voxels[sl]]),-1)
                    sdata = smooth_mat.dot(in_data[sl][good_voxels[sl]])
                    #sdata -= sdata.mean(-1)[:,np.newaxis]
                    #sdata /= sdata.std(-1)[:,np.newaxis]
                    sdata[np.isnan(sdata)] = 0
                    out_data[sl] = sdata
                    del smooth_mat
        if isdefined(self.inputs.TR):
            tr = self.inputs.TR
        elif 'TR' in in_ts['FMRI'].attrs.keys():
            tr = in_ts['FMRI'].attrs['TR']  # read from the input file whose attrs were just checked
        else:
            raise ValueError('TR is not known')

        from scipy import signal

        ub_frac = 1.
        if self.inputs.filter_range[1] != -1:  # '!=' instead of 'is not' for an int comparison
            ub_frac = self.inputs.filter_range[1] * tr * 2.
        lb_frac = self.inputs.filter_range[0] * tr * 2.
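        # lb_frac/ub_frac are band edges normalized to the Nyquist frequency
        # (fs = 1/TR, so f_Hz / (fs/2) = f_Hz * TR * 2), the convention
        # scipy.signal.iirdesign expects when no fs argument is given.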
        if lb_frac > 0 and ub_frac < 1:
            wp = [lb_frac, ub_frac]
            ws = [np.max([lb_frac - 0.1, 0]), np.min([ub_frac + 0.1, 1.0])]
        elif lb_frac == 0:
            wp = ub_frac
            ws = np.min([ub_frac + 0.1, 0.9])
        elif ub_frac == 1:
            wp = lb_frac
            ws = np.max([lb_frac - 0.1, 0.1])
        b, a = signal.iirdesign(wp, ws, 1, 60, ftype='ellip')
        print(b, a)

        #        from scipy.fftpack import dct, idct
        #        for i in xrange(out_ts['FMRI/DATA'].shape[0]):
        #            tmp = signal.filtfilt(
        #                b, a, np.asarray(out_ts['FMRI/DATA'][i]))
        #            out_ts['FMRI/DATA'][i] = (tmp-tmp.mean())/tmp.std()
        """
        ts_dct = dct(out_ts['FMRI/DATA'], axis=1)
        cutoff = np.round(out_ts['FMRI/DATA'].shape[1] * tr / 128)
        ts_dct[:,:cutoff] = 0
        ts_dct = idct(ts_dct,axis=1)
        out_ts['FMRI/DATA'][:] = (ts_dct-ts_dct.mean(1)[:,np.newaxis])/ts_dct.std(1)[:,np.newaxis]
        out_ts['FMRI/DATA'][np.isnan(out_ts['FMRI/DATA'])] = 0 
        del ts_dct
        """
        in_ts.close()
        out_ts.close()
        return runtime
Example #9
def make_mesh(boundary_in,
              ref_in,
              file_out,
              nlayer,
              flip_faces=False,
              niter_smooth=2,
              niter_upsample=0,
              niter_inflate=15):
    """
    This function generates a surface mesh from a levelset image. The surface mesh is smoothed and a
    curvature file is generated. Vertices are in the vertex ras coordinate system. Optionally, the
    mesh can be upsampled and an inflated version of the mesh can be written out. The hemisphere
    has to be indicated as prefix in the output file. If nlayer is set to -1, a 3D levelset image
    can be used as boundary input file.
    Inputs:
        *boundary_in: filename of 4D levelset image.
        *ref_in: filename of reference volume for getting the coordinate transformation.
        *file_out: filename of output surface.
        *nlayer: layer from the 4D boundary input at which the mesh is generated.
        *flip_faces: reverse normal direction of mesh.
        *niter_smooth: number of smoothing iterations.
        *niter_upsample: number of upsampling iterations (is performed if set > 0).
        *niter_inflate: number of inflating iterations (is performed if set > 0).
    
    created by Daniel Haenelt
    Date created: 18-12-2019
    Last modified: 24-07-2020
    """
    import os
    import numpy as np
    import nibabel as nb
    from nibabel.affines import apply_affine
    from nibabel.freesurfer.io import write_geometry
    from nighres.surface import levelset_to_mesh
    from cortex.polyutils import Surface
    from lib.surface.vox2ras import vox2ras
    from lib.surface.smooth_surface import smooth_surface
    from lib.surface.upsample_surf_mesh import upsample_surf_mesh
    from lib.surface.get_curvature import get_curvature
    from lib.surface import inflate_surf_mesh

    # make output folder
    if not os.path.exists(os.path.dirname(file_out)):
        os.makedirs(os.path.dirname(file_out))

    # get levelset boundary from single layer
    boundary = nb.load(boundary_in)
    boundary.header["dim"][0] = 1
    boundary_array = boundary.get_fdata()

    if nlayer != -1:
        boundary_array = boundary_array[:, :, :, nlayer]

    boundary = nb.Nifti1Image(boundary_array, boundary.affine, boundary.header)

    # make mesh
    surf = levelset_to_mesh(boundary,
                            connectivity="18/6",
                            level=0.0,
                            inclusive=True)

    # get vertices and faces
    vtx = surf["result"]["points"]
    fac = surf["result"]["faces"]

    # get vox2ras transformation
    vox2ras_tkr, _ = vox2ras(ref_in)

    # apply vox2ras to vertices
    vtx = apply_affine(vox2ras_tkr, vtx)

    # flip faces
    if flip_faces:
        fac = np.flip(fac, axis=1)

    # write mesh
    write_geometry(file_out, vtx, fac)

    # smooth surface
    smooth_surface(file_out, file_out, niter_smooth)

    # upsample mesh (optionally)
    if niter_upsample != 0:
        upsample_surf_mesh(file_out, file_out, niter_upsample, "linear")

    # print number of vertices and average edge length
    print("number of vertices: " + str(len(vtx[:, 0])))
    print("average edge length: " + str(Surface(vtx, fac).avg_edge_length))

    # get curvature (looks for hemisphere prefix)
    get_curvature(file_out, os.path.dirname(file_out))

    # inflate surface (optionally)
    if niter_inflate != 0:
        inflate_surf_mesh(file_out, file_out + "_inflated", niter_inflate)
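
A hypothetical call with made-up filenames (all paths and parameter values below are illustrative, not from the source):

make_mesh(boundary_in="boundaries.nii",    # hypothetical 4D levelset image
          ref_in="orig.nii",               # hypothetical reference volume
          file_out="/tmp/mesh/lh.layer5",  # hemisphere prefix required in the filename
          nlayer=5,                        # take layer 5 of the 4D boundary input
          flip_faces=False,
          niter_smooth=2,
          niter_upsample=1,                # > 0: also upsample the mesh
          niter_inflate=15)                # > 0: also write an inflated version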
Example #10
"""
The method used here is biharmonic interpolation, which finds the solution with
the minimum squared Laplacian (fourth derivative) that still passes through all
the selected points. This is similar to thin plate splines.

"""

import cortex
from cortex.polyutils import Surface
import numpy as np
np.random.seed(1234)
from matplotlib import pyplot as plt

subject = "S1"

# First we need to import the surfaces for this subject
lsurf, rsurf = [Surface(*d) for d in cortex.db.get_surf(subject, "fiducial")]

# Let's choose a few points and generate data for them
selected_pts = np.arange(len(lsurf.pts), step=5000)
num_selected_pts = len(selected_pts)
sparse_data = np.random.randn(num_selected_pts)

# Then interpolate
interp_data = lsurf.interp(selected_pts, sparse_data)

# Plot the result
# interp_data is only for the left hemisphere, but the Vertex constructor
# infers that and fills the right hemisphere with zeros
interp_vertex = cortex.Vertex(interp_data[:, 0],
                              subject,
                              vmin=-2,
                              vmax=2)  # snippet truncated in the source; closing arguments assumed
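
One plausible way to render the result, assuming pycortex's standard cortex.quickshow API (these lines are not part of the original snippet):

cortex.quickshow(interp_vertex)
plt.show()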
Example #11
    orig_params = []
    dense_params = []
    file_surf = ["sphere", "white", "pial",
                 "inflated"]  # list of surfaces to subdivide
    for i in range(len(file_surf)):
        for j in range(len(hemi)):
            file_in = os.path.join(path, sub, "surf",
                                   hemi[j] + "." + file_surf[i])
            file_out = os.path.join(path_dense, hemi[j] + "." + file_surf[i])
            upsample_surf_mesh(file_in, file_out, niter_upsample,
                               method_upsample)

            if i == 0:
                vtx, fac = read_geometry(file_in)
                vtx_dense, fac_dense = read_geometry(file_out)
                orig = [len(vtx[:, 0]), Surface(vtx, fac).avg_edge_length]
                dense = [
                    len(vtx_dense[:, 0]),
                    Surface(vtx_dense, fac_dense).avg_edge_length
                ]
                orig_params.extend(orig)
                dense_params.extend(dense)

    # transform curv to dense surface
    print("Transform morphological files to dense")
    for i in range(len(hemi)):
        morph2dense(os.path.join(path, sub, "surf", hemi[i] + ".sphere"),
                    os.path.join(path_dense, hemi[i] + ".sphere"),
                    os.path.join(path, sub, "surf", hemi[i] + ".curv"),
                    path_dense)
Example #12
import numpy as np
import os
from cortex.polyutils import Surface
from perpendicular_path import perpendicular_scalar_func
import io_mesh as io
from neighbours import get_neighbours

surf_io = io.load_mesh_geometry('icbm_avg_mid_sym_mc_left_hires.obj')
surf = Surface(surf_io['coords'], surf_io['faces'])

atlas = np.loadtxt('icbm_avg_mid_sym_mc_atlas_left.txt')
lobule = np.loadtxt('lobule.txt')
medial = (atlas == 0).astype(int)
medial[lobule == 1] = 1
mask = np.where(medial == 1)[0]
dists = surf.geodesic_distance(mask, m=5)

max_start = np.argmax(dists)
#np.savetxt('dists.txt',dists)

inv_dists = surf.geodesic_distance(max_start, m=5)
filtered = inv_dists.copy()
filtered[medial == 0] = 500
#np.savetxt('invdists.txt',inv_dists)
#filtered=filtered*0
#filtered[max_start]=1
#np.savetxt('start.txt',filtered)

start_vert = max_start  # same vertex as np.argmax(dists) above
end_vert = np.argmin(filtered)
#print(end_vert)
Example #13
def calculate_equivolumetric_surfaces(file_white, file_pial, n_surfs, factor,
                                      niter, hemi, path_output):
    """
    The script calculates intracortical surfaces based on equi-volumetric layering. It is an 
    adaption of Konrad Wagstyl's function in surface_tools. Here, the io_mesh is not used anymore 
    and the call to a freesurfer function is omitted. Instead, vertex-wise area is calculated in a 
    separate function and we use the nibabel to read the surface geometry. First, vertex-wise area 
    is calculated from both input geometries. Smoothing to the areas is optional and done if factor 
    is set to a non-zero value. Then, based on vertex-wise area, equi-volumetric surfaces are 
    computed.
    Inputs:
        *file_white: input of GM/WM surface.
        *file_pial: input of GM/CSF surface.
        *n_surfs: number of output surfaces (returns input surfaces as 0 and 1).
        *factor: amount of smoothing.
        *niter: number of smoothing iterations.
        *hemi: declare hemisphere for output file.
        *path_output: path where output is saved.
    
    created by Daniel Haenelt
    Date created: 01-11-2018             
    Last modified: 10-06-2020
    """
    import os
    import numpy as np
    from nibabel.freesurfer.io import read_geometry, write_geometry
    from cortex.polyutils import Surface
    from lib.segmentation.calculate_area import calculate_area

    def beta(alpha, aw, ap):
        """Compute euclidean distance fraction, beta, that will yield the desired
        volume fraction, alpha, given vertex areas in the white matter surface, 
        aw, and on the pial surface, ap.
    
        A surface with `alpha` fraction of the cortical volume below it and 
        `1 - alpha` fraction above it can then be constructed from pial, px, and 
        white matter, pw, surface coordinates as `beta * px + (1 - beta) * pw`.
        """
        if alpha == 0:
            return np.zeros_like(aw)
        elif alpha == 1:
            return np.ones_like(aw)
        else:
            return 1 - (1 / (ap - aw) *
                        (-aw + np.sqrt((1 - alpha) * ap**2 + alpha * aw**2)))
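        # Worked example (illustrative numbers): aw = 1, ap = 2, alpha = 0.5
        # gives beta = 1 - (-1 + sqrt(0.5 * 4 + 0.5 * 1)) / 1 ~= 0.42, i.e. the
        # equi-volume midsurface sits closer to the white surface where the
        # pial side has the larger area.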

    # make output folder
    if not os.path.exists(path_output):
        os.makedirs(path_output)

    # load geometry and area data
    wm_vtx, wm_fac = read_geometry(file_white)
    pial_vtx, pial_fac = read_geometry(file_pial)
    wm_vertexareas = calculate_area(file_white)
    pial_vertexareas = calculate_area(file_pial)

    # smoothing area files (optional)
    if factor != 0:
        wm_vertexareas = Surface(wm_vtx, wm_fac).smooth(wm_vertexareas,
                                                        factor=factor,
                                                        iterations=niter)
        pial_vertexareas = Surface(pial_vtx, pial_fac).smooth(pial_vertexareas,
                                                              factor=factor,
                                                              iterations=niter)

    # vectors pointing from the pial to the white surface
    vectors = wm_vtx - pial_vtx
    tmp_vtx = pial_vtx.copy()
    tmp_fac = pial_fac.copy()
    mask = vectors.sum(
        axis=1) != 0  # mask where white and pial coordinates differ

    for depth in range(n_surfs):
        print("creating surface " + str(depth + 1))
        betas = beta(
            float(depth) / (n_surfs - 1), wm_vertexareas[mask],
            pial_vertexareas[mask])
        betas = np.nan_to_num(betas)
        tmp_vtx[mask] = pial_vtx[mask] + vectors[mask] * np.array([betas]).T
        write_geometry(
            os.path.join(path_output, hemi + "." + "layer" + str(depth)),
            tmp_vtx, tmp_fac)