Example 1
def filter_and_mask(imgs, mask_img_, parameters,
                    memory_level=0, memory=Memory(cachedir=None),
                    verbose=0,
                    confounds=None,
                    copy=True):

    imgs = _utils.check_niimg(imgs, atleast_4d=True, ensure_ndim=4)

    # Check whether resampling is truly necessary. If so, crop mask
    # as small as possible in order to speed up the process

    if not _check_same_fov(imgs, mask_img_):
        parameters = copy_object(parameters)
        # now we can crop
        mask_img_ = image.crop_img(mask_img_, copy=False)
        parameters['target_shape'] = mask_img_.shape
        parameters['target_affine'] = mask_img_.get_affine()

    data, affine = filter_and_extract(imgs, _ExtractionFunctor(mask_img_),
                                      parameters,
                                      memory_level=memory_level,
                                      memory=memory,
                                      verbose=verbose,
                                      confounds=confounds, copy=copy)

    # For _later_: missing value removal or imputing of missing data
    # (i.e. we want to get rid of NaNs, if smoothing must be done
    # earlier)
    # Optionally: 'doctor_nan', remove voxels with NaNs, other option
    # for later: some form of imputation

    return data, affine
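In every snippet on this page, copy_object is only used to take a defensive copy before mutating something (the parameters dict here, model instances further down). A minimal sketch of such a helper, assuming it is nothing more than a thin wrapper around the standard library's copy module; the name, signature, and semantics are assumptions, not the actual implementation in these projects:

import copy

def copy_object(obj, deep=False):
    # Hypothetical helper: return an independent copy of ``obj`` so callers
    # can mutate the result (e.g. the ``parameters`` dict above) without
    # touching the original. ``deep=True`` also copies nested containers.
    return copy.deepcopy(obj) if deep else copy.copy(obj)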
Example 2
def filter_and_mask(imgs,
                    mask_img_,
                    parameters,
                    memory_level=0,
                    memory=Memory(cachedir=None),
                    verbose=0,
                    confounds=None,
                    copy=True):
    imgs = _utils.check_niimg(imgs, atleast_4d=True, ensure_ndim=4)

    # Check whether resampling is truly necessary. If so, crop mask
    # as small as possible in order to speed up the process

    if not _check_same_fov(imgs, mask_img_):
        parameters = copy_object(parameters)
        # now we can crop
        mask_img_ = image.crop_img(mask_img_, copy=False)
        parameters['target_shape'] = mask_img_.shape
        parameters['target_affine'] = get_affine(mask_img_)

    data, affine = filter_and_extract(imgs,
                                      _ExtractionFunctor(mask_img_),
                                      parameters,
                                      memory_level=memory_level,
                                      memory=memory,
                                      verbose=verbose,
                                      confounds=confounds,
                                      copy=copy)

    # For _later_: missing value removal or imputing of missing data
    # (i.e. we want to get rid of NaNs, if smoothing must be done
    # earlier)
    # Optionally: 'doctor_nan', remove voxels with NaNs, other option
    # for later: some form of imputation
    return data
Example 3
def filter_and_mask(imgs,
                    mask_img_,
                    parameters,
                    memory_level=0,
                    memory=Memory(location=None),
                    verbose=0,
                    confounds=None,
                    sample_mask=None,
                    copy=True,
                    dtype=None):
    """Extract representative time series using given mask.

    Parameters
    ----------
    imgs : 3D/4D Niimg-like object
        Images to be masked. Can be 3-dimensional or 4-dimensional.

    For all other parameters refer to NiftiMasker documentation.

    Returns
    -------
    signals : 2D numpy array
        Signals extracted using the provided mask. It is a scikit-learn
        friendly 2D array with shape n_samples x n_features.

    """
    imgs = _utils.check_niimg(imgs, atleast_4d=True, ensure_ndim=4)

    # Check whether resampling is truly necessary. If so, crop mask
    # as small as possible in order to speed up the process

    if not _check_same_fov(imgs, mask_img_):
        parameters = copy_object(parameters)
        # now we can crop
        mask_img_ = image.crop_img(mask_img_, copy=False)
        parameters['target_shape'] = mask_img_.shape
        parameters['target_affine'] = mask_img_.affine

    data, affine = filter_and_extract(imgs,
                                      _ExtractionFunctor(mask_img_),
                                      parameters,
                                      memory_level=memory_level,
                                      memory=memory,
                                      verbose=verbose,
                                      confounds=confounds,
                                      sample_mask=sample_mask,
                                      copy=copy,
                                      dtype=dtype)

    # For _later_: missing value removal or imputing of missing data
    # (i.e. we want to get rid of NaNs, if smoothing must be done
    # earlier)
    # Optionally: 'doctor_nan', remove voxels with NaNs, other option
    # for later: some form of imputation
    return data
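Since filter_and_mask is a private helper, the documented way to obtain the same n_samples x n_features signal matrix is the NiftiMasker class mentioned in the docstring. A rough usage sketch; the file names are placeholders, and older nilearn releases import the class from nilearn.input_data instead of nilearn.maskers:

from nilearn.maskers import NiftiMasker  # nilearn.input_data in older releases

masker = NiftiMasker(mask_img="mask.nii.gz", standardize=True)
signals = masker.fit_transform("func.nii.gz", confounds=None)
# ``signals`` has one row per scan and one column per voxel inside the mask,
# i.e. shape (n_samples, n_features), ready for scikit-learn estimators.
print(signals.shape)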
Example 4
    def save(self):
        if self.id and not getattr(self, '_TrackHistory__copied', False):
            old = copy_object(self)
            old._TrackHistory_current = False
            old._save()

            self.id = None
            self._TrackHistory_current = True
            self._TrackHistory_revision += 1

        super(TrackHistory, self).save()

        print(self.__class__.objects.all())
Example 5
    def save(self):
        if self.id and not getattr(self, '_TrackHistory__copied', False):
            old = copy_object(self)
            old._TrackHistory_current = False
            old._save()

            self.id = None
            self._TrackHistory_current = True
            self._TrackHistory_revision += 1

        super(TrackHistory, self).save()

        print(self.__class__.objects.all())
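The net effect of this save() override is that every subsequent save archives the previous state as its own row and re-inserts the live object under a fresh primary key with a bumped revision counter. A hedged illustration of what a caller would observe; the Article model is hypothetical and the surrounding Django project setup is omitted:

# Hypothetical model assumed to inherit the TrackHistory save() shown above.
article = Article.objects.create(title="Draft")  # first save: nothing to archive
article.title = "Final"
article.save()  # copies the old row via copy_object(), saves it as history
                # (marked not current), then sets id = None so the updated
                # object is inserted again under a new primary key.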
Example 6
def deep_copy(obj, copy_related=True):
    copied_obj = copy_object(obj)
    copied_obj.id = None

    if hasattr(copied_obj, 'clone'):
        copied_obj.clone()
    rename_unique(copied_obj)
    rename_unique_together(copied_obj)
    copied_obj.save()

    for original, copy in zip(obj._meta.many_to_many, copied_obj._meta.many_to_many):
        # get the managers of the fields
        source = getattr(obj, original.attname)
        destination = getattr(copied_obj, copy.attname)
        # copy m2m field contents
        for element in source.all():
            destination.add(element)

    # save for a second time (to apply the copied many-to-many fields)
    if hasattr(copied_obj, 'clone'):
        copied_obj.clone()
    copied_obj.save()

    if copy_related:
        # clone related objects
        links = [rel.get_accessor_name() for rel in obj._meta.get_all_related_objects()]

        for link in links:
            for original in getattr(obj, link).all():
                copied_related = deep_copy(original)
                for field in copied_related._meta.fields:
                    # set foreign key to copied_obj
                    if getattr(copied_related, field.name) == obj:
                        setattr(copied_related, field.name, copied_obj)
                if hasattr(copied_related, 'clone'):
                    copied_related.clone()
                rename_unique(copied_related)
                copied_related.save()

    return copied_obj
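Taken together, deep_copy duplicates the row itself, re-adds its many-to-many links, and, unless copy_related=False is passed, recursively copies every object that points back at it through a reverse foreign key and re-points those copies at the clone. A hedged usage sketch; the Author, tags, and books names are made up for illustration and the Django setup is omitted:

original = Author.objects.get(pk=1)
clone = deep_copy(original)                # new row with a fresh primary key
assert clone.pk != original.pk
assert set(clone.tags.all()) == set(original.tags.all())  # m2m links copied
# Reverse-FK children (e.g. original.books.all()) are deep-copied as well
# and their foreign keys are repointed at ``clone``.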