Example no. 1
    def apply_events_export_events(self,
                                   fname,
                                   raw=None,
                                   condition_list=None,
                                   picks=None,
                                   **kwargv):
        """
         export events and epochs for each condition into MNE fif data
        """

        if kwargv['template_name']:
            self.template_name = kwargv['template_name']

        if kwargv['verbose']:
            self.verbose = kwargv['verbose']

        # self.template_update_file()

        fhdf = None
        raw, fname = jumeg_base.get_raw_obj(fname, raw=raw)
        evt_ids = self.events_export_events(raw=raw,
                                            fhdf=fhdf,
                                            condition_list=condition_list,
                                            **kwargv['parameter'])

        print "===> DONE apply events export events: " + fname + "\n"

        return (fname, raw, evt_ids)
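
The method above pulls `template_name`, `verbose` and a `parameter` dict out of `**kwargv`. A minimal, hypothetical call sketch (the epocher instance `jep`, the template name, the file name and the condition names are placeholders, not taken from the original code):

# Hypothetical usage sketch: `jep` stands for an instance of the epocher class
# that defines apply_events_export_events(); all names and paths are placeholders.
fname, raw, evt_ids = jep.apply_events_export_events(
    "subject01_run1-raw.fif",                       # placeholder fif file
    condition_list=["condition_A", "condition_B"],  # conditions defined in the template
    template_name="MY_TEMPLATE",                    # stored in self.template_name
    verbose=True,                                   # stored in self.verbose
    parameter={},                                   # forwarded to events_export_events()
)
print(evt_ids)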
Example no. 2
    def apply_events_to_hdf(self,
                            fname,
                            raw=None,
                            condition_list=None,
                            picks=None,
                            **kwargv):
        """
        find stimulus and/or response events for each condition; save to hdf5 format
        """

        if kwargv['template_name']:
            self.template_name = kwargv['template_name']

        if kwargv['verbose']:
            self.verbose = kwargv['verbose']

        self.template_update_file()

        fhdf = None
        raw, fname = jumeg_base.get_raw_obj(fname, raw=raw)

        fhdf = self.events_store_to_hdf(raw, condition_list=condition_list)

        print "===> DONE  apply epoches to HDF: " + fhdf + "\n"

        return (fname, raw, fhdf)
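
The HDF5 writing itself happens in `events_store_to_hdf()`, which is not shown here. As a rough, self-contained illustration of the underlying idea (one event table per condition stored under its own key in a single HDF5 file), assuming only numpy and pandas; keys, columns and the file name are made up:

import numpy as np
import pandas as pd

# toy event table in MNE style: sample index, previous value, event id
events = pd.DataFrame(np.array([[1000, 0, 5], [2500, 0, 5], [4000, 0, 10]]),
                      columns=["onset", "prev", "id"])

# store one table per condition under its own key (sketch only, not the
# actual events_store_to_hdf() implementation)
with pd.HDFStore("subject01_epocher.hdf5", mode="w") as hdf:
    hdf.put("/epocher/condition_A", events, format="table")
    print(hdf.keys())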
Example no. 3
      def ctps_init_brain_response_data(self,fname,raw=None,fname_ica=None,ica_raw=None,template_name=None):
          """

          :param fname:
          :param raw:
          :param fname_ica:
          :param ica_raw:
          :param template_name:
          :return:
          """

          print " ---> Start CTPS  init select brain responses"

         #--- ck template
          if template_name:
             self.template_name = template_name
          else:
             assert "ERROR no <template_name> specified !!\n\n"

          self.raw,fname = jumeg_base.get_raw_obj(fname,raw=raw)
          
          self.ica_raw,fname_ica = jumeg_base.get_ica_raw_obj(fname_ica,ica_raw=ica_raw)
       
          self.ica_picks = np.arange( self.ica_raw.n_components_ )

         #--- open HDFobj
          self.hdf_obj_open(fname=fname,raw=self.raw)

         #---
          self.ctps_hdf_parameter['fnica'] = self.ica_raw.info['filename']
          self.ctps_hdf_parameter['ncomp'] = len(self.ica_picks)
          self.ctps_hdf_parameter['sfreq'] = self.ica_raw.info['sfreq']
          self.ctps_hdf_parameter['scale_factor'] = self.scale_factor
Example no. 4
      def ctps_init_brain_response_clean_data(self,fname,raw=None,fname_ica=None,ica_raw=None,fhdf=None,template_name=None):
          """

          :param fname:
          :param raw:
          :param fname_ica:
          :param ica_raw:
          :param fhdf:
          :param template_name:
          :return:
          """
          print " ---> Start CTPS  init clean brain responses"

         #--- ck template
          if template_name:
             self.template_name = template_name
         # else:
         #    assert "ERROR no <template_name> specified !!\n\n"
          self.raw,fname = jumeg_base.get_raw_obj(fname,raw=raw)
          
         #--- load ica raw obj & init
          if ica_raw is None:
             if fname_ica is None:
                assert "ERROR no file foumd!!\n\n"
             self.ica_raw = mne.preprocessing.read_ica(fname_ica)
          else:
             self.ica_raw = ica_raw

          self.ica_picks = np.arange( self.ica_raw.n_components_ )

         #--- open HDFobj
          self.hdf_obj_open(fname=fname,raw=self.raw,fhdf=fhdf)
def apply_filter_data(fname,raw=None,filter_method="mne",filter_type='bp',fcut1=1.0,fcut2=45.0,notch=None,notch_max=None,order=4,
                      remove_dcoffset = False,njobs=1,overwrite = False,do_run=True,verbose=False,save=True,picks=None,
                      fif_postfix=None, fif_extention='-raw.fif'):
    ''' 
    Applies the FIR FFT filter [bp,hp,lp,notches] to data array. 
    filter_method : mne => fft mne-filter
                    bw  => fft butterworth
                    ws  => fft - windowed sinc 
    '''
            
    from jumeg.filter import jumeg_filter
   
 #--- define filter 
    jfilter = jumeg_filter(filter_method=filter_method,filter_type=filter_type,fcut1=fcut1,fcut2=fcut2,njobs=njobs, 
                                remove_dcoffset=remove_dcoffset,order=order)
    jfilter.verbose = verbose                     

    
    if do_run :
       raw,fname = jumeg_base.get_raw_obj(fname,raw=raw)
      
       if picks is None :
          picks = jumeg_base.pick_channels_nobads(raw)
          
    #- apply filter for picks, exclude stim,resp,bads
       jfilter.sampling_frequency = raw.info['sfreq']
    #--- calc notch array 50,100,150 .. max
       if notch :
          jfilter.calc_notches(notch,notch_max)

       jfilter.apply_filter(raw._data,picks=picks )
       jfilter.update_info_filter_settings(raw)

    #--- make output filename
       name_raw = fname.split('-')[0]
       fnfilt   = name_raw + "," + jfilter.filter_name_postfix + fif_extention

       raw.info['filename'] = fnfilt

       if save :
          fnfilt = jumeg_base.apply_save_mne_data(raw,fname=fnfilt)

    else:
     #--- calc notch array 50,100,150 .. max
       if notch :
          jfilter.calc_notches(notch,notch_max)

     #--- make output filename
       name_raw = fname.split('-')[0]
       fnfilt   = name_raw + "," + jfilter.filter_name_postfix + fif_extention

    return (fnfilt, raw)
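
A hedged usage sketch for the wrapper above, assuming the jumeg package is installed and a '-raw.fif' file exists; the file name and cut-off values are placeholders:

# Hypothetical call: band-pass 1-45 Hz plus 50 Hz notches up to 150 Hz,
# using the butterworth backend of the jumeg filter module.
fnfilt, raw_filt = apply_filter_data("subject01_run1-raw.fif",  # placeholder file name
                                     filter_method="bw",        # "mne", "bw" or "ws", see docstring
                                     filter_type="bp",
                                     fcut1=1.0,
                                     fcut2=45.0,
                                     notch=50.0,
                                     notch_max=150.0,
                                     do_run=True,
                                     save=True)
print(fnfilt)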
Example no. 6
    def apply_events_export_events(self,fname,raw=None,condition_list=None,picks=None,**kwargv):
        """
         export events and epochs for each condition into MNE fif data
        """

        if kwargv['template_name']:
           self.template_name = kwargv['template_name']

        if kwargv['verbose']:
           self.verbose = kwargv['verbose']

        # self.template_update_file()

        fhdf      = None
        raw,fname = jumeg_base.get_raw_obj(fname,raw=raw)        
        evt_ids   = self.events_export_events(raw=raw,fhdf=fhdf,condition_list=condition_list,**kwargv['parameter'])
        
        print "===> DONE apply events export events: " + fname +"\n"

        return (fname,raw,evt_ids)
Example no. 7
    def apply_events_to_hdf(self, fname,raw=None,condition_list=None,picks=None,**kwargv):
        """
         find stimulus and/or response events for each condition; save to hdf5 format
        """

        if kwargv['template_name']:
           self.template_name = kwargv['template_name']

        if kwargv['verbose']:
           self.verbose = kwargv['verbose']

        self.template_update_file()

        fhdf = None
        raw,fname = jumeg_base.get_raw_obj(fname,raw=raw)
        
        fhdf = self.events_store_to_hdf(raw,condition_list=condition_list)

        print "===> DONE  apply epoches to HDF: " + fhdf +"\n"

        return (fname,raw,fhdf)
def perform_detrending(fname_raw,raw=None,save=True):

    from mne.io import Raw
    from numpy import poly1d, polyfit
    
    raw, fname_raw = jumeg_base.get_raw_obj(fname_raw, raw=raw)
  # get channels
    picks = jumeg_base.pick_meg_and_ref_nobads(raw)
    
    xval  = np.arange(raw._data.shape[1])
  # loop over all channels
    for ipick in picks:
        coeff = polyfit(xval, raw._data[ipick, :], deg=1)
        trend = poly1d(coeff)
        raw._data[ipick, :] -= trend(xval)

    # save detrended data
    if save:
       fname_out = jumeg_base.get_fif_name(raw=raw,postfix='dt')
       jumeg_base.apply_save_mne_data(raw,fname=fname_out,overwrite=True)

    return raw
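
The detrending step above is a plain first-order polynomial fit per channel. A self-contained numpy sketch of the same operation on synthetic data (no jumeg or mne required):

import numpy as np
from numpy import poly1d, polyfit

rng = np.random.RandomState(0)
n_chan, n_samp = 3, 1000
xval = np.arange(n_samp)

# synthetic data: white noise plus a linear drift on every channel
data = rng.standard_normal((n_chan, n_samp)) + 0.01 * xval

for ich in range(n_chan):
    trend = poly1d(polyfit(xval, data[ich], deg=1))  # fit offset + slope
    data[ich] -= trend(xval)                         # subtract the linear trend

print(abs(polyfit(xval, data[0], deg=1)[0]) < 1e-6)  # slope is ~0 after detrending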
def perform_detrending(fname_raw, raw=None, save=True):

    from mne.io import Raw
    from numpy import poly1d, polyfit

    raw, fname_raw = jumeg_base.get_raw_obj(fname_raw, raw=raw)
    # get channels
    picks = jumeg_base.pick_meg_and_ref_nobads(raw)

    xval = np.arange(raw._data.shape[1])
    # loop over all channels
    for ipick in picks:
        coeff = polyfit(xval, raw._data[ipick, :], deg=1)
        trend = poly1d(coeff)
        raw._data[ipick, :] -= trend(xval)

    # save detrended data
    if save:
        fname_out = jumeg_base.get_fif_name(raw=raw, postfix='dt')
        jumeg_base.apply_save_mne_data(raw, fname=fname_out, overwrite=True)

    return raw
Example no. 10
          self.raw,fname = jumeg_base.get_raw_obj(fname,raw=raw)

          self.ica_raw,fname_ica = jumeg_base.get_ica_raw_obj(fname_ica,ica_raw=ica_raw)

          self.ica_picks = np.arange( self.ica_raw.n_components_ )

         #--- open HDFobj
          self.hdf_obj_open(fname=fname,raw=self.raw)

         #---
          self.ctps_hdf_parameter['fnica'] = self.ica_raw.info['filename']
          self.ctps_hdf_parameter['ncomp'] = len(self.ica_picks)
          self.ctps_hdf_parameter['sfreq'] = self.ica_raw.info['sfreq']
          self.ctps_hdf_parameter['scale_factor'] = self.scale_factor
def apply_ica_data(fname,raw=None,do_run=False,verbose=False,save=True,fif_extention=".fif",fif_postfix="-ica",**kwargs):
    """
     apply mne ica

      return
        fnica_out  : fif filename of mne ica-obj
        raw        : fif-raw obj
        ICAobj     : mne-ica-object


        Attributes
        ----------
        current_fit : str
            Flag informing about which data type (raw or epochs) was used for
            the fit.
        ch_names : list-like
            Channel names resulting from initial picking.
        n_components_ : int
            If fit, the actual number of components used for ICA decomposition.
        n_pca_components : int
            See above.
        max_pca_components : int
            The number of components used for PCA dimensionality reduction.
        verbose : bool, str, int, or None
            See above.
        pca_components_ : ndarray
            If fit, the PCA components.
        pca_mean_ : ndarray
            If fit, the mean vector used to center the data before doing the PCA.
        pca_explained_variance_ : ndarray
            If fit, the variance explained by each PCA component.
        mixing_matrix_ : ndarray
            If fit, the mixing matrix to restore observed data, else None.
        unmixing_matrix_ : ndarray
            If fit, the matrix to unmix observed data, else None.
        exclude : list
            List of source indices to exclude, i.e. artifact components identified
            throughout the ICA solution. Indices added to this list will be
            dispatched to the .pick_sources methods. Source indices passed to
            the .pick_sources method via the 'exclude' argument are added to the
            .exclude attribute. When saving the ICA the indices are restored as
            well, so artifact components once identified don't have to be added
            again. To clear this 'artifact memory', set ica.exclude = [].
        info : None | instance of mne.io.meas_info.Info
            The measurement info copied from the object fitted.
        n_samples_ : int
            The number of samples used on fit.

    """
    ICAobj    = None
    fnica_out = None

    if do_run :
       raw,fname = jumeg_base.get_raw_obj(fname,raw=raw)
      
       from mne.preprocessing import ICA
       picks = jumeg_base.pick_meg_nobads(raw)

      #--- init MNE ICA obj

       kwargs['global_parameter']['verbose'] = verbose
       ICAobj = ICA( **kwargs['global_parameter'] )

      #--- run  mne ica
       kwargs['fit_parameter']['verbose'] = verbose
       ICAobj.fit(raw, picks=picks,**kwargs['fit_parameter'] )

       fnica_out = fname[:fname.rfind('-raw.fif')] + fif_postfix + fif_extention
      # fnica_out = fname[0:len(fname)-4]+'-ica.fif'

      #--- save ICA object
       if save :
          ICAobj.save(fnica_out)

    print "===> Done JuMEG MNE ICA : " + fnica_out
    print "\n"


    return (fnica_out,raw,ICAobj)
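
The function expects two nested dicts in `**kwargs`: `global_parameter` (handed to the `mne.preprocessing.ICA` constructor) and `fit_parameter` (handed to `ICA.fit`). A hedged call sketch with placeholder values:

# Hypothetical usage: constructor and fit arguments are routed through kwargs.
fnica_out, raw, ICAobj = apply_ica_data(
    "subject01_run1-raw.fif",                   # placeholder '-raw.fif' file
    do_run=True,
    save=True,
    global_parameter=dict(n_components=40,      # -> mne.preprocessing.ICA(...)
                          method="fastica",
                          random_state=42),
    fit_parameter=dict(decim=3,                 # -> ICAobj.fit(raw, picks=..., ...)
                       reject=dict(mag=4e-12)),
)
print(fnica_out)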
def apply_noise_reducer_data(fname,raw=None,do_run=True,verbose=False,save=True,plot=False,
                             reflp=None, refhp=None, refnotch=None,fif_postfix="nr",fif_extention="-raw.fif",**kwargs):
    '''
    Applies the noise reducer to raw obj data or to fif-file.
            the magic ee once
            fb modified for raw obj support
            imports jumeg_noise_reducer_4raw_data
    '''

    import os

#--- import noise_reducer and plot_power_spectrum function
    from jumeg.jumeg_4raw_data_noise_reducer import noise_reducer_4raw_data, plot_denoising_4raw_data

    fname_out = None
    nr_done   = False
    
    if do_run :  
       raw,fname_raw = jumeg_base.get_raw_obj(fname,raw=raw)
       fname_out = jumeg_base.get_fif_name(raw=raw,postfix=fif_postfix,extention=fif_extention,update_raw_fname=False)
     
#--- apply noise reducer for 50 Hz (and harmonics)
     
       if (reflp or refhp):
          raw,fname_out = noise_reducer_4raw_data(fname,raw=raw,reflp=reflp,refhp=refhp,verbose=verbose,save=False,**kwargs['parameter'])
          kwargs['parameter']['detrending'] = None
          nr_done = True
       if refnotch:
          for refn in refnotch:
              raw,fname_out = noise_reducer_4raw_data(None,raw=raw,refnotch=refn,verbose=verbose,save=False,**kwargs['parameter'])
              kwargs['parameter']['detrending'] = None
          nr_done = True
  
     
       # raw.info['filename'] = fname_out
       
       if not nr_done :
          return fname_raw,raw
  
       raw.info['filename'] = fname_out
       
       if save:
          fname_out = jumeg_base.apply_save_mne_data(raw,fname=fname_out)

       if plot:
          print " --> noise reducer plot power spectrum"

          from distutils.dir_util import mkpath

          p,pdf = os.path.split(fname_raw)

          plot_dir = p+ '/plots/'

          mkpath(plot_dir)

          fn_power_spect = plot_dir + pdf[:pdf.rfind('-raw.fif') ]+ ',denoising'

          plot_denoising_4raw_data([fname_raw,fname_out],show=False,fnout=fn_power_spect)

          print"---> noise reducer plot :"  + fn_power_spect

    print "---> Done noise reducer: "+ fname_out

    return (fname_out,raw)
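
`kwargs['parameter']` is forwarded unchanged to `noise_reducer_4raw_data()`, while the reference-filter settings are regular keyword arguments. A hedged call sketch with placeholder values:

# Hypothetical usage: low-pass the reference channels at 5 Hz and run one
# additional notch pass at 50 Hz (file name and values are placeholders).
fname_out, raw_nr = apply_noise_reducer_data(
    "subject01_run1-raw.fif",
    do_run=True,
    save=True,
    plot=False,
    reflp=5.0,                       # reference low-pass -> first noise reducer pass
    refnotch=[50.0],                 # one extra pass per notch frequency
    parameter=dict(detrending=None,  # forwarded to noise_reducer_4raw_data()
                   checkresults=True),
)
print(fname_out)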
Example no. 13
def apply_ica_data(fname,
                   raw=None,
                   do_run=False,
                   verbose=False,
                   save=True,
                   fif_extention=".fif",
                   fif_postfix="-ica",
                   **kwargs):
    """
     apply mne ica

      return
        fnica_out  : fif filename of mne ica-obj
        raw        : fif-raw obj
        ICAobj     : mne-ica-object


        Attributes
        ----------
        current_fit : str
            Flag informing about which data type (raw or epochs) was used for
            the fit.
        ch_names : list-like
            Channel names resulting from initial picking.
        n_components_ : int
            If fit, the actual number of components used for ICA decomposition.
        n_pca_components : int
            See above.
        max_pca_components : int
            The number of components used for PCA dimensionality reduction.
        verbose : bool, str, int, or None
            See above.
        pca_components_ : ndarray
            If fit, the PCA components.
        pca_mean_ : ndarray
            If fit, the mean vector used to center the data before doing the PCA.
        pca_explained_variance_ : ndarray
            If fit, the variance explained by each PCA component.
        mixing_matrix_ : ndarray
            If fit, the mixing matrix to restore observed data, else None.
        unmixing_matrix_ : ndarray
            If fit, the matrix to unmix observed data, else None.
        exclude : list
            List of source indices to exclude, i.e. artifact components identified
            throughout the ICA solution. Indices added to this list will be
            dispatched to the .pick_sources methods. Source indices passed to
            the .pick_sources method via the 'exclude' argument are added to the
            .exclude attribute. When saving the ICA the indices are restored as
            well, so artifact components once identified don't have to be added
            again. To clear this 'artifact memory', set ica.exclude = [].
        info : None | instance of mne.io.meas_info.Info
            The measurement info copied from the object fitted.
        n_samples_ : int
            The number of samples used on fit.

    """
    ICAobj    = None
    fnica_out = None

    if do_run:
        raw, fname = jumeg_base.get_raw_obj(fname, raw=raw)

        from mne.preprocessing import ICA
        picks = jumeg_base.pick_meg_nobads(raw)

        #--- init MNE ICA obj

        kwargs['global_parameter']['verbose'] = verbose
        ICAobj = ICA(**kwargs['global_parameter'])

        #--- run  mne ica
        kwargs['fit_parameter']['verbose'] = verbose
        ICAobj.fit(raw, picks=picks, **kwargs['fit_parameter'])

        fnica_out = fname[:fname.rfind('-raw.fif')] + fif_postfix + fif_extention
        # fnica_out = fname[0:len(fname)-4]+'-ica.fif'

        #--- save ICA object
        if save:
            ICAobj.save(fnica_out)

    print "===> Done JuMEG MNE ICA : " + fnica_out
    print "\n"

    return (fnica_out, raw, ICAobj)
Example no. 14
def apply_filter_data(fname,
                      raw=None,
                      filter_method="mne",
                      filter_type='bp',
                      fcut1=1.0,
                      fcut2=45.0,
                      notch=None,
                      notch_max=None,
                      order=4,
                      remove_dcoffset=False,
                      njobs=1,
                      overwrite=False,
                      do_run=True,
                      verbose=False,
                      save=True,
                      picks=None,
                      fif_postfix=None,
                      fif_extention='-raw.fif'):
    ''' 
    Applies the FIR FFT filter [bp,hp,lp,notches] to data array. 
    filter_method : mne => fft mne-filter
                    bw  => fft butterworth
                    ws  => fft - windowed sinc 
    '''

    from jumeg.filter import jumeg_filter

    #--- define filter
    jfilter = jumeg_filter(filter_method=filter_method,
                           filter_type=filter_type,
                           fcut1=fcut1,
                           fcut2=fcut2,
                           njobs=njobs,
                           remove_dcoffset=remove_dcoffset,
                           order=order)
    jfilter.verbose = verbose

    if do_run:
        raw, fname = jumeg_base.get_raw_obj(fname, raw=raw)

        if picks is None:
            picks = jumeg_base.pick_channels_nobads(raw)

    #- apply filter for picks, exclude stim,resp,bads
        jfilter.sampling_frequency = raw.info['sfreq']
        #--- calc notch array 50,100,150 .. max
        if notch:
            jfilter.calc_notches(notch, notch_max)

        jfilter.apply_filter(raw._data, picks=picks)
        jfilter.update_info_filter_settings(raw)

        #--- make output filename
        name_raw = fname.split('-')[0]
        fnfilt = name_raw + "," + jfilter.filter_name_postfix + fif_extention

        raw.info['filename'] = fnfilt

        if save:
            fnfilt = jumeg_base.apply_save_mne_data(raw, fname=fnfilt)

    else:
        #--- calc notch array 50,100,150 .. max
        if notch:
            jfilter.calc_notches(notch, notch_max)

    #--- make output filename
        name_raw = fname.split('-')[0]
        fnfilt = name_raw + "," + jfilter.filter_name_postfix + fif_extention

    return (fnfilt, raw)
def noise_reducer_4raw_data(fname_raw,raw=None,signals=[],noiseref=[],detrending=None,
                  tmin=None,tmax=None,reflp=None,refhp=None,refnotch=None,
                  exclude_artifacts=True,checkresults=True,
                  fif_extention="-raw.fif",fif_postfix="nr",                        
                  reject={'grad':4000e-13,'mag':4e-12,'eeg':40e-6,'eog':250e-6},
                  complementary_signal=False,fnout=None,verbose=False,save=True):

    """Apply noise reduction to signal channels using reference channels.
        
       !!! ONLY ONE RAW Obj Interface Version FB !!!
           
    Parameters
    ----------
    fname_raw : rawfile name

    raw     : fif raw object

    signals : list of string
              List of channels to compensate using noiseref.
              If empty use the meg signal channels.
    noiseref : list of string | str
              List of channels to use as noise reference.
              If empty use the magnetic reference channels (default).
    signals and noiseref may contain regexp, which are resolved
    using mne.pick_channels_regexp(). All other channels are copied.
    tmin : lower latency bound for weight-calc [start of trace]
    tmax : upper latency bound for weight-calc [ end  of trace]
           Weights are calc'd for (tmin,tmax), but applied to entire data set
    refhp : high-pass frequency for reference signal filter [None]
    reflp :  low-pass frequency for reference signal filter [None]
            reflp < refhp: band-stop filter
            reflp > refhp: band-pass filter
            reflp is not None, refhp is None: low-pass filter
            reflp is None, refhp is not None: high-pass filter
    refnotch : (base) notch frequency for reference signal filter [None]
               use raw(ref)-notched(ref) as reference signal
    exclude_artifacts: filter signal-channels thru _is_good() [True]
                       (parameters are at present hard-coded!)
    complementary_signal : replace signal by the traces that would be subtracted [False]
                           (can be useful for debugging)
    checkresults : boolean to control internal checks and overall success [True]

    reject =  dict for rejection threshold 
              units:
              grad:    T / m (gradiometers)
              mag:     T (magnetometers)
              eeg/eog: uV (EEG channels)
              default=>{'grad':4000e-13,'mag':4e-12,'eeg':40e-6,'eog':250e-6}
              
    save : save data to fif file

    Outputfile:
    -------
    <wawa>,nr-raw.fif for input <wawa>-raw.fif

    Returns
    -------
    TBD

    Bugs
    ----
    - artifact checking is incomplete (and with arb. window of tstep=0.2s)
    - no accounting of channels used as signal/reference
    - non existing input file handled ungracefully
    """

    tc0 = time.clock()
    tw0 = time.time()

    if type(complementary_signal) != bool:
        raise ValueError("Argument complementary_signal must be of type bool")

    raw,fname_raw = jumeg_base.get_raw_obj(fname_raw,raw=raw)
    
    
    if detrending:
       raw = perform_detrending(None,raw=raw, save=False)
    
    tc1 = time.clock()
    tw1 = time.time()

    if verbose:
       print ">>> loading raw data took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tc0), (tw1 - tw0))

    # Time window selection
    # weights are calc'd based on [tmin,tmax], but applied to the entire data set.
    # tstep is used in artifact detection
    # tmin,tmax variables must not be changed here!
    if tmin is None:
        itmin = 0
    else:
        itmin = int(floor(tmin * raw.info['sfreq']))
    if tmax is None:
        itmax = raw.last_samp
    else:
        itmax = int(ceil(tmax * raw.info['sfreq']))

    if itmax - itmin < 2:
        raise ValueError("Time-window for noise compensation empty or too short")

    if verbose:
        print ">>> Set time-range to [%7.3f,%7.3f]" % \
              (raw.index_as_time(itmin)[0], raw.index_as_time(itmax)[0])

    if signals is None or len(signals) == 0:
        sigpick = jumeg_base.pick_meg_nobads(raw)
    else:
        sigpick = channel_indices_from_list(raw.info['ch_names'][:], signals,
                                            raw.info.get('bads'))
    nsig = len(sigpick)
    if nsig == 0:
        raise ValueError("No channel selected for noise compensation")

    if noiseref is None or len(noiseref) == 0:
        # References are not limited to 4D ref-chans, but can be anything,
        # incl. ECG or powerline monitor.
        if verbose:
            print ">>> Using all refchans."
            
        refexclude = "bads"      
        refpick = jumeg_base.pick_ref_nobads(raw)
    else:
        refpick = channel_indices_from_list(raw.info['ch_names'][:], noiseref,
                                            raw.info.get('bads'))
    nref = len(refpick)
    if nref == 0:
        raise ValueError("No channel selected as noise reference")

    if verbose:
        print ">>> sigpick: %3d chans, refpick: %3d chans" % (nsig, nref)

    if reflp is None and refhp is None and refnotch is None:
        use_reffilter = False
        use_refantinotch = False
    else:
        use_reffilter = True
        if verbose:
            print "########## Filter reference channels:"

        use_refantinotch = False
        if refnotch is not None:
            if reflp is None and refhp is None:
                use_refantinotch = True
                freqlast = np.min([5.01 * refnotch, 0.5 * raw.info['sfreq']])
                if verbose:
                    print ">>> notches at freq %.1f and harmonics below %.1f" % (refnotch, freqlast)
            else:
                raise ValueError("Cannot specify notch- and high-/low-pass"
                                 "reference filter together")
        else:
            if verbose:
                if reflp is not None:
                    print ">>>  low-pass with cutoff-freq %.1f" % reflp
                if refhp is not None:
                    print ">>> high-pass with cutoff-freq %.1f" % refhp

        # Adapt the following drop-channels command to use 'all-but-refpick'
        droplist = [raw.info['ch_names'][k] for k in xrange(raw.info['nchan']) if not k in refpick]
        tct = time.clock()
        twt = time.time()
        fltref = raw.drop_channels(droplist, copy=True)
        if use_refantinotch:
            rawref = raw.drop_channels(droplist, copy=True)
            freqlast = np.min([5.01 * refnotch, 0.5 * raw.info['sfreq']])
            fltref.notch_filter(np.arange(refnotch, freqlast, refnotch),
                                picks=np.array(xrange(nref)), method='iir')
            fltref._data = (rawref._data - fltref._data)
        else:
            fltref.filter(refhp, reflp, picks=np.array(xrange(nref)), method='iir')
        tc1 = time.clock()
        tw1 = time.time()
        if verbose:
            print ">>> filtering ref-chans  took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt))

    if verbose:
        print "########## Calculating sig-ref/ref-ref-channel covariances:"
    # Calculate sig-ref/ref-ref-channel covariance:
    # (there is no need to calc inter-signal-chan cov,
    #  but there seems to be no appropriate fct available)
    # Here we copy the idea from compute_raw_data_covariance()
    # and truncate it as appropriate.
    tct = time.clock()
    twt = time.time()
    # The following reject and infosig entries are only
    # used in _is_good-calls.
    # _is_good() from mne-0.9.git-py2.7.egg/mne/epochs.py seems to
    # ignore ref-channels (not covered by dict) and checks individual
    # data segments - artifacts across a buffer boundary are not found.
    
    #--- !!! FB put to kwargs    
    
    #reject = dict(grad=4000e-13, # T / m (gradiometers)
    #              mag=4e-12,     # T (magnetometers)
    #              eeg=40e-6,     # uV (EEG channels)
    #              eog=250e-6)    # uV (EOG channels)

    infosig = copy.copy(raw.info)
    infosig['chs'] = [raw.info['chs'][k] for k in sigpick]
    infosig['ch_names'] = [raw.info['ch_names'][k] for k in sigpick]
    infosig['nchan'] = len(sigpick)
    idx_by_typesig = channel_indices_by_type(infosig)

    # Read data in chunks:
    tstep = 0.2
    itstep = int(ceil(tstep * raw.info['sfreq']))
    sigmean = 0
    refmean = 0
    sscovdata = 0
    srcovdata = 0
    rrcovdata = 0
    n_samples = 0

    for first in range(itmin, itmax, itstep):
        last = first + itstep
        if last >= itmax:
            last = itmax
        raw_segmentsig, times = raw[sigpick, first:last]
        if use_reffilter:
            raw_segmentref, times = fltref[:, first:last]
        else:
            raw_segmentref, times = raw[refpick, first:last]

        if not exclude_artifacts or \
           _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=None,
                    ignore_chs=raw.info['bads']):
            sigmean += raw_segmentsig.sum(axis=1)
            refmean += raw_segmentref.sum(axis=1)
            sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
            srcovdata += np.dot(raw_segmentsig, raw_segmentref.T)
            rrcovdata += np.dot(raw_segmentref, raw_segmentref.T)
            n_samples += raw_segmentsig.shape[1]
        else:
            logger.info("Artefact detected in [%d, %d]" % (first, last))
    if n_samples <= 1:
        raise ValueError('Too few samples to calculate weights')
    sigmean /= n_samples
    refmean /= n_samples
    sscovdata -= n_samples * sigmean[:] * sigmean[:]
    sscovdata /= (n_samples - 1)
    srcovdata -= n_samples * sigmean[:, None] * refmean[None, :]
    srcovdata /= (n_samples - 1)
    rrcovdata -= n_samples * refmean[:, None] * refmean[None, :]
    rrcovdata /= (n_samples - 1)
    sscovinit = np.copy(sscovdata)
    if verbose:
        print ">>> Normalize srcov..."

    rrslope = copy.copy(rrcovdata)
    for iref in xrange(nref):
        dtmp = rrcovdata[iref, iref]
        if dtmp > TINY:
            srcovdata[:, iref] /= dtmp
            rrslope[:, iref] /= dtmp
        else:
            srcovdata[:, iref] = 0.
            rrslope[:, iref] = 0.

    if verbose:
        print ">>> Number of samples used : %d" % n_samples
        tc1 = time.clock()
        tw1 = time.time()
        print ">>> sigrefchn covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt))

    if checkresults:
        if verbose:
            print "########## Calculated initial signal channel covariance:"
            # Calculate initial signal channel covariance:
            # (only used as quality measure)
            print ">>> initl rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata))
            for i in xrange(5):
                print ">>> initl signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i]))
            print ">>>"

    U, s, V = np.linalg.svd(rrslope, full_matrices=True)
    if verbose:
        print ">>> singular values:"
        print s
        print ">>> Applying cutoff for smallest SVs:"

    dtmp = s.max() * SVD_RELCUTOFF
    s *= (abs(s) >= dtmp)
    sinv = [1. / s[k] if s[k] != 0. else 0. for k in xrange(nref)]
    if verbose:
        print ">>> singular values (after cutoff):"
        print s

    stat = np.allclose(rrslope, np.dot(U, np.dot(np.diag(s), V)))
    if verbose:
        print ">>> Testing svd-result: %s" % stat
        if not stat:
            print "    (Maybe due to SV-cutoff?)"

    # Solve for inverse coefficients:
    # Set RRinv.tr=U diag(sinv) V
    RRinv = np.transpose(np.dot(U, np.dot(np.diag(sinv), V)))
    if checkresults:
        stat = np.allclose(np.identity(nref), np.dot(RRinv, rrslope))
        if stat:
            if verbose:
                print ">>> Testing RRinv-result (should be unit-matrix): ok"
        else:
            print ">>> Testing RRinv-result (should be unit-matrix): failed"
            print np.transpose(np.dot(RRinv, rrslope))
            print ">>>"

    if verbose:
        print "########## Calc weight matrix..."

    # weights-matrix will be somewhat larger than necessary,
    # (to simplify indexing in compensation loop):
    weights = np.zeros((raw._data.shape[0], nref))
    for isig in xrange(nsig):
        for iref in xrange(nref):
            weights[sigpick[isig],iref] = np.dot(srcovdata[isig,:], RRinv[:,iref])

    if verbose:
        print "########## Compensating signal channels:"
        if complementary_signal:
            print ">>> Caveat: REPLACING signal by compensation signal"

    tct = time.clock()
    twt = time.time()

    # Work on entire data stream:
    for isl in xrange(raw._data.shape[1]):
        slice = np.take(raw._data, [isl], axis=1)
        if use_reffilter:
            refslice = np.take(fltref._data, [isl], axis=1)
            refarr = refslice[:].flatten() - refmean
            # refarr = fltres[:,isl]-refmean
        else:
            refarr = slice[refpick].flatten() - refmean
        subrefarr = np.dot(weights[:], refarr)

        if not complementary_signal:
            raw._data[:, isl] -= subrefarr
        else:
            raw._data[:, isl] = subrefarr

        if (isl % 10000 == 0) and verbose:
            print "\rProcessed slice %6d" % isl

    if verbose:
        print "\nDone."
        tc1 = time.clock()
        tw1 = time.time()
        print ">>> compensation loop took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt))

    if checkresults:
        if verbose:
            print "########## Calculating final signal channel covariance:"
        # Calculate final signal channel covariance:
        # (only used as quality measure)
        tct = time.clock()
        twt = time.time()
        sigmean = 0
        sscovdata = 0
        n_samples = 0
        for first in range(itmin, itmax, itstep):
            last = first + itstep
            if last >= itmax:
                last = itmax
            raw_segmentsig, times = raw[sigpick, first:last]
            # Artifacts found here will probably differ from pre-noisered artifacts!
            if not exclude_artifacts or \
               _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
                        flat=None, ignore_chs=raw.info['bads']):
                sigmean += raw_segmentsig.sum(axis=1)
                sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
                n_samples += raw_segmentsig.shape[1]
        sigmean /= n_samples
        sscovdata -= n_samples * sigmean[:] * sigmean[:]
        sscovdata /= (n_samples - 1)
        
        if verbose:
            print ">>> no channel got worse: ", np.all(np.less_equal(sscovdata, sscovinit))
            print ">>> final rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata))
            for i in xrange(5):
                print ">>> final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i]))
            tc1 = time.clock()
            tw1 = time.time()
            print ">>> signal covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt))
            print ">>>"

   #--- fb update 21.07.2015     
    fname_out = jumeg_base.get_fif_name(raw=raw,postfix=fif_postfix,extention=fif_extention)                       
      
    if save:    
       jumeg_base.apply_save_mne_data(raw,fname=fname_out,overwrite=True)
     
             
    tc1 = time.clock()
    tw1 = time.time()
    if verbose:
       print ">>> Total run took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tc0), (tw1 - tw0))
        
    return raw,fname_out  
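
At its core the routine performs a least-squares regression of the signal channels onto the reference channels: the weights are the signal-reference covariance multiplied by the inverse of the reference-reference covariance (computed above via SVD with a cutoff), and the weighted reference signal is then subtracted sample by sample. A self-contained numpy sketch of that idea on synthetic data, using a pseudo-inverse instead of the explicit SVD cutoff:

import numpy as np

rng = np.random.RandomState(1)
n_sig, n_ref, n_samp = 5, 3, 20000

ref = rng.standard_normal((n_ref, n_samp))                        # reference (noise) channels
mix = rng.standard_normal((n_sig, n_ref))                         # leakage of the noise into the signals
sig = 0.1 * rng.standard_normal((n_sig, n_samp)) + np.dot(mix, ref)

sigm = sig - sig.mean(axis=1, keepdims=True)
refm = ref - ref.mean(axis=1, keepdims=True)
srcov = np.dot(sigm, refm.T) / (n_samp - 1)                       # sig-ref covariance
rrcov = np.dot(refm, refm.T) / (n_samp - 1)                       # ref-ref covariance

# weights ~ srcov * rrcov^-1 (np.linalg.pinv stands in for the SVD-with-cutoff above)
weights = np.dot(srcov, np.linalg.pinv(rrcov))
cleaned = sig - np.dot(weights, refm)

print(sig.var(axis=1).mean())      # raw signal variance, dominated by leaked reference noise
print(cleaned.var(axis=1).mean())  # much smaller after compensation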
Example no. 16
def apply_noise_reducer_data(fname,
                             raw=None,
                             do_run=True,
                             verbose=False,
                             save=True,
                             plot=False,
                             reflp=None,
                             refhp=None,
                             refnotch=None,
                             fif_postfix="nr",
                             fif_extention="-raw.fif",
                             **kwargs):
    '''
    Applies the noise reducer to raw obj data or to fif-file.
            the magic ee once
            fb modified for raw obj support
            imports jumeg_noise_reducer_4raw_data
    '''

    import os

    #--- import noise_reducer and plot_power_spectrum function
    from jumeg.jumeg_4raw_data_noise_reducer import noise_reducer_4raw_data, plot_denoising_4raw_data

    fname_out = None
    nr_done = False

    if do_run:
        raw, fname_raw = jumeg_base.get_raw_obj(fname, raw=raw)
        fname_out = jumeg_base.get_fif_name(raw=raw,
                                            postfix=fif_postfix,
                                            extention=fif_extention,
                                            update_raw_fname=False)

        #--- apply noise reducer for 50 Hz (and harmonics)

        if (reflp or refhp):
            raw, fname_out = noise_reducer_4raw_data(fname,
                                                     raw=raw,
                                                     reflp=reflp,
                                                     refhp=refhp,
                                                     verbose=verbose,
                                                     save=False,
                                                     **kwargs['parameter'])
            kwargs['parameter']['detrending'] = None
            nr_done = True
        if refnotch:
            for refn in refnotch:
                raw, fname_out = noise_reducer_4raw_data(None,
                                                         raw=raw,
                                                         refnotch=refn,
                                                         verbose=verbose,
                                                         save=False,
                                                         **kwargs['parameter'])
                kwargs['parameter']['detrending'] = None
            nr_done = True

        # raw.info['filename'] = fname_out

        if not nr_done:
            return fname_raw, raw

        raw.info['filename'] = fname_out

        if save:
            fname_out = jumeg_base.apply_save_mne_data(raw, fname=fname_out)

        if plot:
            print " --> noise reducer plot power spectrum"

            from distutils.dir_util import mkpath

            p, pdf = os.path.split(fname_raw)

            plot_dir = p + '/plots/'

            mkpath(plot_dir)

            fn_power_spect = plot_dir + pdf[:pdf.rfind('-raw.fif')] + ',denoising'

            plot_denoising_4raw_data([fname_raw, fname_out],
                                     show=False,
                                     fnout=fn_power_spect)

            print "---> noise reducer plot :" + fn_power_spect

    print "---> Done noise reducer: " + fname_out

    return (fname_out, raw)
def noise_reducer_4raw_data(fname_raw,
                            raw=None,
                            signals=[],
                            noiseref=[],
                            detrending=None,
                            tmin=None,
                            tmax=None,
                            reflp=None,
                            refhp=None,
                            refnotch=None,
                            exclude_artifacts=True,
                            checkresults=True,
                            fif_extention="-raw.fif",
                            fif_postfix="nr",
                            reject={
                                'grad': 4000e-13,
                                'mag': 4e-12,
                                'eeg': 40e-6,
                                'eog': 250e-6
                            },
                            complementary_signal=False,
                            fnout=None,
                            verbose=False,
                            save=True):
    """Apply noise reduction to signal channels using reference channels.
        
       !!! ONLY ONE RAW Obj Interface Version FB !!!
           
    Parameters
    ----------
    fname_raw : rawfile name

    raw     : fif raw object

    signals : list of string
              List of channels to compensate using noiseref.
              If empty use the meg signal channels.
    noiseref : list of string | str
              List of channels to use as noise reference.
              If empty use the magnetic reference channels (default).
    signals and noiseref may contain regexp, which are resolved
    using mne.pick_channels_regexp(). All other channels are copied.
    tmin : lower latency bound for weight-calc [start of trace]
    tmax : upper latency bound for weight-calc [ end  of trace]
           Weights are calc'd for (tmin,tmax), but applied to entire data set
    refhp : high-pass frequency for reference signal filter [None]
    reflp :  low-pass frequency for reference signal filter [None]
            reflp < refhp: band-stop filter
            reflp > refhp: band-pass filter
            reflp is not None, refhp is None: low-pass filter
            reflp is None, refhp is not None: high-pass filter
    refnotch : (base) notch frequency for reference signal filter [None]
               use raw(ref)-notched(ref) as reference signal
    exclude_artifacts: filter signal-channels thru _is_good() [True]
                       (parameters are at present hard-coded!)
    complementary_signal : replace signal by the traces that would be subtracted [False]
                           (can be useful for debugging)
    checkresults : boolean to control internal checks and overall success [True]

    reject =  dict for rejection threshold 
              units:
              grad:    T / m (gradiometers)
              mag:     T (magnetometers)
              eeg/eog: uV (EEG channels)
              default=>{'grad':4000e-13,'mag':4e-12,'eeg':40e-6,'eog':250e-6}
              
    save : save data to fif file

    Outputfile:
    -------
    <wawa>,nr-raw.fif for input <wawa>-raw.fif

    Returns
    -------
    TBD

    Bugs
    ----
    - artifact checking is incomplete (and with arb. window of tstep=0.2s)
    - no accounting of channels used as signal/reference
    - non existing input file handled ungracefully
    """

    tc0 = time.clock()
    tw0 = time.time()

    if type(complementary_signal) != bool:
        raise ValueError("Argument complementary_signal must be of type bool")

    raw, fname_raw = jumeg_base.get_raw_obj(fname_raw, raw=raw)

    if detrending:
        raw = perform_detrending(None, raw=raw, save=False)

    tc1 = time.clock()
    tw1 = time.time()

    if verbose:
        print ">>> loading raw data took %.1f ms (%.2f s walltime)" % (
            1000. * (tc1 - tc0), (tw1 - tw0))

    # Time window selection
    # weights are calc'd based on [tmin,tmax], but applied to the entire data set.
    # tstep is used in artifact detection
    # tmin,tmax variables must not be changed here!
    if tmin is None:
        itmin = 0
    else:
        itmin = int(floor(tmin * raw.info['sfreq']))
    if tmax is None:
        itmax = raw.last_samp
    else:
        itmax = int(ceil(tmax * raw.info['sfreq']))

    if itmax - itmin < 2:
        raise ValueError(
            "Time-window for noise compensation empty or too short")

    if verbose:
        print ">>> Set time-range to [%7.3f,%7.3f]" % \
              (raw.index_as_time(itmin)[0], raw.index_as_time(itmax)[0])

    if signals is None or len(signals) == 0:
        sigpick = jumeg_base.pick_meg_nobads(raw)
    else:
        sigpick = channel_indices_from_list(raw.info['ch_names'][:], signals,
                                            raw.info.get('bads'))
    nsig = len(sigpick)
    if nsig == 0:
        raise ValueError("No channel selected for noise compensation")

    if noiseref is None or len(noiseref) == 0:
        # References are not limited to 4D ref-chans, but can be anything,
        # incl. ECG or powerline monitor.
        if verbose:
            print ">>> Using all refchans."

        refexclude = "bads"
        refpick = jumeg_base.pick_ref_nobads(raw)
    else:
        refpick = channel_indices_from_list(raw.info['ch_names'][:], noiseref,
                                            raw.info.get('bads'))
    nref = len(refpick)
    if nref == 0:
        raise ValueError("No channel selected as noise reference")

    if verbose:
        print ">>> sigpick: %3d chans, refpick: %3d chans" % (nsig, nref)

    if reflp is None and refhp is None and refnotch is None:
        use_reffilter = False
        use_refantinotch = False
    else:
        use_reffilter = True
        if verbose:
            print "########## Filter reference channels:"

        use_refantinotch = False
        if refnotch is not None:
            if reflp is None and refhp is None:
                use_refantinotch = True
                freqlast = np.min([5.01 * refnotch, 0.5 * raw.info['sfreq']])
                if verbose:
                    print ">>> notches at freq %.1f and harmonics below %.1f" % (
                        refnotch, freqlast)
            else:
                raise ValueError("Cannot specify notch- and high-/low-pass"
                                 "reference filter together")
        else:
            if verbose:
                if reflp is not None:
                    print ">>>  low-pass with cutoff-freq %.1f" % reflp
                if refhp is not None:
                    print ">>> high-pass with cutoff-freq %.1f" % refhp

        # Adapt the following drop-channels command to use 'all-but-refpick'
        droplist = [
            raw.info['ch_names'][k] for k in xrange(raw.info['nchan'])
            if not k in refpick
        ]
        tct = time.clock()
        twt = time.time()
        fltref = raw.drop_channels(droplist, copy=True)
        if use_refantinotch:
            rawref = raw.drop_channels(droplist, copy=True)
            freqlast = np.min([5.01 * refnotch, 0.5 * raw.info['sfreq']])
            fltref.notch_filter(np.arange(refnotch, freqlast, refnotch),
                                picks=np.array(xrange(nref)),
                                method='iir')
            fltref._data = (rawref._data - fltref._data)
        else:
            fltref.filter(refhp,
                          reflp,
                          picks=np.array(xrange(nref)),
                          method='iir')
        tc1 = time.clock()
        tw1 = time.time()
        if verbose:
            print ">>> filtering ref-chans  took %.1f ms (%.2f s walltime)" % (
                1000. * (tc1 - tct), (tw1 - twt))

    if verbose:
        print "########## Calculating sig-ref/ref-ref-channel covariances:"
    # Calculate sig-ref/ref-ref-channel covariance:
    # (there is no need to calc inter-signal-chan cov,
    #  but there seems to be no appropriate fct available)
    # Here we copy the idea from compute_raw_data_covariance()
    # and truncate it as appropriate.
    tct = time.clock()
    twt = time.time()
    # The following reject and infosig entries are only
    # used in _is_good-calls.
    # _is_good() from mne-0.9.git-py2.7.egg/mne/epochs.py seems to
    # ignore ref-channels (not covered by dict) and checks individual
    # data segments - artifacts across a buffer boundary are not found.

    #--- !!! FB put to kwargs

    #reject = dict(grad=4000e-13, # T / m (gradiometers)
    #              mag=4e-12,     # T (magnetometers)
    #              eeg=40e-6,     # uV (EEG channels)
    #              eog=250e-6)    # uV (EOG channels)

    infosig = copy.copy(raw.info)
    infosig['chs'] = [raw.info['chs'][k] for k in sigpick]
    infosig['ch_names'] = [raw.info['ch_names'][k] for k in sigpick]
    infosig['nchan'] = len(sigpick)
    idx_by_typesig = channel_indices_by_type(infosig)

    # Read data in chunks:
    tstep = 0.2
    itstep = int(ceil(tstep * raw.info['sfreq']))
    sigmean = 0
    refmean = 0
    sscovdata = 0
    srcovdata = 0
    rrcovdata = 0
    n_samples = 0

    for first in range(itmin, itmax, itstep):
        last = first + itstep
        if last >= itmax:
            last = itmax
        raw_segmentsig, times = raw[sigpick, first:last]
        if use_reffilter:
            raw_segmentref, times = fltref[:, first:last]
        else:
            raw_segmentref, times = raw[refpick, first:last]

        if not exclude_artifacts or \
           _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=None,
                    ignore_chs=raw.info['bads']):
            sigmean += raw_segmentsig.sum(axis=1)
            refmean += raw_segmentref.sum(axis=1)
            sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
            srcovdata += np.dot(raw_segmentsig, raw_segmentref.T)
            rrcovdata += np.dot(raw_segmentref, raw_segmentref.T)
            n_samples += raw_segmentsig.shape[1]
        else:
            logger.info("Artefact detected in [%d, %d]" % (first, last))
    if n_samples <= 1:
        raise ValueError('Too few samples to calculate weights')
    sigmean /= n_samples
    refmean /= n_samples
    sscovdata -= n_samples * sigmean[:] * sigmean[:]
    sscovdata /= (n_samples - 1)
    srcovdata -= n_samples * sigmean[:, None] * refmean[None, :]
    srcovdata /= (n_samples - 1)
    rrcovdata -= n_samples * refmean[:, None] * refmean[None, :]
    rrcovdata /= (n_samples - 1)
    sscovinit = np.copy(sscovdata)
    if verbose:
        print ">>> Normalize srcov..."

    rrslope = copy.copy(rrcovdata)
    for iref in xrange(nref):
        dtmp = rrcovdata[iref, iref]
        if dtmp > TINY:
            srcovdata[:, iref] /= dtmp
            rrslope[:, iref] /= dtmp
        else:
            srcovdata[:, iref] = 0.
            rrslope[:, iref] = 0.

    if verbose:
        print ">>> Number of samples used : %d" % n_samples
        tc1 = time.clock()
        tw1 = time.time()
        print ">>> sigrefchn covar-calc took %.1f ms (%.2f s walltime)" % (
            1000. * (tc1 - tct), (tw1 - twt))

    if checkresults:
        if verbose:
            print "########## Calculated initial signal channel covariance:"
            # Calculate initial signal channel covariance:
            # (only used as quality measure)
            print ">>> initl rt(avg sig pwr) = %12.5e" % np.sqrt(
                np.mean(sscovdata))
            for i in xrange(5):
                print ">>> initl signal-rms[%3d] = %12.5e" % (
                    i, np.sqrt(sscovdata.flatten()[i]))
            print ">>>"

    U, s, V = np.linalg.svd(rrslope, full_matrices=True)
    if verbose:
        print ">>> singular values:"
        print s
        print ">>> Applying cutoff for smallest SVs:"

    dtmp = s.max() * SVD_RELCUTOFF
    s *= (abs(s) >= dtmp)
    sinv = [1. / s[k] if s[k] != 0. else 0. for k in xrange(nref)]
    if verbose:
        print ">>> singular values (after cutoff):"
        print s

    stat = np.allclose(rrslope, np.dot(U, np.dot(np.diag(s), V)))
    if verbose:
        print ">>> Testing svd-result: %s" % stat
        if not stat:
            print "    (Maybe due to SV-cutoff?)"

    # Solve for inverse coefficients:
    # Set RRinv.tr=U diag(sinv) V
    RRinv = np.transpose(np.dot(U, np.dot(np.diag(sinv), V)))
    if checkresults:
        stat = np.allclose(np.identity(nref), np.dot(RRinv, rrslope))
        if stat:
            if verbose:
                print ">>> Testing RRinv-result (should be unit-matrix): ok"
        else:
            print ">>> Testing RRinv-result (should be unit-matrix): failed"
            print np.transpose(np.dot(RRinv, rrslope))
            print ">>>"

    if verbose:
        print "########## Calc weight matrix..."

    # weights-matrix will be somewhat larger than necessary,
    # (to simplify indexing in compensation loop):
    weights = np.zeros((raw._data.shape[0], nref))
    for isig in xrange(nsig):
        for iref in xrange(nref):
            weights[sigpick[isig], iref] = np.dot(srcovdata[isig, :],
                                                  RRinv[:, iref])

    if verbose:
        print "########## Compensating signal channels:"
        if complementary_signal:
            print ">>> Caveat: REPLACING signal by compensation signal"

    tct = time.clock()
    twt = time.time()

    # Work on entire data stream:
    for isl in xrange(raw._data.shape[1]):
        slice = np.take(raw._data, [isl], axis=1)
        if use_reffilter:
            refslice = np.take(fltref._data, [isl], axis=1)
            refarr = refslice[:].flatten() - refmean
            # refarr = fltres[:,isl]-refmean
        else:
            refarr = slice[refpick].flatten() - refmean
        subrefarr = np.dot(weights[:], refarr)

        if not complementary_signal:
            raw._data[:, isl] -= subrefarr
        else:
            raw._data[:, isl] = subrefarr

        if (isl % 10000 == 0) and verbose:
            print "\rProcessed slice %6d" % isl

    if verbose:
        print "\nDone."
        tc1 = time.clock()
        tw1 = time.time()
        print ">>> compensation loop took %.1f ms (%.2f s walltime)" % (
            1000. * (tc1 - tct), (tw1 - twt))

    if checkresults:
        if verbose:
            print "########## Calculating final signal channel covariance:"
        # Calculate final signal channel covariance:
        # (only used as quality measure)
        tct = time.clock()
        twt = time.time()
        sigmean = 0
        sscovdata = 0
        n_samples = 0
        for first in range(itmin, itmax, itstep):
            last = first + itstep
            if last >= itmax:
                last = itmax
            raw_segmentsig, times = raw[sigpick, first:last]
            # Artifacts found here will probably differ from pre-noisered artifacts!
            if not exclude_artifacts or \
               _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
                        flat=None, ignore_chs=raw.info['bads']):
                sigmean += raw_segmentsig.sum(axis=1)
                sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
                n_samples += raw_segmentsig.shape[1]
        sigmean /= n_samples
        sscovdata -= n_samples * sigmean[:] * sigmean[:]
        sscovdata /= (n_samples - 1)

        if verbose:
            print ">>> no channel got worse: ", np.all(
                np.less_equal(sscovdata, sscovinit))
            print ">>> final rt(avg sig pwr) = %12.5e" % np.sqrt(
                np.mean(sscovdata))
            for i in xrange(5):
                print ">>> final signal-rms[%3d] = %12.5e" % (
                    i, np.sqrt(sscovdata.flatten()[i]))
            tc1 = time.clock()
            tw1 = time.time()
            print ">>> signal covar-calc took %.1f ms (%.2f s walltime)" % (
                1000. * (tc1 - tct), (tw1 - twt))
            print ">>>"

#--- fb update 21.07.2015
    fname_out = jumeg_base.get_fif_name(raw=raw,
                                        postfix=fif_postfix,
                                        extention=fif_extention)

    if save:
        jumeg_base.apply_save_mne_data(raw, fname=fname_out, overwrite=True)

    tc1 = time.clock()
    tw1 = time.time()
    if verbose:
        print ">>> Total run took %.1f ms (%.2f s walltime)" % (1000. *
                                                                (tc1 - tc0),
                                                                (tw1 - tw0))

    return raw, fname_out
Example no. 18
    def _epochs_get_epochs_and_apply_baseline(self, raw, evt=None, picks=None):
        """generate epochs from raw and apply baseline correction if baseline is not None
        exclude epochs due to a short baseline onset/offset interval or
        epochs which will not fit in the time window [time_pre <> time_post]
        
        Parameters
        ----------
        raw obj
        evt: event dict
                                    
        evt['bc']['baseline']    : time range in sec [None,0.0]
                                   if evt['bc']['baseline'] is None or [] no baseline correction applied        
        evt['bc']['events']      : feed to mne.Epochs as <events>
        evt['bc']['event_id']    : feed to mne.Epochs as <event_id>
                   
        check for bad epochs due to short baseline onset/offset interval and drop them
              
        Returns
        ----------
        updated event dict
         evt["epochs"]             : mne epoch obj
         evt["baseline_corrected"] : True if baseline correction 
         
        FYI: digital channels like <stimulus> and <response> are excluded from baseline correction
             e.g. <STI 013> <STI 014>
        """

        ep_bc_corrected = None
        evt['epochs'] = None
        evt['baseline_corrected'] = False

        if raw:
            self.raw = raw

    #--- update and load raw     obj
        self.raw, self.fname = jumeg_base.get_raw_obj(self.fname, raw=self.raw)

        #--- get epochs no bc correction
        ep = mne.Epochs(self.raw,
                        evt['events'],
                        event_id=evt['event_id'],
                        tmin=self.marker.time_pre,
                        tmax=self.marker.time_post,
                        baseline=None,
                        picks=picks,
                        reject=self.reject,
                        proj=self.proj,
                        preload=True,
                        verbose=False)
        ep.drop_bad()  #- exclude bad epochs, e.g. too short

        if self.verbose:  # for later show difference min max with and without bc
            meg_picks = jumeg_base.picks.meg_nobads(self.raw)
            meg_min = ep._data[:, meg_picks, :].min()
            meg_max = ep._data[:, meg_picks, :].max()

    #--- calc baseline correction
        if self.marker.baseline.method:
            if evt['bc']['events'].any() and ep.selection.any():

                #--- ck for no unique baseline events and apply bc correction, standard task
                ep_bc_corrected = self._calc_baseline_correction_for_events(
                    ep, evt['bc']['events'])

                #--- ck for unique events and apply bc correction with unique baseline events, e.g. one baseline interval used for multiple stimuli
                if not ep_bc_corrected:
                    ep_bc_corrected = self._calc_baseline_correction_for_unique_events(
                        ep, evt['bc']['events'])

                if ep_bc_corrected:
                    evt['epochs'] = ep_bc_corrected
                    evt['baseline_corrected'] = True
        else:
            evt['epochs'] = ep

        if self.verbose:
            print " ---> Epocher apply epoch and baseline -> mne epochs:"
            print "   -> fname : " + self.fname
            print "      id: %d  <pre time>: %0.3f <post time>: %0.3f" % (
                evt['event_id'], self.marker.time_pre, self.marker.time_post)
            print "  --> baseline correction : %r" % (
                evt['baseline_corrected'])
            self.line()
            print " --> epoch info: "
            print "\n --> Epoch selection: {}".format(ep.selection.shape)
            print ep.selection
            self.line()
            print "  -> MEG min   : %0.15f" % (meg_min)
            print "  -> MEG min BC: %0.15f" % (
                evt['epochs']._data[:, meg_picks, :].min())
            print "  -> MEG max   : %0.15f" % (meg_max)
            print "  -> MEG max BC: %0.15f" % (
                evt['epochs']._data[:, meg_picks, :].max())
            self.line()
            if evt['baseline_corrected']:
                print "  -> done -> baseline correction"
                print "     bc id: %d  <pre time>: %0.3f <post time>: %0.3f" % (
                    evt['bc']['event_id'], self.marker.time_pre,
                    self.marker.time_post)
                self.line()
        return evt
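
Baseline correction itself amounts to subtracting, per epoch and channel, the mean over the baseline window from the whole epoch (the digital stimulus/response channels are excluded in the method above). A self-contained numpy sketch of that step, assuming an epochs array of shape (n_epochs, n_channels, n_times) and a baseline of [time_pre, 0.0], as in evt['bc']['baseline'] = [None, 0.0]:

import numpy as np

rng = np.random.RandomState(2)
sfreq, time_pre, time_post = 1000.0, -0.2, 0.5
times = np.arange(time_pre, time_post, 1.0 / sfreq)

# toy epochs (n_epochs, n_channels, n_times) with a per-epoch/channel DC offset
epochs = rng.standard_normal((10, 4, times.size)) + rng.standard_normal((10, 4, 1))

bc_mask = times <= 0.0                                          # baseline window [time_pre, 0.0]
baseline_mean = epochs[:, :, bc_mask].mean(axis=2, keepdims=True)
epochs_bc = epochs - baseline_mean

print(np.allclose(epochs_bc[:, :, bc_mask].mean(axis=2), 0.0))  # baseline mean is now ~0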