def run_as_void(*args, **kwargs):
    """Run the wrapped `method` in parallel, managing its output argument.

    The method is assumed to accept an output array via the keyword named by
    `output_kwarg`. If the caller left that argument at its default and the
    method normally runs in-place, the method is simply split over workers.
    Otherwise the method is converted to "void" form (output appended as the
    last positional argument) and, if necessary, an output array is
    allocated here via `allocate_output`.

    Closure variables from the enclosing decorator scope: `method`,
    `output_kwarg`, `runs_inplace`, `split_kwargs`, `same_shape`,
    `calc_shape`.

    Returns
    -------
    The output array (or whatever `fix_output` decides, possibly None).
    """
    # method_has_output_arg = True
    default_args = get_default_args(method)
    default_out = default_args[output_kwarg]
    # Pull the output argument (if given) and the inplace flag out of kwargs
    out = kwargs.pop(output_kwarg, default_out)
    inplace = kwargs.pop('inplace', False)
    # "Void" form of the method: the output argument is cancelled from the
    # keyword signature and supplied as the final positional argument.
    void_method = fix_signature_for_split(output_kwarg, cancel_output=True)(method)
    # If the method normally runs inplace and the output arg is None,
    # then run the void wrapped method with just the normal array splitting.
    # In all other cases, split the output variable
    if runs_inplace and out is default_out:
        para_method = split_at(**split_kwargs)(method)
        full_args = args
    else:
        void_split_args = split_kwargs.copy()
        # split the last arg that is added by to_void()
        split_arg = split_kwargs.get(
            'split_arg', get_default_args(split_at)['split_arg'])
        full_split_arg = split_arg + (len(args),)
        void_split_args['split_arg'] = full_split_arg
        # split and run as void, then return the output
        para_method = split_at(**void_split_args)(void_method)
        if out is default_out:
            # Probe whether the parallel path will actually run: if so the
            # output must live in shared memory, otherwise plain memory is fine.
            use_shared = para_method(*args, None, check_parallel=True)
            try:
                out = allocate_output(args, kwargs, void_split_args,
                                      inplace=inplace, shared=use_shared,
                                      same_shape=same_shape, calc_shape=calc_shape)
            except ValueError:
                raise ValueError('output argument "out" is required')
        full_args = args + (out,)
    # Run the method and return the output (or None, if output was not set)
    r = para_method(*full_args, **kwargs)
    return fix_output(out, r)
def allocate_output(call_args: tuple, call_kwargs: dict, split_kwargs: dict,
                    inplace: bool = False, shared: bool = True,
                    same_shape: bool = True, calc_shape: callable = None):
    """
    Allocate memory for output from parallel jobs.

    Parameters
    ----------
    call_args : tuple
        Positional arguments of the pending call; the split input is found here.
    call_kwargs : dict
        Keyword arguments of the pending call (forwarded to `calc_shape`).
    split_kwargs : dict
        Options for `split_at`; `split_kwargs['split_arg'][0]` locates the
        split input within `call_args` (falls back to `split_at`'s default).
    inplace : bool
        If True, reuse the split input itself as the output.
    shared : bool
        If True (and not inplace), allocate shared memory for parallel workers.
    same_shape : bool
        If True, the output copies the split input's shape and dtype.
    calc_shape : callable, optional
        Called as ``calc_shape(call_args, call_kwargs)`` to return
        ``(shape, dtype)`` when the output shape differs from the input.

    Returns
    -------
    ndarray
        The allocated (or reused) output array.

    Raises
    ------
    ValueError
        If neither `same_shape` nor `calc_shape` determines the output shape.
    """
    if not (same_shape or calc_shape):
        # The output geometry is undefined without one of these two options
        raise ValueError('need same_shape=True or a calc_shape callable')
    # Locate the split input: explicit in split_kwargs, otherwise look up
    # split_at's default lazily (only when actually needed).
    if 'split_arg' in split_kwargs:
        split_arg = split_kwargs['split_arg']
    else:
        split_arg = get_default_args(split_at)['split_arg']
    split_input = call_args[split_arg[0]]
    if same_shape:
        shape = split_input.shape
        dtype = split_input.dtype
    else:
        shape, dtype = calc_shape(call_args, call_kwargs)
    if inplace:
        # the output will be placed in the split arg
        # (or the first split arg, if that makes sense?)
        out = split_input
    elif shared:
        # shared memory, so parallel workers can write into it
        out = pctx.shared_ndarray(shape, typecode=dtype.char)
    else:
        # don't bother with shared memory for a serial run
        out = np.empty(shape, dtype=dtype)
    return out
def notch_all(
        arr, Fs, lines=60.0, nzo=3, nwid=3.0, inplace=True, nmax=None, **filt_kwargs
):
    """Apply notch filtering to a array timeseries.

    Parameters
    ----------
    arr : ndarray
        timeseries
    Fs : float
        sampling frequency
    lines : [list of] float(s)
        One or more lines to notch.
    nzo : int (default 3)
        Number of zeros for the notch filter (more zeros --> deeper notch).
    nwid : float (default 3)
        Affects distance of notch poles from zeros (smaller --> closer).
        Zeros occur on the unit disk in the z-plane. Note that the
        stability of a digital filter depends on poles being within the unit
        disk.
    nmax : float (optional)
        If set, notch all multiples of (scalar-valued) lines up to nmax.

    Returns
    -------
    notched : ndarray
    """
    if inplace:
        # If filtering inplace, then set the output array as such
        filt_kwargs['out'] = arr
    elif filt_kwargs.get('out', None) is None:
        # If inplace is False and no output array is set,
        # then make the array copy here and do inplace filtering on the copy
        arr_f = shm.shared_copy(arr)
        arr = arr_f
        filt_kwargs['out'] = arr
    # otherwise an output array is set
    if isinstance(lines, (float, int)):
        # repeat lines until nmax
        nf = lines
        if nmax is None:
            nmax = nf
        nmax = min(nmax, Fs / 2.0)
        lines = [nf * i for i in range(1, int(nmax // nf) + 1)]
    else:
        lines = [x for x in lines if x < Fs / 2]
    notch_defs = get_default_args(notch)
    notch_defs['nwid'] = nwid
    notch_defs['nzo'] = nzo
    notch_defs['Fs'] = Fs
    # BUGFIX: seed the result with the unfiltered array so that an empty
    # `lines` list no longer raises UnboundLocalError, and feed each pass's
    # result into the next. Previously, when a caller supplied `out`
    # (inplace=False), every pass re-filtered the ORIGINAL array into `out`,
    # so only the last notch survived. In the inplace/copy paths the output
    # already aliases `arr`, so `arr = arr_f` is a no-op there.
    arr_f = arr
    for nf in lines:
        notch_defs['fcut'] = nf
        arr_f = filter_array(arr, 'notch', inplace=False,
                             design_kwargs=notch_defs, **filt_kwargs)
        arr = arr_f
    return arr_f
def run_para_void(*args, **kwargs):
    """Run the wrapped `method` in parallel in "void" form, managing output.

    The method is converted with `to_void` so its output is supplied as the
    last positional argument, which is then added to the set of split
    arguments. If the caller does not pass `out`, an output array is
    allocated here (shared memory when the parallel path will run).

    Closure variables from the enclosing decorator scope: `method`,
    `position`, `split_kwargs`, `same_shape`, `calc_shape`.

    Returns
    -------
    The output array (or whatever `fix_output` decides, possibly None).
    """
    out = kwargs.pop('out', None)
    void_method = to_void(method, clear_position=position)
    inplace = kwargs.pop('inplace', False)
    void_split_args = split_kwargs.copy()
    # split the last arg that is added by to_void()
    split_arg = split_kwargs.get(
        'split_arg', get_default_args(split_at)['split_arg'])
    full_split_arg = split_arg + (len(args),)
    void_split_args['split_arg'] = full_split_arg
    # split and run as void, then return the output
    para_method = split_at(**void_split_args)(void_method)
    if out is None:
        # Probe whether the parallel path will actually run: if so the output
        # must live in shared memory, otherwise plain memory is fine.
        use_shared = para_method(*args, None, check_parallel=True)
        try:
            out = allocate_output(args, kwargs, split_kwargs, inplace=inplace,
                                  shared=use_shared, same_shape=same_shape,
                                  calc_shape=calc_shape)
        except ValueError:
            raise ValueError('output argument "out" is required')
    full_args = args + (out,)
    r = para_method(*full_args, **kwargs)
    return fix_output(out, r)
def _get_poles_zeros(destype, **filt_args):
    """Dispatch to a filter design function and return its (b, a) coefficients.

    Parameters
    ----------
    destype : str
        Design type: any string starting with 'butter', or one of
        'cheby1', 'cheby2', 'notch'.
    **filt_args :
        Design parameters. For non-Butterworth designs, at most one key may
        be "extra" (not a named default of the design function); its value
        is passed as the first positional argument (e.g. ripple or fcut).

    Returns
    -------
    Filter coefficients as returned by the design function.

    Raises
    ------
    ValueError
        If more than one extra (non-default) argument is supplied.
    KeyError
        If `destype` is not a known design type.
    """
    if destype.lower().startswith('butter'):
        return butter_bp(**filt_args)
    des_lookup = dict(cheby1=cheby1_bp, cheby2=cheby2_bp, notch=notch)
    desfun = des_lookup[destype]
    def_args = get_default_args(desfun)
    # Keys that are not named defaults of the design function
    extra_keys = [k for k in filt_args if k not in def_args]
    # should only be one extra key
    if len(extra_keys) > 1:
        raise ValueError('too many arguments for filter type ' + destype)
    if not extra_keys:
        # BUGFIX: no positional parameter given -- call with keyword defaults
        # only (previously this path crashed with an opaque IndexError from
        # pop() on an empty list).
        return desfun(**filt_args)
    extra_val = filt_args.pop(extra_keys[0])
    return desfun(extra_val, **filt_args)
def translate_legacy_config_options(*args, **options):
    """Convert legacy loader options ('useFs', 'downsamp') to OpenEphysLoader kwargs.

    Options not recognized by `OpenEphysLoader` are dropped with a
    FutureWarning. A non-positive 'resample_rate' is treated as unset; the
    rate is then derived from 'useFs' (a target rate in Hz) or from
    'downsamp' (a decimation factor, which requires reading the primary
    data to learn the raw sample rate).

    Parameters
    ----------
    *args :
        Positional arguments for constructing an OpenEphysLoader (only used
        when a 'downsamp' factor must be resolved against the raw rate).
    **options :
        Legacy and/or current loader keyword options.

    Returns
    -------
    dict
        Keyword arguments suitable for OpenEphysLoader.

    Raises
    ------
    RuntimeError
        If 'downsamp' is given but the raw sample rate cannot be found.
    """
    loader_kwargs = options.copy()
    useFs = loader_kwargs.pop('useFs', -1)
    downsamp = loader_kwargs.pop('downsamp', 1)
    opt_keys = set(loader_kwargs.keys())
    loader_options = set(get_default_args(OpenEphysLoader).keys())
    extra_args = opt_keys.difference(loader_options)
    if extra_args:
        warnings.warn('Extra arguments were dropped {}'.format(extra_args),
                      FutureWarning)
        # just silently drop this one
        for k in extra_args:
            loader_kwargs.pop(k, None)
    # Anticipate some crappy values that might be specified alongside real
    # information in "useFs" or "downsamp"
    if 'resample_rate' in loader_kwargs:
        rate = loader_kwargs['resample_rate']
        if isinstance(rate, (int, float)) and rate <= 0:
            loader_kwargs['resample_rate'] = None
    if loader_kwargs.get('resample_rate', None) is None:
        # If possible, rename useFs to resample_rate
        if useFs > 0:
            loader_kwargs['resample_rate'] = useFs
        elif downsamp > 1:
            # If downsample factor is given, try to find the original sample
            # rate (needs access to primary data)
            try:
                loader_info = OpenEphysLoader(*args)
                loader_kwargs['resample_rate'] = loader_info.raw_sample_rate() / downsamp
            except DataPathError as err:
                # Chain the original error so the real cause is visible
                raise RuntimeError(
                    'Could not find the original sampling rate needed to compute a '
                    'resample rate given the downsample factor of {}. Try loading again '
                    'specifying "resample_rate" in Hz to see if a pre-computed downsample '
                    'can be loaded.'.format(downsamp)) from err
    return loader_kwargs
def filter_array(
        arr, ftype='butterworth', inplace=True, out=None, block_filter='parallel',
        design_kwargs=None, filt_kwargs=None
):
    """
    Filter an ND array timeseries on the last dimension. For computational
    efficiency, the timeseries are blocked into partitions (10000 points by
    default) and split over multiple threads (not supported on Windoze).

    Parameters
    ----------
    arr: ndarray
        Timeseries in the last dimension (can be 1D).
    ftype: str
        Filter type to design.
    inplace: bool
        If True, then arr must be a shared memory array. Otherwise a shared
        copy will be made from the input. This is a shortcut for using "out=arr".
    out: ndarray
        If not None, place filter output here (if inplace is specified, any
        output array is ignored).
    block_filter: str or callable
        Specify the run-time block filter to apply. Can be "parallel" or
        "serial", or can be a callable that follows the basic signature of
        `ecogdata.filt.time.block_filter.bfilter`.
    design_kwargs: dict
        Design parameters for the filter (e.g. lo, hi, Fs, ord)
    filt_kwargs: dict
        Processing parameters (e.g. filtfilt, bsize)

    Returns
    -------
    arr_f: ndarray
        Filtered timeseries, same shape as input.
    """
    # BUGFIX: avoid mutable default arguments (previously `dict()` defaults
    # shared one instance across all calls).
    if design_kwargs is None:
        design_kwargs = dict()
    if filt_kwargs is None:
        filt_kwargs = dict()
    b, a = _get_poles_zeros(ftype, **design_kwargs)
    check_shm = False
    if isinstance(block_filter, str):
        if block_filter.lower() == 'parallel':
            from ecogdata.parallel.split_methods import bfilter
            block_filter = bfilter
            # only the parallel filter understands the shared-memory probe
            check_shm = True
        elif block_filter.lower() == 'serial':
            from .blocked_filter import bfilter
            block_filter = bfilter
        else:
            raise ValueError('Block filter type not known: {}'.format(block_filter))
    if not callable(block_filter):
        raise ValueError('Provided block filter is not callable: {}'.format(block_filter))
    def_args = get_default_args(block_filter)
    # Set some defaults and then update with filt_kwargs
    def_args['bsize'] = 10000
    def_args['filtfilt'] = True
    def_args.update(filt_kwargs)
    def_args['out'] = out
    if inplace:
        # enforce that def_args['out'] is arr?
        def_args['out'] = arr
        block_filter(b, a, arr, **def_args)
        return arr
    else:
        # still use bfilter for memory efficiency
        # Work in progress to use this syntax
        # use_shm = hasattr(block_filter, 'uses_parallel') and block_filter.uses_parallel(b, a, arr)
        if check_shm:
            # signal shared memory usage if check_shm remains true
            try:
                check_shm = block_filter(b, a, arr, check_parallel=True, **def_args)
            except TypeError:
                # block filter does not accept the probe keyword
                check_shm = False
        if def_args['out'] is None:
            # allocate the output: shared memory if the parallel path will run
            if check_shm:
                arr_f = shm.shared_ndarray(arr.shape, arr.dtype.char)
            else:
                arr_f = np.empty_like(arr)
            def_args['out'] = arr_f
        block_filter(b, a, arr, **def_args)
        return def_args['out']