def cached_chisq_bins(self, template, psd):
    """Return the chisq bins for the given template and PSD, caching the
    result on the template so repeated calls avoid recomputing them.
    """
    from pycbc.opt import LimitedSizeDict

    key = id(psd)
    if not hasattr(psd, '_chisq_cached_key'):
        psd._chisq_cached_key = {}
    if not hasattr(template, '_bin_cache'):
        template._bin_cache = LimitedSizeDict(size_limit=2**2)

    if key not in template._bin_cache or \
            id(template.params) not in psd._chisq_cached_key:
        psd._chisq_cached_key[id(template.params)] = True
        num_bins = int(self.parse_option(template, self.num_bins))

        if hasattr(psd, 'sigmasq_vec') and \
                template.approximant in psd.sigmasq_vec:
            kmin = int(template.f_lower / psd.delta_f)
            kmax = template.end_idx
            bins = power_chisq_bins_from_sigmasq_series(
                psd.sigmasq_vec[template.approximant], num_bins, kmin, kmax)
        else:
            bins = power_chisq_bins(template, num_bins, psd,
                                    template.f_lower)
        template._bin_cache[key] = bins

    return template._bin_cache[key]
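# Hedged demo of why size_limit=2**2 above keeps the cache bounded:
# pycbc.opt.LimitedSizeDict discards old entries once the limit is
# exceeded, so each template holds bins for at most four PSDs. The exact
# eviction order (oldest-first) is an assumption here, not shown above.
from pycbc.opt import LimitedSizeDict

cache = LimitedSizeDict(size_limit=2**2)
for k in range(6):
    cache[k] = 'bins-%d' % k
print(list(cache))  # expected [2, 3, 4, 5] if eviction is oldest-first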
def set_ref_time(self, inj, ts):
    """Sets t=0 of the given time series based on what the given
    injection's ``ref_point`` is.
    """
    try:
        ref_point = inj.ref_point
    except AttributeError as _err:
        # Py3.XX: uncomment the "from _err" when we drop 2.7
        raise ValueError("Must provide a ref_point for {} injections"
                         .format(self.injtype)) #from _err
    # try to get from buffer
    if self._rtbuffer is None:
        self._rtbuffer = LimitedSizeDict(size_limit=self._buffersize)
    try:
        reftime = self._rtbuffer[inj.filename, ref_point]
    except KeyError:
        if ref_point == "start":
            reftime = 0.
        elif ref_point == "end":
            reftime = -len(ts)*ts.delta_t
        elif ref_point == "center":
            reftime = -len(ts)*ts.delta_t/2.
        elif ref_point == "absmax":
            reftime = -ts.abs_arg_max()*ts.delta_t
        elif isinstance(ref_point, (float, int)):
            reftime = -float(ref_point)
        else:
            raise ValueError("Unrecognized ref_point {} provided"
                             .format(ref_point))
        self._rtbuffer[inj.filename, ref_point] = reftime
    ts._epoch = reftime
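# Worked example of the ref_point -> epoch mapping above, assuming a time
# series with len(ts) == 8 samples and ts.delta_t == 0.25 s (2 s of data);
# the values follow directly from the branches in set_ref_time:
#   ref_point = "start"  -> ts._epoch =  0.0   (series starts at t=0)
#   ref_point = "end"    -> ts._epoch = -2.0   (series ends at t=0)
#   ref_point = "center" -> ts._epoch = -1.0   (midpoint lands at t=0)
#   ref_point = "absmax" -> ts._epoch = -argmax(|ts|)*0.25
#                                               (peak sample lands at t=0)
#   ref_point = 0.5      -> ts._epoch = -0.5   (t=0 is 0.5 s into the data)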
def loadts(self, inj):
    """Loads an injection time series.

    After the first time a time series is loaded it is added to an
    internal buffer for faster retrieval in case another injection uses
    the same series.
    """
    if self._buffer is None:
        # create the buffer
        self._buffer = LimitedSizeDict(size_limit=self._buffersize)
    try:
        return self._buffer[inj.filename]
    except KeyError:
        pass
    # not in buffer, so load
    if inj.filename.endswith('.gwf'):
        try:
            channel = inj.channel
        except AttributeError as _err:
            # Py3.XX: uncomment the "from _err" when we drop 2.7
            raise ValueError("Must provide a channel for "
                             "frame files") #from _err
        ts = frame.read_frame(inj.filename, channel)
    else:
        ts = load_timeseries(inj.filename)
    # cache
    self._buffer[inj.filename] = ts
    return ts
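# Hedged mini-demo of the same buffering idea: the second lookup for a
# filename is served from the dict, so the file is read only once.
# LimitedSizeDict is real pycbc.opt API; `load` is a hypothetical
# stand-in for the frame.read_frame/load_timeseries calls above.
from pycbc.opt import LimitedSizeDict

_buffer = LimitedSizeDict(size_limit=100)

def load(filename):
    try:
        return _buffer[filename]           # buffer hit: no disk access
    except KeyError:
        ts = "<data from %s>" % filename   # stand-in for the disk read
        _buffer[filename] = ts
        return ts

assert load("a.hdf") is load("a.hdf")      # second call is a buffer hit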
def resize(self, new_size):
    """Resize self to new_size
    """
    if new_size == len(self):
        return
    else:
        # reset the cache of saved results since the contents will change
        self._saved = LimitedSizeDict(size_limit=2**5)

        new_arr = zeros(new_size, dtype=self.dtype)
        if len(self) <= new_size:
            new_arr[0:len(self)] = self
        else:
            new_arr[:] = self[0:new_size]

        self._data = new_arr._data
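# Hedged worked example of the resize semantics above, assuming pycbc is
# installed and `Array` (pycbc.types.Array) is the class these methods
# belong to:
from pycbc.types import Array

a = Array([1., 2., 3., 4.])
a.resize(6)   # grow: contents kept, tail zero-filled -> [1, 2, 3, 4, 0, 0]
a.resize(2)   # shrink: truncated to the first two samples -> [1, 2]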
def roll(self, shift):
    """Cyclically shift the vector by ``shift`` samples.
    """
    # reset the cache of saved results since the contents will change
    self._saved = LimitedSizeDict(size_limit=2**5)
    if shift == 0:
        return
    if shift < 0:
        shift = len(self) + shift

    new_arr = zeros(len(self), dtype=self.dtype)
    new_arr[0:shift] = self[len(self)-shift:len(self)]
    new_arr[shift:len(self)] = self[0:len(self)-shift]

    self._data = new_arr._data
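# Hedged worked example of the cyclic shift, assuming pycbc is installed
# and `Array` is pycbc.types.Array:
from pycbc.types import Array

v = Array([1., 2., 3., 4., 5.])
v.roll(2)    # last two samples wrap to the front -> [4, 5, 1, 2, 3]
v.roll(-2)   # negative shift becomes len(v)-2 = 3 -> [1, 2, 3, 4, 5] again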
def __init__(self, initial_array, dtype=None, copy=True):
    """ initial_array: An array-like object as specified by NumPy, this
    also includes instances of an underlying data type as described in
    section 3 or an instance of the PyCBC Array class itself. This
    object is used to populate the data of the array.

    dtype: A NumPy style dtype that describes the type of encapsulated
    data (float32, complex64, etc)

    copy: This defines whether the initial_array is copied to instantiate
    the array or is simply referenced. If copy is false, new data is not
    created, and so all arguments that would force a copy are ignored.
    The default is to copy the given object.
    """
    self._scheme = _scheme.mgr.state
    self._saved = LimitedSizeDict(size_limit=2**5)

    # Unwrap initial_array
    if isinstance(initial_array, Array):
        initial_array = initial_array._data

    if not copy:
        if not _scheme_matches_base_array(initial_array):
            raise TypeError("Cannot avoid a copy of this array")
        else:
            self._data = initial_array

        # Check that the dtype is supported.
        if self._data.dtype not in _ALLOWED_DTYPES:
            raise TypeError(str(self._data.dtype) + ' is not supported')

        if dtype and dtype != self._data.dtype:
            raise TypeError("Can only set dtype when allowed to copy data")

    if copy:
        # First we will check the dtype that we are given
        if not hasattr(initial_array, 'dtype'):
            initial_array = _numpy.array(initial_array)

        # Determine the dtype to use
        if dtype is not None:
            dtype = _numpy.dtype(dtype)
            if dtype not in _ALLOWED_DTYPES:
                raise TypeError(str(dtype) + ' is not supported')
            if dtype.kind != 'c' and initial_array.dtype.kind == 'c':
                raise TypeError(str(initial_array.dtype) +
                                ' cannot be cast as ' + str(dtype))
        elif initial_array.dtype in _ALLOWED_DTYPES:
            dtype = initial_array.dtype
        else:
            if initial_array.dtype.kind == 'c':
                dtype = complex128
            else:
                dtype = float64

        # Cast to the final dtype if needed
        if initial_array.dtype != dtype:
            initial_array = initial_array.astype(dtype)

        # Create new instance with initial_array as initialization.
        if issubclass(type(self._scheme), _scheme.CPUScheme):
            if hasattr(initial_array, 'get'):
                self._data = _numpy.array(initial_array.get())
            else:
                self._data = _numpy.array(initial_array, dtype=dtype,
                                          ndmin=1)
        elif _scheme_matches_base_array(initial_array):
            self._data = _copy_base_array(initial_array)  # pylint:disable=assignment-from-no-return
        else:
            initial_array = _numpy.array(initial_array, dtype=dtype,
                                         ndmin=1)
            self._data = _to_device(initial_array)  # pylint:disable=assignment-from-no-return
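# Hedged usage sketch of the constructor above, assuming pycbc is
# installed and this is pycbc.types.Array:
import numpy as _np
from pycbc.types import Array

a = Array([1, 2, 3])         # plain list: copied, upcast to float64
b = Array([1j, 2j])          # complex input: upcast to complex128
c = Array(_np.zeros(4, dtype=_np.float32))   # allowed dtype is preserved
d = Array(c, copy=False)     # no copy: d references c's underlying data
# Array([1., 2.], dtype=_np.int32)  # raises TypeError: int32 not supported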