def _prep_toilim_avg(self): """ Set up averaging data across trials given `toilim` selection Parameters ---------- self : Syncopy data object Input object that is being processed by the respective :func:`~syncopy.singlepanelplot` or :func:`~syncopy.multipanelplot` function/class method. Returns ------- tLengths : 1D :class:`numpy.ndarray` Array of length `nSelectedTrials` with each element encoding the number of samples contained in the provided `toilim` selection. Notes ----- If `tLengths` contains more than one unique element, a :class:`~syncopy.shared.errors.SPYValueError` is raised. Note further, that this is an auxiliary method that is intended purely for internal use. Please refer to the user-exposed methods :func:`~syncopy.singlepanelplot` and/or :func:`~syncopy.multipanelplot` to actually generate plots of Syncopy data objects. See also -------- :func:`~syncopy.singlepanelplot` : visualize Syncopy objects using single-panel figure(s) :func:`~syncopy.multipanelplot` : visualize Syncopy objects using multi-panel figure(s) """ tLengths = np.zeros((len(self._selection.trials), ), dtype=np.intp) for k, tsel in enumerate(self._selection.time): if not isinstance(tsel, slice): msg = "Cannot average `toilim` selection. Please check `.time` property for consistency. " raise SPYError(msg) start, stop = tsel.start, tsel.stop if start is None: start = 0 if stop is None: stop = self._get_time([self._selection.trials[k]], toilim=[-np.inf, np.inf])[0].stop tLengths[k] = stop - start if np.unique(tLengths).size > 1: lgl = "time-selections of equal length for averaging across trials" act = "time-selections of varying length" raise SPYValueError(legal=lgl, varname="toilim", actual=act) if tLengths[0] < 2: lgl = "time-selections containing at least two samples" act = "time-selections containing fewer than two samples" raise SPYValueError(legal=lgl, varname="toilim", actual=act) return tLengths
def _prep_plots(self, name, **inputs):
    """
    Helper performing most basal error checking for all plotting sub-routines

    Parameters
    ----------
    self : Syncopy data object
        Input object that is being processed by the respective
        :func:`~syncopy.singlepanelplot` or :func:`~syncopy.multipanelplot`
        function/class method.
    name : str
        Name of caller (i.e., "singlepanelplot" or "multipanelplot")
    inputs : dict
        Input arguments of caller (i.e., :func:`~syncopy.singlepanelplot` or
        :func:`~syncopy.multipanelplot`) collected in dictionary

    Returns
    -------
    Nothing : None

    Notes
    -----
    This is an auxiliary method intended purely for internal use. Please refer
    to the user-exposed methods :func:`~syncopy.singlepanelplot` and/or
    :func:`~syncopy.multipanelplot` to actually generate plots of Syncopy data
    objects.

    See also
    --------
    :meth:`syncopy.plotting._plot_spectral._prep_spectral_plots` : sanity checks and data selection for plotting :class:`~syncopy.SpectralData` objects
    :meth:`syncopy.plotting._plot_analog._prep_analog_plots` : sanity checks and data selection for plotting :class:`~syncopy.AnalogData` objects
    """
    # Without matplotlib there is nothing to plot
    if not __plt__:
        raise SPYError(pltErrMsg.format(name))

    # Reject any attempt to use an in-place `select` keyword: plotting routines
    # manage their own (method-specific) selections
    select = inputs.get("kwargs", {}).get("select")
    if select is not None:
        msg = "In-place data-selection not supported in plotting routines. " + \
            "Please use method-specific keywords (`trials`, `channels`, etc.) instead. "
        raise SPYError(msg)
def wrapper_cfg(*args, **kwargs):
    """
    Decorator-internal wrapper that extracts a FieldTrip-style `cfg`
    "structure" (a dict) from positional/keyword arguments, separates Syncopy
    data objects from other inputs, and finally invokes the wrapped `func`.

    Note: `func`, `StructDict`, `get_defaults` and `spy` are resolved from the
    enclosing (decorator/module) scope, not defined here.
    """

    # First, parse positional arguments for dict-type inputs (`k` counts the
    # no. of dicts provided) and convert tuple of positional args to list
    cfg = None
    k = 0
    args = list(args)
    for argidx, arg in enumerate(args):
        if isinstance(arg, dict):
            cfgidx = argidx
            k += 1

    # If a dict was found, assume it's a `cfg` dict and extract it from
    # the positional argument list; if more than one dict was found, abort
    if k == 1:
        cfg = args.pop(cfgidx)
    elif k > 1:
        raise SPYValueError(
            legal="single `cfg` input",
            varname="cfg",
            actual="{0:d} `cfg` objects in input arguments".format(k))

    # Now parse provided keywords for `cfg` entry - if `cfg` was already
    # provided as positional argument, abort
    if kwargs.get("cfg") is not None:
        if cfg:
            lgl = "`cfg` either as positional or keyword argument, not both"
            raise SPYValueError(legal=lgl, varname="cfg")
        cfg = kwargs.pop("cfg")

    # If `cfg` was detected either in positional or keyword arguments, process it
    if cfg:

        # If `cfg` is not dict-like, abort (`StructDict` is a `dict` child)
        if not isinstance(cfg, dict):
            raise SPYTypeError(cfg, varname="cfg", expected="dictionary-like")

        # IMPORTANT: create a copy of `cfg` using `StructDict` constructor to
        # not manipulate `cfg` in user's namespace!
        cfg = StructDict(cfg)  # FIXME

        # If a method is called using `cfg`, non-default values for
        # keyword arguments must *only* be provided via `cfg`
        defaults = get_defaults(func)
        for key, value in kwargs.items():
            if defaults.get(key, value) != value:
                raise SPYValueError(
                    legal="no keyword arguments",
                    varname=key,
                    actual="non-default value for {}".format(key))

        # Translate any existing "yes" and "no" fields to `True` and `False`
        for key in cfg.keys():
            if str(cfg[key]) == "yes":
                cfg[key] = True
            elif str(cfg[key]) == "no":
                cfg[key] = False

    # No explicit `cfg`: rename `kwargs` to `cfg` to consolidate processing below;
    # IMPORTANT: this does *not* create a copy of `kwargs`, thus the `pop`-ing
    # below actually manipulates `kwargs` as well - crucial for the `kwargs.get("data")`
    # error checking!
    else:
        cfg = kwargs

    # If `cfg` contains keys 'data' or 'dataset' extract corresponding
    # entry and make it a positional argument (abort if both 'data'
    # and 'dataset' are present)
    data = cfg.pop("data", None)
    if cfg.get("dataset"):
        if data:
            lgl = "either 'data' or 'dataset' in `cfg`/keywords, not both"
            raise SPYValueError(legal=lgl, varname="cfg")
        data = cfg.pop("dataset")

    # If `cfg` did not contain `data`, look into `kwargs`
    if data is None:
        data = kwargs.pop("data", None)
        if kwargs.get("dataset"):
            if data:
                lgl = "either `data` or `dataset` keyword, not both"
                raise SPYValueError(legal=lgl, varname="data/dataset")
            data = kwargs.pop("dataset")

    # If Syncopy data object(s) were provided convert single objects to one-element
    # lists, ensure positional args do *not* contain add'l objects; ensure keyword
    # args (besides `cfg`) do *not* contain add'l objects; ensure `data` exclusively
    # contains Syncopy data objects. Finally, rename remaining positional arguments
    # NOTE(review): if `data` is an empty, non-None container (e.g. `data=[]`),
    # neither branch below runs and `posargs` is never bound - confirm callers
    # cannot trigger this
    if data:
        if not isinstance(data, (tuple, list)):
            data = [data]
        if any([isinstance(arg, spy.datatype.base_data.BaseData)
                for arg in args]):
            lgl = "Syncopy data object(s) provided either via `cfg`/keyword or " +\
                "positional arguments, not both"
            raise SPYValueError(legal=lgl, varname="cfg/data")
        if kwargs.get("data") or kwargs.get("dataset"):
            lgl = "Syncopy data object(s) provided either via `cfg` or as " +\
                "keyword argument, not both"
            raise SPYValueError(legal=lgl, varname="cfg.data")
        if any([not isinstance(obj, spy.datatype.base_data.BaseData)
                for obj in data]):
            raise SPYError("`data` must be Syncopy data object(s)!")
        posargs = args

    # If `data` was not provided via `cfg` or as kw-arg, parse positional arguments
    if data is None:
        data = []
        posargs = []
        while args:
            arg = args.pop(0)
            if isinstance(arg, spy.datatype.base_data.BaseData):
                data.append(arg)
            else:
                posargs.append(arg)

    # Call function with unfolded `data` + modified positional/keyword args
    return func(*data, *posargs, **cfg)
def save(out, container=None, tag=None, filename=None, overwrite=False, memuse=100):
    r"""Save Syncopy data object to disk

    The underlying array data object is stored in a HDF5 file, the metadata in
    a JSON file. Both can be placed inside a Syncopy container, which is a
    regular directory with the extension '.spy'.

    Parameters
    ----------
    out : Syncopy data object
        Object to be stored on disk.
    container : str
        Path to Syncopy container folder (\*.spy) to be used for saving. If
        omitted, the extension '.spy' will be added to the folder name.
    tag : str
        Tag to be appended to container basename
    filename :  str
        Explicit path to data file. This is only necessary if the data should
        not be part of a container folder. An extension (\*.<dataclass>) is
        added if omitted. The `tag` argument is ignored.
    overwrite : bool
        If `True` an existing HDF5 file and its accompanying JSON file is
        overwritten (without prompt).
    memuse : scalar
        Approximate in-memory cache size (in MB) for writing data to disk
        (only relevant for :class:`syncopy.VirtualData` or memory map data sources)

    Returns
    -------
    Nothing : None

    Notes
    ------
    Syncopy objects may also be saved using the class method ``.save`` that
    acts as a wrapper for :func:`syncopy.save`, e.g.,

    >>> save(obj, container="new_spy_container")

    is equivalent to

    >>> obj.save(container="new_spy_container")

    However, once a Syncopy object has been saved, the class method ``.save``
    can be used as a shortcut to quick-save recent changes, e.g.,

    >>> obj.save()

    writes the current state of `obj` to the data/meta-data files on-disk
    associated with `obj` (overwriting both in the process). Similarly,

    >>> obj.save(tag='newtag')

    saves `obj` in the current container 'new_spy_container' under a different
    tag.

    Examples
    --------
    Save the Syncopy data object `obj` on disk in the current working directory
    without creating a spy-container

    >>> spy.save(obj, filename="session1")
    >>> # --> os.getcwd()/session1.<dataclass>
    >>> # --> os.getcwd()/session1.<dataclass>.info

    Save `obj` without creating a spy-container using an absolute path

    >>> spy.save(obj, filename="/tmp/session1")
    >>> # --> /tmp/session1.<dataclass>
    >>> # --> /tmp/session1.<dataclass>.info

    Save `obj` in a new spy-container created in the current working directory

    >>> spy.save(obj, container="container.spy")
    >>> # --> os.getcwd()/container.spy/container.<dataclass>
    >>> # --> os.getcwd()/container.spy/container.<dataclass>.info

    Save `obj` in a new spy-container created by providing an absolute path

    >>> spy.save(obj, container="/tmp/container.spy")
    >>> # --> /tmp/container.spy/container.<dataclass>
    >>> # --> /tmp/container.spy/container.<dataclass>.info

    Save `obj` in a new (or existing) spy-container under a different tag

    >>> spy.save(obj, container="session1.spy", tag="someTag")
    >>> # --> os.getcwd()/session1.spy/session1_someTag.<dataclass>
    >>> # --> os.getcwd()/session1.spy/session1_someTag.<dataclass>.info

    See also
    --------
    syncopy.load : load data created with :func:`syncopy.save`
    """

    # Make sure `out` is a valid Syncopy data object
    data_parser(out, varname="out", writable=None, empty=False)

    if filename is None and container is None:
        raise SPYError('filename and container cannot both be `None`')

    if container is not None and filename is None:
        # construct filename from container name
        if not isinstance(container, str):
            raise SPYTypeError(container, varname="container", expected="str")
        if not os.path.splitext(container)[1] == ".spy":
            container += ".spy"
        fileInfo = filename_parser(container)
        filename = os.path.join(fileInfo["folder"],
                                fileInfo["container"],
                                fileInfo["basename"])
        # handle tag
        if tag is not None:
            if not isinstance(tag, str):
                raise SPYTypeError(tag, varname="tag", expected="str")
            filename += '_' + tag

    elif container is not None and filename is not None:
        raise SPYError("container and filename cannot be used at the same time")

    if not isinstance(filename, str):
        raise SPYTypeError(filename, varname="filename", expected="str")

    # add extension if not part of the filename
    if "." not in os.path.splitext(filename)[1]:
        filename += out._classname_to_extension()

    # `memuse` must be a positive scalar (in MB)
    try:
        scalar_parser(memuse, varname="memuse", lims=[0, np.inf])
    except Exception as exc:
        raise exc

    if not isinstance(overwrite, bool):
        raise SPYTypeError(overwrite, varname="overwrite", expected="bool")

    # Parse filename for validity and construct full path to HDF5 file
    fileInfo = filename_parser(filename)
    if fileInfo["extension"] != out._classname_to_extension():
        raise SPYError("""Extension in filename ({ext}) does not match data class ({dclass})""".format(ext=fileInfo["extension"],
                                                                                                       dclass=out.__class__.__name__))
    dataFile = os.path.join(fileInfo["folder"], fileInfo["filename"])

    # If `out` is to replace its own on-disk representation, be more careful
    if overwrite and dataFile == out.filename:
        replace = True
    else:
        replace = False

    # Prevent `out` from trying to re-create its own data file: in replace
    # mode, re-use the already-open HDF5 handles backing `out`
    if replace:
        out.data.flush()
        h5f = out.data.file
        dat = out.data
        trl = h5f["trialdefinition"]

    # Otherwise create the target folder/file (honoring `overwrite`)
    else:
        if not os.path.exists(fileInfo["folder"]):
            try:
                os.makedirs(fileInfo["folder"])
            except IOError:
                raise SPYIOError(fileInfo["folder"])
            except Exception as exc:
                raise exc
        else:
            if os.path.exists(dataFile):
                if not os.path.isfile(dataFile):
                    raise SPYIOError(dataFile)
                if overwrite:
                    # open/close in "w" mode wipes the file; failure here most
                    # likely means the file is still open elsewhere
                    try:
                        h5f = h5py.File(dataFile, mode="w")
                        h5f.close()
                    except Exception as exc:
                        msg = "Cannot overwrite {} - file may still be open. "
                        msg += "Original error message below\n{}"
                        raise SPYError(msg.format(dataFile, str(exc)))
                else:
                    raise SPYIOError(dataFile, exists=True)
        h5f = h5py.File(dataFile, mode="w")

        # Save each member of `_hdfFileDatasetProperties` in target HDF file
        for datasetName in out._hdfFileDatasetProperties:
            dataset = getattr(out, datasetName)

            # Member is a memory map
            if isinstance(dataset, np.memmap):
                # Given memory cap, compute how many data blocks can be grabbed
                # per swipe (divide by 2 since we're working with an add'l tmp array)
                # NOTE(review): `memuse` is rescaled in place here, so a second
                # memmap dataset in the same object would see an inflated cap -
                # confirm `_hdfFileDatasetProperties` holds at most one memmap
                memuse *= 1024**2 / 2
                nrow = int(memuse / (np.prod(dataset.shape[1:]) * dataset.dtype.itemsize))
                rem = int(dataset.shape[0] % nrow)
                n_blocks = [nrow] * int(dataset.shape[0] // nrow) + [rem] * int(rem > 0)

                # Write data block-wise to dataset (use `clear` to wipe blocks of
                # mem-maps from memory)
                dat = h5f.create_dataset(datasetName,
                                         dtype=dataset.dtype,
                                         shape=dataset.shape)
                for m, M in enumerate(n_blocks):
                    dat[m * nrow:m * nrow + M, :] = out.data[m * nrow:m * nrow + M, :]
                    out.clear()

            # Member is a HDF5 dataset
            else:
                dat = h5f.create_dataset(datasetName, data=dataset)

    # Now write trial-related information
    trl_arr = np.array(out.trialdefinition)
    if replace:
        trl[()] = trl_arr
        trl.flush()
    else:
        trl = h5f.create_dataset("trialdefinition", data=trl_arr,
                                 maxshape=(None, trl_arr.shape[1]))

    # Write to log already here so that the entry can be exported to json
    infoFile = dataFile + FILE_EXT["info"]
    out.log = "Wrote files " + dataFile + "\n\t\t\t" + 2 * " " + infoFile

    # While we're at it, write cfg entries
    out.cfg = {"method": sys._getframe().f_code.co_name,
               "files": [dataFile, infoFile]}

    # Assemble dict for JSON output: order things by their "readability"
    outDict = OrderedDict(startInfoDict)
    outDict["filename"] = fileInfo["filename"]
    outDict["dataclass"] = out.__class__.__name__
    outDict["data_dtype"] = dat.dtype.name
    outDict["data_shape"] = dat.shape
    outDict["data_offset"] = dat.id.get_offset()
    outDict["trl_dtype"] = trl.dtype.name
    outDict["trl_shape"] = trl.shape
    outDict["trl_offset"] = trl.id.get_offset()
    if isinstance(out.data, np.ndarray):
        if np.isfortran(out.data):
            outDict["order"] = "F"
        else:
            outDict["order"] = "C"

    # Attach remaining info-file properties, converting arrays and nested
    # dicts to JSON-serializable equivalents
    for key in out._infoFileProperties:
        value = getattr(out, key)
        if isinstance(value, np.ndarray):
            value = value.tolist()
        # potentially nested dicts
        elif isinstance(value, dict):
            value = dict(value)
            _dict_converter(value)
        outDict[key] = value

    # Save relevant stuff as HDF5 attributes
    for key in out._hdfFileAttributeProperties:
        if outDict[key] is None:
            h5f.attrs[key] = "None"
        else:
            try:
                h5f.attrs[key] = outDict[key]
            except RuntimeError:
                # h5py raises RuntimeError when an attribute exceeds the 64k
                # header limit; truncate and point the user at the info file
                msg = "Too many entries in `{}` - truncating HDF5 attribute. " +\
                    "Please refer to {} for complete listing."
                info_fle = os.path.split(os.path.split(filename.format(ext=FILE_EXT["info"]))[0])[1]
                info_fle = os.path.join(info_fle,
                                        os.path.basename(filename.format(ext=FILE_EXT["info"])))
                SPYWarning(msg.format(key, info_fle))
                h5f.attrs[key] = [outDict[key][0], "...", outDict[key][-1]]

    # Re-assign filename after saving (and remove source in case it came from `__storage__`)
    if not replace:
        h5f.close()
        if __storage__ in out.filename:
            out.data.file.close()
            os.unlink(out.filename)
        out.data = dataFile

    # Compute checksum and finally write JSON (automatically overwrites existing)
    outDict["file_checksum"] = hash_file(dataFile)
    with open(infoFile, 'w') as out_json:
        json.dump(outDict, out_json, indent=4)

    return
def singlepanelplot(self, trials="all", channels="all", tapers="all",
                    toilim=None, foilim=None, avg_channels=True, avg_tapers=True,
                    interp="spline36", cmap="plasma", vmin=None, vmax=None,
                    title=None, grid=None, fig=None, **kwargs):
    """
    Plot contents of :class:`~syncopy.SpectralData` objects using single-panel figure(s)

    Please refer to :func:`syncopy.singlepanelplot` for detailed usage information.

    Examples
    --------
    Show frequency range 30-80 Hz of channel `'ecog_mua2'` averaged across
    trials 2, 4, and 6:

    >>> fig = spy.singlepanelplot(freqData, trials=[2, 4, 6], channels=["ecog_mua2"],
                                  foilim=[30, 80])

    Overlay channel `'ecog_mua3'` with same settings:

    >>> fig2 = spy.singlepanelplot(freqData, trials=[2, 4, 6], channels=['ecog_mua3'],
                                   foilim=[30, 80], fig=fig)

    Plot time-frequency contents of channel `'ecog_mua1'` present in both
    objects `tfData1` and `tfData2` using the 'viridis' colormap, a plot
    grid, manually defined lower and upper color value limits and no
    interpolation

    >>> fig1, fig2 = spy.singlepanelplot(tfData1, tfData2,
                                         channels=['ecog_mua1'],
                                         cmap="viridis", vmin=0.25, vmax=0.95,
                                         interp=None, grid=True, overlay=False)

    Note that overlay plotting is **not** supported for time-frequency objects.

    See also
    --------
    syncopy.singlepanelplot : visualize Syncopy data objects using single-panel plots
    """

    # Collect input arguments in dict `inputArgs` and process them
    inputArgs = locals()
    inputArgs.pop("self")
    (dimArrs, dimCounts, isTimeFrequency, complexConversion, pltDtype,
     dataLbl) = _prep_spectral_plots(self, "singlepanelplot", **inputArgs)
    (nTrials, nChan, nFreq, nTap) = dimCounts
    (trList, chArr, freqArr, tpArr) = dimArrs

    # If we're overlaying, ensure data and plot type match up
    if hasattr(fig, "objCount"):
        if isTimeFrequency:
            msg = "Overlay plotting not supported for time-frequency data"
            raise SPYError(msg)
        if not hasattr(fig, "spectralPlot"):
            lgl = "figure visualizing data from a Syncopy `SpectralData` object"
            act = "visualization of other Syncopy data"
            raise SPYValueError(legal=lgl, varname="fig", actual=act)
        if hasattr(fig, "multipanelplot"):
            lgl = "single-panel figure generated by `singleplot`"
            act = "multi-panel figure generated by `multipanelplot`"
            raise SPYValueError(legal=lgl, varname="fig", actual=act)

    # No time-frequency shenanigans: this is a simple power-spectrum (line-plot)
    if not isTimeFrequency:

        # Generic titles for figures
        overlayTitle = "Overlay of {} datasets"

        # Either create new figure or fetch existing
        if fig is None:
            fig, ax = _setup_figure(1, xLabel="Frequency [Hz]", yLabel=dataLbl, grid=grid)
            fig.spectralPlot = True
        else:
            ax, = fig.get_axes()

        # Average across channels, tapers or both using local helper func
        nTime = 1
        if not avg_channels and not avg_tapers and nTap > 1:
            msg = "Either channels or trials need to be averaged for single-panel plot"
            SPYWarning(msg)
            return
        if avg_channels and not avg_tapers:
            panelTitle = "{} tapers averaged across {} channels and {} trials".format(nTap, nChan, nTrials)
            pltArr = _compute_pltArr(self, nFreq, nTap, nTime, complexConversion,
                                     pltDtype, avg1="channel")
        if avg_tapers and not avg_channels:
            panelTitle = "{} channels averaged across {} tapers and {} trials".format(nChan, nTap, nTrials)
            pltArr = _compute_pltArr(self, nFreq, nChan, nTime, complexConversion,
                                     pltDtype, avg1="taper")
        if avg_tapers and avg_channels:
            panelTitle = "Average of {} channels, {} tapers and {} trials".format(nChan, nTap, nTrials)
            pltArr = _compute_pltArr(self, nFreq, 1, nTime, complexConversion,
                                     pltDtype, avg1="taper", avg2="channel")

        # Perform the actual plotting (log10-scaled power vs. frequency)
        ax.plot(freqArr, np.log10(pltArr),
                label=os.path.basename(self.filename))
        ax.set_xlim([freqArr[0], freqArr[-1]])

        # Set plot title depending on dataset overlay
        if fig.objCount == 0:
            if title is None:
                title = panelTitle
            ax.set_title(title, size=pltConfig["singleTitleSize"])
        else:
            handles, labels = ax.get_legend_handles_labels()
            ax.legend(handles, labels)
            if title is None:
                title = overlayTitle.format(len(handles))
            ax.set_title(title, size=pltConfig["singleTitleSize"])

    # Time-frequency case: render a single image panel
    else:

        # For a single-panel TF visualization, we need to average across both tapers + channels
        if not avg_channels and (not avg_tapers and nTap > 1):
            msg = "Single-panel time-frequency visualization requires averaging " +\
                "across both tapers and channels"
            SPYWarning(msg)
            return

        # Compute (and verify) length of selected time intervals and assemble array for plotting
        panelTitle = "Average of {} channels, {} tapers and {} trials".format(nChan, nTap, nTrials)
        tLengths = _prep_toilim_avg(self)
        nTime = tLengths[0]
        pltArr = _compute_pltArr(self, nFreq, 1, nTime, complexConversion,
                                 pltDtype, avg1="taper", avg2="channel")

        # Prepare figure
        fig, ax, cax = _setup_figure(1, xLabel="Time [s]", yLabel="Frequency [Hz]",
                                     include_colorbar=True, grid=grid)
        fig.spectralPlot = True

        # Use `imshow` to render array as image
        time = self.time[trList[0]][self._selection.time[0]]
        ax.imshow(pltArr, origin="lower", interpolation=interp, cmap=cmap,
                  vmin=vmin, vmax=vmax,
                  extent=(time[0], time[-1], freqArr[0], freqArr[-1]),
                  aspect="auto")
        cbar = _setup_colorbar(fig, ax, cax, label=dataLbl.replace(" [dB]", ""))
        if title is None:
            title = panelTitle
        ax.set_title(title, size=pltConfig["singleTitleSize"])

    # Increment overlay-counter and draw figure
    fig.objCount += 1
    plt.draw()
    # Plotting consumes the (temporary) selection set up by `_prep_spectral_plots`
    self._selection = None
    return fig
def multipanelplot(self, trials="all", channels="all", tapers="all",
                   toilim=None, foilim=None, avg_channels=False, avg_tapers=True,
                   avg_trials=True, panels="channels", interp="spline36",
                   cmap="plasma", vmin=None, vmax=None, title=None, grid=None,
                   fig=None, **kwargs):
    """
    Plot contents of :class:`~syncopy.SpectralData` objects using multi-panel figure(s)

    Please refer to :func:`syncopy.multipanelplot` for detailed usage information.

    Examples
    --------
    Use 16 panels to show frequency range 30-80 Hz of first 16 channels in
    `freqData` averaged across trials 2, 4, and 6:

    >>> fig = spy.multipanelplot(freqData, trials=[2, 4, 6], channels=range(16),
                                 foilim=[30, 80], panels="channels")

    Same settings, but each panel represents a trial:

    >>> fig = spy.multipanelplot(freqData, trials=[2, 4, 6], channels=range(16),
                                 foilim=[30, 80], panels="trials",
                                 avg_trials=False, avg_channels=True)

    Plot time-frequency contents of channels `'ecog_mua1'` and `'ecog_mua2'` of
    `tfData`

    >>> fig = spy.multipanelplot(tfData, channels=['ecog_mua1', 'ecog_mua2'])

    Note that multi-panel overlay plotting is **not** supported for
    :class:`~syncopy.SpectralData` objects.

    See also
    --------
    syncopy.multipanelplot : visualize Syncopy data objects using multi-panel plots
    """

    # Collect input arguments in dict `inputArgs` and process them
    inputArgs = locals()
    inputArgs.pop("self")
    (dimArrs, dimCounts, isTimeFrequency, complexConversion, pltDtype,
     dataLbl) = _prep_spectral_plots(self, "multipanelplot", **inputArgs)
    (nTrials, nChan, nFreq, nTap) = dimCounts
    (trList, chArr, freqArr, tpArr) = dimArrs

    # No overlaying here...
    if hasattr(fig, "objCount"):
        msg = "Overlays of multi-panel `SpectralData` plots not supported"
        raise SPYError(msg)

    # Ensure panel-specification makes sense and is compatible w/averaging selection
    if not isinstance(panels, str):
        raise SPYTypeError(panels, varname="panels", expected="str")
    if panels not in availablePanels:
        lgl = "'" + "or '".join(opt + "' " for opt in availablePanels)
        raise SPYValueError(legal=lgl, varname="panels", actual=panels)
    if (panels == "channels" and avg_channels) or (panels == "trials" and avg_trials) \
            or (panels == "tapers" and avg_tapers):
        msg = "Cannot use `panels = {}` and average across {} at the same time. "
        SPYWarning(msg.format(panels, panels))
        return

    # Ensure the proper amount of averaging was specified
    avgFlags = [avg_channels, avg_trials, avg_tapers]
    if sum(avgFlags) == 0 and nTap * nTrials > 1:
        msg = "Need to average across at least one of tapers, channels or trials " +\
            "for visualization. "
        SPYWarning(msg)
        return
    if sum(avgFlags) == 3:
        msg = "Averaging across trials, channels and tapers results in " +\
            "single-panel plot. Please use `singlepanelplot` instead"
        SPYWarning(msg)
        return
    if isTimeFrequency:
        if sum(avgFlags) != 2:
            msg = "Multi-panel time-frequency visualization requires averaging across " +\
                "two out of three dimensions (tapers, channels trials)"
            SPYWarning(msg)
            return

    # Prepare figure (same for all cases)
    if panels == "channels":
        npanels = nChan
    elif panels == "trials":
        npanels = nTrials
    else:   # ``panels == "tapers"``
        npanels = nTap

    # Construct subplot panel layout or vet provided layout
    nrow = kwargs.get("nrow", None)
    ncol = kwargs.get("ncol", None)
    if not isTimeFrequency:
        fig, ax_arr = _setup_figure(npanels, nrow=nrow, ncol=ncol,
                                    xLabel="Frequency [Hz]", yLabel=dataLbl,
                                    grid=grid, include_colorbar=False,
                                    sharex=True, sharey=True)
    else:
        fig, ax_arr, cax = _setup_figure(npanels, nrow=nrow, ncol=ncol,
                                         xLabel="Time [s]", yLabel="Frequency [Hz]",
                                         grid=grid, include_colorbar=True,
                                         sharex=True, sharey=True)

    # Monkey-patch object-counter to newly created figure
    fig.spectralPlot = True

    # Start with the "simple" case: "regular" spectra, no time involved
    if not isTimeFrequency:

        # We're not dealing w/TF data here
        nTime = 1
        N = 1

        # For each panel stratification, set corresponding positional and
        # keyword args for iteratively calling `_compute_pltArr`
        if panels == "channels":
            panelVar = "channel"
            panelValues = chArr
            panelTitles = chArr

            if not avg_trials and avg_tapers:
                avgDim1 = "taper"
                avgDim2 = None
                innerVar = "trial"
                innerValues = trList
                majorTitle = "{} trials averaged across {} tapers".format(nTrials, nTap)
                showLegend = True
            elif avg_trials and not avg_tapers:
                avgDim1 = None
                avgDim2 = None
                innerVar = "taper"
                innerValues = tpArr
                majorTitle = "{} tapers averaged across {} trials".format(nTap, nTrials)
                showLegend = True
            else:   # `avg_trials` and `avg_tapers`
                avgDim1 = "taper"
                avgDim2 = None
                innerVar = "trial"
                innerValues = ["all"]
                majorTitle = " Average of {} tapers and {} trials".format(nTap, nTrials)
                showLegend = False

        elif panels == "trials":
            panelVar = "trial"
            panelValues = trList
            panelTitles = ["Trial #{}".format(trlno) for trlno in trList]

            if not avg_channels and avg_tapers:
                avgDim1 = "taper"
                avgDim2 = None
                innerVar = "channel"
                innerValues = chArr
                majorTitle = "{} channels averaged across {} tapers".format(nChan, nTap)
                showLegend = True
            elif avg_channels and not avg_tapers:
                avgDim1 = "channel"
                avgDim2 = None
                innerVar = "taper"
                innerValues = tpArr
                majorTitle = "{} tapers averaged across {} channels".format(nTap, nChan)
                showLegend = True
            else:   # `avg_channels` and `avg_tapers`
                avgDim1 = "taper"
                avgDim2 = "channel"
                innerVar = "trial"
                innerValues = ["all"]
                majorTitle = " Average of {} channels and {} tapers".format(nChan, nTap)
                showLegend = False

        else:   # panels = "tapers"
            panelVar = "taper"
            panelValues = tpArr
            panelTitles = ["Taper #{}".format(tpno) for tpno in tpArr]

            if not avg_trials and avg_channels:
                avgDim1 = "channel"
                avgDim2 = None
                innerVar = "trial"
                innerValues = trList
                majorTitle = "{} trials averaged across {} channels".format(nTrials, nChan)
                showLegend = True
            elif avg_trials and not avg_channels:
                avgDim1 = None
                avgDim2 = None
                innerVar = "channel"
                innerValues = chArr
                majorTitle = "{} channels averaged across {} trials".format(nChan, nTrials)
                showLegend = True
            else:   # `avg_trials` and `avg_channels`
                avgDim1 = "channel"
                avgDim2 = None
                innerVar = "trial"
                innerValues = ["all"]
                majorTitle = " Average of {} channels and {} trials".format(nChan, nTrials)
                showLegend = False

        # Loop over panels, within each panel, loop over `innerValues` to (potentially)
        # plot multiple spectra per panel
        # (note: `kwargs` is deliberately re-bound here as the argument dict
        # for `_compute_pltArr`)
        kwargs = {"avg1": avgDim1, "avg2": avgDim2}
        for panelCount, panelVal in enumerate(panelValues):
            kwargs[panelVar] = panelVal
            for innerVal in innerValues:
                kwargs[innerVar] = innerVal
                pltArr = _compute_pltArr(self, nFreq, N, nTime, complexConversion,
                                         pltDtype, **kwargs)
                ax_arr[panelCount].plot(freqArr, np.log10(pltArr),
                                        label=innerVar.capitalize() + " " + str(innerVal))
            ax_arr[panelCount].set_title(panelTitles[panelCount],
                                         size=pltConfig["multiTitleSize"])
        if showLegend:
            handles, labels = ax_arr[0].get_legend_handles_labels()
            ax_arr[0].legend(handles, labels)
        if title is None:
            fig.suptitle(majorTitle, size=pltConfig["singleTitleSize"])

    # Now, multi-panel time-frequency visualizations
    else:

        # Compute (and verify) length of selected time intervals
        tLengths = _prep_toilim_avg(self)
        nTime = tLengths[0]
        time = self.time[trList[0]][self._selection.time[0]]
        N = 1

        if panels == "channels":
            panelVar = "channel"
            panelValues = chArr
            panelTitles = chArr
            majorTitle = " Average of {} tapers and {} trials".format(nTap, nTrials)
            avgDim1 = "taper"
            avgDim2 = None
        elif panels == "trials":
            panelVar = "trial"
            panelValues = trList
            panelTitles = ["Trial #{}".format(trlno) for trlno in trList]
            majorTitle = " Average of {} channels and {} tapers".format(nChan, nTap)
            avgDim1 = "taper"
            avgDim2 = "channel"
        else:   # panels = "tapers"
            panelVar = "taper"
            panelValues = tpArr
            panelTitles = ["Taper #{}".format(tpno) for tpno in tpArr]
            majorTitle = " Average of {} channels and {} trials".format(nChan, nTrials)
            avgDim1 = "channel"
            avgDim2 = None

        # Loop over panels, within each panel, loop over `innerValues` to (potentially)
        # plot multiple spectra per panel; track per-panel extrema so a shared
        # colorbar can be set up afterwards
        kwargs = {"avg1": avgDim1, "avg2": avgDim2}
        vmins = []
        vmaxs = []
        for panelCount, panelVal in enumerate(panelValues):
            kwargs[panelVar] = panelVal
            pltArr = _compute_pltArr(self, nFreq, N, nTime, complexConversion,
                                     pltDtype, **kwargs)
            vmins.append(pltArr.min())
            vmaxs.append(pltArr.max())
            ax_arr[panelCount].imshow(pltArr, origin="lower", interpolation=interp,
                                      cmap=cmap,
                                      extent=(time[0], time[-1], freqArr[0], freqArr[-1]),
                                      aspect="auto")
            ax_arr[panelCount].set_title(panelTitles[panelCount],
                                         size=pltConfig["multiTitleSize"])

        # Render colorbar
        if vmin is None:
            vmin = min(vmins)
        if vmax is None:
            vmax = max(vmaxs)
        cbar = _setup_colorbar(fig, ax_arr, cax, label=dataLbl.replace(" [dB]", ""),
                               outline=False, vmin=vmin, vmax=vmax)
        if title is None:
            fig.suptitle(majorTitle, size=pltConfig["singleTitleSize"])

    # Increment overlay-counter and draw figure
    fig.objCount += 1
    plt.draw()
    # Plotting consumes the (temporary) selection set up by `_prep_spectral_plots`
    self._selection = None
    return fig
def load(filename, tag=None, dataclass=None, checksum=False, mode="r+", out=None):
    """
    Load Syncopy data object(s) from disk

    Either loads single files within or outside of '.spy'-containers or loads
    multiple objects from a single '.spy'-container. Loading from containers
    can be further controlled by imposing restrictions on object class(es)
    (via `dataclass`) and file-name tag(s) (via `tag`).

    Parameters
    ----------
    filename : str
        Either path to Syncopy container folder (\\*.spy, if omitted, the
        extension '.spy' will be appended) or name of data or metadata file.
        If `filename` points to a container and no further specifications are
        provided, the entire contents of the container is loaded. Otherwise,
        specific objects may be selected using the `dataclass` or `tag`
        keywords (see below).
    tag : None or str or list
        If `filename` points to a container, `tag` may be used to filter
        objects by filename-`tag`. Multiple tags can be provided using a list,
        e.g., ``tag = ['experiment1', 'experiment2']``. Can be combined with
        `dataclass` (see below). Invalid if `filename` points to a single file.
    dataclass : None or str or list
        If provided, only objects of provided dataclass are loaded from disk.
        Available options are '.analog', '.spectral', '.spike' and '.event'
        (as listed in ``spy.FILE_EXT["data"]``). Multiple class specifications
        can be provided using a list, e.g., ``dataclass = ['.analog', '.spike']``.
        Can be combined with `tag` (see above) and is also valid if `filename`
        points to a single file (e.g., to ensure loaded object is of a
        specific type).
    checksum : bool
        If `True`, checksum-matching is performed on loaded object(s) to ensure
        data-integrity (impairs performance particularly when loading large
        files).
    mode : str
        Data access mode of loaded objects (can be 'r' for read-only, 'r+' or
        'w' for read/write access).
    out : Syncopy data object
        Empty object to be filled with data loaded from disk. Has to match the
        type of the on-disk file (e.g., ``filename = 'mydata.analog'`` requires
        `out` to be a :class:`syncopy.AnalogData` object). Can only be used
        when loading single objects from disk (`out` is ignored when multiple
        files are loaded from a container).

    Returns
    -------
    Nothing : None
        If a single file is loaded and `out` was provided, `out` is filled with
        data loaded from disk, i.e., :func:`syncopy.load` does **not** create a
        new object
    obj : Syncopy data object
        If a single file is loaded and `out` was `None`, :func:`syncopy.load`
        returns a new object.
    objdict : dict
        If multiple files are loaded, :func:`syncopy.load` creates a new object
        for each file and places them in a dictionary whose keys are the
        base-names (sans path) of the corresponding files.

    Notes
    -----
    All of Syncopy's classes offer (limited) support for data loading upon
    object creation. Just as the class method ``.save`` can be used as a
    shortcut for :func:`syncopy.save`, Syncopy objects can be created from
    Syncopy data-files upon creation, e.g.,

    >>> adata = spy.AnalogData('/path/to/session1.analog')

    creates a new :class:`syncopy.AnalogData` object and immediately fills it
    with data loaded from the file "/path/to/session1.analog".

    Since only one object can be created at a time, this loading shortcut only
    supports single file specifications (i.e., ``spy.AnalogData("container.spy")``
    is invalid).

    Examples
    --------
    Load all objects found in the spy-container "sessionName" (the extension
    ".spy" may or may not be provided)

    >>> objectDict = spy.load("sessionName")
    >>> # --> returns a dict with base-filenames as keys

    Load all :class:`syncopy.AnalogData` and :class:`syncopy.SpectralData`
    objects from the spy-container "sessionName"

    >>> objectDict = spy.load("sessionName.spy", dataclass=['analog', 'spectral'])

    Load a specific :class:`syncopy.AnalogData` object from the above
    spy-container

    >>> obj = spy.load("sessionName.spy/sessionName_someTag.analog")

    This is equivalent to

    >>> obj = spy.AnalogData("sessionName.spy/sessionName_someTag.analog")

    If the "sessionName" spy-container only contains one object with the tag
    "someTag", the above call is equivalent to

    >>> obj = spy.load("sessionName.spy", tag="someTag")

    If there are multiple objects of different types using the same tag
    "someTag", the above call can be further narrowed down to only load the
    requested :class:`syncopy.AnalogData` object

    >>> obj = spy.load("sessionName.spy", tag="someTag", dataclass="analog")

    See also
    --------
    syncopy.save : save syncopy object on disk
    """

    # Ensure `filename` is either a valid .spy container or data file: if `filename`
    # is a directory w/o '.spy' extension, append it
    if not isinstance(filename, str):
        raise SPYTypeError(filename, varname="filename", expected="str")
    if len(os.path.splitext(os.path.abspath(os.path.expanduser(filename)))[1]) == 0:
        filename += FILE_EXT["dir"]
    fileInfo = filename_parser(filename)

    # Normalize `tag` into a list of glob patterns; tags only make sense for
    # containers, not for single-file specifications
    if tag is not None:
        # `list(...)` copy: avoids mutating the caller's list and supports tuples
        tags = [tag] if isinstance(tag, str) else list(tag)
        array_parser(tags, varname="tag", ntype=str)
        if fileInfo["filename"] is not None:
            raise SPYError("Only containers can be loaded with `tag` keyword!")
        tags = ["*" + tg + "*" for tg in tags]
    else:
        tags = "*"

    # If `dataclass` was provided, format it for our needs (e.g. 'spike' -> ['.spike'])
    if dataclass is not None:
        if isinstance(dataclass, str):
            dataclass = [dataclass]
        array_parser(dataclass, varname="dataclass", ntype=str)
        dataclass = ["." + dclass if not dclass.startswith(".") else dclass
                     for dclass in dataclass]
        extensions = set(dataclass).intersection(FILE_EXT["data"])
        if len(extensions) == 0:
            lgl = "extension(s) '" + "or '".join(ext + "' " for ext in FILE_EXT["data"])
            raise SPYValueError(legal=lgl, varname="dataclass", actual=str(dataclass))

    # Avoid any misunderstandings here...
    if not isinstance(checksum, bool):
        raise SPYTypeError(checksum, varname="checksum", expected="bool")

    # Abuse `AnalogData.mode`-setter to vet `mode`
    spd.AnalogData().mode = mode

    # If `filename` points to a spy container, `glob` what's inside, otherwise just load
    if fileInfo["filename"] is None:

        if dataclass is None:
            extensions = FILE_EXT["data"]
        container = os.path.join(fileInfo["folder"], fileInfo["container"])
        fileList = []
        for ext in extensions:
            # NB: do not shadow the `tag` parameter with the loop variable
            for tagPattern in tags:
                fileList.extend(glob(os.path.join(container, tagPattern + ext)))
        if len(fileList) == 0:
            fsloc = os.path.join(container, "" +
                                 "or ".join(tg + " " for tg in tags) +
                                 "with extensions " +
                                 "or ".join(ext + " " for ext in extensions))
            raise SPYIOError(fsloc, exists=False)
        if len(fileList) == 1:
            return _load(fileList[0], checksum, mode, out)

        # Multiple matches: load each file into a fresh object, ignore `out`
        if out is not None:
            msg = "When loading multiple objects, the `out` keyword is ignored"
            SPYWarning(msg)
        objectDict = {}
        for fname in fileList:
            obj = _load(fname, checksum, mode, None)
            objectDict[os.path.basename(obj.filename)] = obj
        return objectDict

    else:

        # Single file: optionally vet its extension against `dataclass`
        if dataclass is not None and \
                os.path.splitext(fileInfo["filename"])[1] not in dataclass:
            lgl = "extension '" + "or '".join(dclass + "' " for dclass in dataclass)
            raise SPYValueError(legal=lgl, varname="filename",
                                actual=fileInfo["filename"])
        return _load(filename, checksum, mode, out)
def _load(filename, checksum, mode, out):
    """
    Local helper: load a single Syncopy HDF5 file plus its JSON info file.

    Parameters match those of :func:`syncopy.load`; `filename` must point to a
    single data file (not a container). Returns a newly created object if
    `out` was `None`, otherwise fills `out` in place and returns `None`.
    """

    fileInfo = filename_parser(filename)
    hdfFile = os.path.join(fileInfo["folder"], fileInfo["filename"])
    jsonFile = hdfFile + FILE_EXT["info"]

    # Both the HDF5 payload and its JSON sidecar must exist on disk
    _ = io_parser(hdfFile, varname="hdfFile", isfile=True, exists=True)
    _ = io_parser(jsonFile, varname="jsonFile", isfile=True, exists=True)

    with open(jsonFile, "r") as file:
        jsonDict = json.load(file)

    if "dataclass" not in jsonDict:
        raise SPYError(
            "Info file {} does not contain a dataclass field".format(jsonFile))

    if hasattr(spd, jsonDict["dataclass"]):
        dataclass = getattr(spd, jsonDict["dataclass"])
    else:
        # FIX: `"{class}".format(value)` raised `KeyError: 'class'` (named
        # field w/positional argument) instead of the intended `SPYError`
        raise SPYError("Unknown data class {cls}".format(
            cls=jsonDict["dataclass"]))

    # Every generic info key plus all class-specific properties must be present
    requiredFields = tuple(startInfoDict.keys()) + dataclass._infoFileProperties
    for key in requiredFields:
        if key not in jsonDict:
            raise SPYError(
                "Required field {field} for {cls} not in {file}".format(
                    field=key, cls=dataclass.__name__, file=jsonFile))

    # If `_hdr` is an empty list, set it to `None` to not confuse meta-functions
    hdr = jsonDict.get("_hdr")
    if isinstance(hdr, (list, np.ndarray)) and len(hdr) == 0:
        jsonDict["_hdr"] = None

    # FIXME: add version comparison (syncopy.__version__ vs jsonDict["_version"])

    # If wanted, perform checksum matching
    if checksum:
        hsh_msg = "hash = {hsh:s}"
        hsh = hash_file(hdfFile)
        if hsh != jsonDict["file_checksum"]:
            raise SPYValueError(
                legal=hsh_msg.format(hsh=jsonDict["file_checksum"]),
                varname=os.path.basename(hdfFile),
                actual=hsh_msg.format(hsh=hsh))

    # Parsing is done, create new or check provided object
    if out is not None:
        data_parser(out, varname="out", writable=True,
                    dataclass=jsonDict["dataclass"])
        new_out = False
    else:
        out = dataclass()
        new_out = True

    # First and foremost, assign dimensional information
    dimord = jsonDict.pop("dimord")
    out.dimord = dimord

    # Access data on disk (error checking is done by setters)
    # NOTE(review): each dataset gets its own read-only `h5py.File` handle that
    # is intentionally kept open by the assigned dataset — confirm lifetime is
    # managed by the property setters
    out.mode = mode
    for datasetProperty in out._hdfFileDatasetProperties:
        setattr(out, datasetProperty,
                h5py.File(hdfFile, mode="r")[datasetProperty])

    # Abuse ``definetrial`` to set trial-related props
    trialdef = h5py.File(hdfFile, mode="r")["trialdefinition"][()]
    out.definetrial(trialdef)

    # Assign metadata (`dimord` was popped above and is handled separately)
    for key in [prop for prop in dataclass._infoFileProperties
                if prop != "dimord"]:
        setattr(out, key, jsonDict[key])

    # Write `cfg` entries ("_load" -> "load")
    thisMethod = sys._getframe().f_code.co_name.replace("_", "")
    out.cfg = {"method": thisMethod, "files": [hdfFile, jsonFile]}

    # Write log-entry; pad second line so both file paths line up visually
    msg = "Read files v. {ver:s} ".format(ver=jsonDict["_version"])
    msg += "{hdf:s}\n\t" + (len(msg) + len(thisMethod) + 2) * " " + "{json:s}"
    out.log = msg.format(hdf=hdfFile, json=jsonFile)

    # Happy breakdown
    return out if new_out else None
def singlepanelplot(*data, trials="all", channels="all", tapers="all",
                    toilim=None, foilim=None, avg_channels=True, avg_tapers=True,
                    interp="spline36", cmap="plasma", vmin=None, vmax=None,
                    title=None, grid=None, overlay=True, fig=None, **kwargs):
    """
    Plot contents of Syncopy data object(s) using single-panel figure(s)

    **Usage Summary**

    List of Syncopy data objects and respective valid plotting commands/selectors:

    :class:`~syncopy.AnalogData` : trials, channels, toi/toilim

        Examples

        >>> fig1, fig2 = spy.singlepanelplot(data1, data2, channels=["channel1", "channel2"], overlay=False)
        >>> cfg = spy.StructDict()
        >>> cfg.trials = [5, 3, 0]; cfg.toilim = [0.25, 0.5]
        >>> fig = spy.singlepanelplot(cfg, data1, data2, overlay=True)

    :class:`~syncopy.SpectralData` : trials, channels, tapers, toi/toilim, foi/foilim

        Examples

        >>> fig1, fig2 = spy.singlepanelplot(data1, data2, channels=["channel1", "channel2"],
                                             tapers=[3, 0], foilim=[30, 80], avg_channels=False,
                                             avg_tapers=True, grid=True, overlay=False)
        >>> cfg = spy.StructDict()
        >>> cfg.trials = [1, 0, 3]; cfg.toilim = [-0.25, 0.5]; cfg.vmin=0.2; cfg.vmax=1.0
        >>> fig = spy.singlepanelplot(cfg, tfData1)

    Parameters
    ----------
    data : Syncopy data object(s)
        One or more non-empty Syncopy data object(s). **Note**: if multiple
        datasets are provided, they must be all of the same type (e.g.,
        :class:`~syncopy.AnalogData`) and should contain the same or at
        least comparable channels, trials etc. Consequently, some keywords are
        only valid for certain types of Syncopy objects, e.g., `foilim` is not
        a valid plotting-selector for an :class:`~syncopy.AnalogData` object.
    trials : list (integers) or None or "all"
        Trials to average across. Either list of integers representing trial numbers
        (can include repetitions and need not be sorted), "all" or `None`. If
        `data` is a (series of) :class:`~syncopy.AnalogData` object(s), `trials`
        may be `None`, so that no trial information is used and the raw contents
        of provided input dataset(s) is plotted (**Warning**: depending on the
        size of the supplied dataset(s), this might be very memory-intensive).
        For all other Syncopy data objects, `trials` must not be `None`.
    channels : list (integers or strings), slice, range or "all"
        Channel-selection; can be a list of channel names (``['channel3', 'channel1']``),
        a list of channel indices (``[3, 5]``), a slice (``slice(3, 10)``) or
        range (``range(3, 10)``). Selections can be unsorted and may include
        repetitions. If multiple input objects are provided, `channels` needs to be a
        valid selector for all supplied datasets.
    tapers : list (integers or strings), slice, range or "all"
        Taper-selection; can be a list of taper names (``['dpss-win-1', 'dpss-win-3']``),
        a list of taper indices (``[3, 5]``), a slice (``slice(3, 10)``) or range
        (``range(3, 10)``). Selections can be unsorted and may include repetitions
        but must match exactly, be finite and not NaN. If multiple input objects
        are provided, `tapers` needs to be a valid selector for all supplied datasets.
    toilim : list (floats [tmin, tmax]) or None
        Time-window ``[tmin, tmax]`` (in seconds) to be extracted from each trial.
        Window specifications must be sorted and not NaN but may be unbounded.
        Boundaries `tmin` and `tmax` are included in the selection. If `toilim`
        is `None`, the entire time-span in each trial is selected. If multiple
        input objects are provided, `toilim` needs to be a valid selector for
        all supplied datasets. **Note** `toilim` is only a valid selector if
        `trials` is not `None`.
    foilim : list (floats [fmin, fmax]) or "all"
        Frequency-window ``[fmin, fmax]`` (in Hz) to be extracted from each trial;
        Window specifications must be sorted and not NaN but may be unbounded.
        Boundaries `fmin` and `fmax` are included in the selection. If `foilim`
        is `None` or "all", all frequencies are selected for plotting. If
        multiple input objects are provided, `foilim` needs to be a valid
        selector for all supplied datasets.
    avg_channels : bool
        If `True`, plot input dataset(s) averaged across channels specified by
        `channels`. If `False`, no averaging is performed resulting in multiple
        plots, each representing a single channel.
    avg_tapers : bool
        If `True`, plot :class:`~syncopy.SpectralData` objects averaged across
        tapers specified by `tapers`. If `False`, no averaging is performed
        resulting in multiple plots, each representing a single taper.
    interp : str or None
        Interpolation method used for plotting two-dimensional contour maps
        such as time-frequency power spectra. To see a list of available
        interpolation methods use the command ``list(mpl.image._interpd_.keys())``.
        Please consult the matplotlib documentation for more details.
        Has no effect on line-plots.
    cmap : str
        Colormap used for plotting two-dimensional contour maps
        such as time-frequency power spectra. To see a list of available
        color-maps use the command ``list(mpl.cm._cmap_registry.keys())``.
        Please consult the matplotlib documentation for more details.
        Has no effect on line-plots.
    vmin : float or None
        Lower bound of data-range covered by colormap when plotting two-dimensional
        contour maps such as time-frequency power spectra. If `vmin` is `None`
        the minimal (absolute) value of the shown dataset is used. When
        comparing multiple contour maps, all visualizations should use the same
        `vmin` to ensure quantitative similarity of peak values.
    vmax : float or None
        Upper bound of data-range covered by colormap when plotting two-dimensional
        contour maps such as time-frequency power spectra. If `vmax` is `None`
        the maximal (absolute) value of the shown dataset is used. When
        comparing multiple contour maps, all visualizations should use the same
        `vmax` to ensure quantitative similarity of peak values.
    title : str or None
        If `str`, `title` specifies an axis panel-title, if `None`, an
        auto-generated title is used.
    grid : bool or None
        If `True`, grid-lines are drawn, if `None` or `False` no grid-lines
        are rendered.
    overlay : bool
        If `True`, and multiple input objects were provided, supplied datasets are
        plotted on top of each other (in the order of submission). If a single
        object was provided, ``overlay = True`` and `fig` is a
        :class:`~matplotlib.figure.Figure`, the supplied dataset is overlaid on
        top of any existing plot(s) in `fig`. **Note 1**: using an existing
        figure to overlay dataset(s) is only supported for figures created with
        this routine. **Note 2**: overlay-plotting is *not* supported for
        time-frequency :class:`~syncopy.SpectralData` objects.
    fig : matplotlib.figure.Figure or None
        If `None`, new :class:`~matplotlib.figure.Figure` instance(s) are created
        for provided input dataset(s). If `fig` is a
        :class:`~matplotlib.figure.Figure`, the code attempts to overlay
        provided input dataset(s) on top of existing plots in `fig`. **Note**:
        overlay-plots are only supported for figures generated with this routine.
        Only a single figure can be provided. Thus, in case of multiple input
        datasets with ``overlay = False``, any supplied `fig` is ignored.

    Returns
    -------
    fig : (list of) matplotlib.figure.Figure instance(s)
        Either single figure (single input dataset or multiple input datasets
        with ``overlay = True``) or list of figures (multiple input datasets
        and ``overlay = False``).

    Notes
    -----
    This function uses `matplotlib <https://matplotlib.org/>`_ to render data
    visualizations. Thus, usage of Syncopy's plotting capabilities requires
    a working matplotlib installation.

    The actual rendering is performed by class methods specific to the provided
    input object types (e.g., :class:`~syncopy.AnalogData`). Thus,
    :func:`~syncopy.singlepanelplot` is mainly a convenience function and management
    routine that invokes the appropriate drawing code.

    Data subset selection for plotting is performed using :func:`~syncopy.selectdata`,
    thus additional in-place data-selection via a `select` keyword is **not** supported.

    Examples
    --------
    Please refer to the respective `singlepanelplot` class methods for detailed usage
    examples specific to the respective Syncopy data object type.

    See also
    --------
    :func:`~syncopy.multipanelplot` : visualize Syncopy objects using multi-panel figure(s)
    :meth:`syncopy.AnalogData.singlepanelplot` : `singlepanelplot` for :class:`~syncopy.AnalogData` objects
    :meth:`syncopy.SpectralData.singlepanelplot` : `singlepanelplot` for :class:`~syncopy.SpectralData` objects
    """

    # Abort if matplotlib is not available: FIXME -> `_prep_plots`?
    if not __plt__:
        raise SPYError(pltErrMsg.format("singlepanelplot"))

    # Guard against empty `*data`: fail with a clear message instead of an
    # opaque `IndexError` from `data[0]` below
    if len(data) == 0:
        raise SPYError("Please provide at least one Syncopy data object as input")

    # Collect all keywords of corresponding class-method (w/possibly user-provided
    # values) in dictionary
    defaults = get_defaults(data[0].singlepanelplot)
    lcls = locals()
    kwords = {}
    for kword in defaults:
        kwords[kword] = lcls[kword]

    # Call plotting manager
    return _anyplot(*data, overlay=overlay, method="singlepanelplot", **kwords, **kwargs)