def __init__(self, arg):
    """Wrap an existing RecordingExtractor, or build one from a spec dict.

    *arg* is either a se.RecordingExtractor instance (delegated to directly)
    or a dict containing at least 'path' and optionally 'download_from'.
    Raises Exception when the path is missing, is not a file, or cannot be
    realized locally.
    """
    super().__init__()
    self._hash = None
    if isinstance(arg, se.RecordingExtractor):
        # Already a recording extractor -- wrap it and mirror its properties.
        self._recording = arg
        self.copy_channel_properties(recording=self._recording)
        return
    self._recording = None
    self._client = MountainClient()
    if 'download_from' in arg:
        self._client.configDownloadFrom(arg['download_from'])
    if 'path' not in arg:
        raise Exception('Unable to initialize recording extractor')
    path = arg['path']
    if not self._client.isFile(path):
        raise Exception('Not a file: {}'.format(path))
    file_path = self._client.realizeFile(path=path)
    if not file_path:
        raise Exception(
            'Unable to realize file: {}'.format(path))
    self._init_from_file(file_path, original_path=path, kwargs=arg)
def javascript_state_changed(self, prev_state, state):
    """React to a front-end state change: run the selected clustering
    algorithm over every bundled dataset and push the results back.
    """
    self._set_status('running', 'Running clustering')

    alg_name = state.get('alg_name', 'none')
    # Per-algorithm argument dicts keyed by algorithm name.
    alg_args = state.get('alg_arguments', dict()).get(alg_name, {})

    client = MountainClient()
    client.configDownloadFrom('spikeforest.public')

    # The dataset manifest ships alongside this module.
    module_dir = os.path.dirname(os.path.realpath(__file__))
    manifest_path = os.path.join(module_dir, 'clustering_datasets.json')
    with open(manifest_path, 'r') as f:
        datasets = json.load(f)

    for ds in datasets['datasets']:
        print('Loading {}'.format(ds['path']))
        local_path = client.realizeFile(ds['path'])
        if not local_path:
            print('Unable to realize file: {}'.format(ds['path']))
            continue
        ds['data'] = self._load_dataset_data(local_path)
        if alg_name:
            print('Clustering...')
            ds['labels'] = self._do_clustering(
                ds['data'], alg_name, alg_args,
                dict(true_num_clusters=ds['trueNumClusters']))

    self._set_state(
        algorithms=self._algorithms,
        datasets=datasets
    )
    self._set_status('finished', 'Finished clustering')
def javascript_state_changed(self, prev_state, state):
    """React to a front-end state change: realize the referenced .npy data
    file, load it, and publish the array back into the component state.
    """
    self._set_status('running', 'Running SectorPlot')

    data_samples_path = state.get('data_samples_path', None)
    if not data_samples_path:
        self._set_error('Missing data_samples_path')
        return

    client = MountainClient()
    download_from = state.get('download_from', None)
    if download_from:
        client.configDownloadFrom(state.get('download_from'))

    print(data_samples_path)
    self._set_status('running', 'Realizing file: {}'.format(data_samples_path))
    local_path = client.realizeFile(data_samples_path)
    if not local_path:
        self._set_error(
            'Unable to realize file: {}'.format(data_samples_path))
        return

    self._set_status('running', 'Loading file.')
    # NOTE(review): assumes the realized file is a NumPy .npy archive -- the
    # loaded shape is printed for debugging.
    data_samples = np.load(local_path)
    print(data_samples.shape)
    self._set_state(data_samples=data_samples, status='finished', status_message='')
def javascript_state_changed(self, prev_state, state):
    """React to a front-end state change: load the markdown file named in
    the state and publish its text content.
    """
    client = MountainClient()

    path = state.get('path', None)
    if not path:
        self._set_error('Missing: path')
        return

    download_from = state.get('download_from', None)
    if download_from:
        client.configDownloadFrom(download_from)

    self._set_status('running', 'Loading markdown file {}'.format(path))
    txt = client.loadText(path)
    if not txt:
        self._set_error('Unable to load text from file: {}'.format(path))
        return

    self._set_state(content=txt, status='finished', status_message='')
class AutoSortingExtractor(se.SortingExtractor):
    """SortingExtractor that wraps an existing extractor or loads one from
    a spec dict.

    *arg* is either a se.SortingExtractor instance (delegated to directly)
    or a dict with:
      - 'path': path to a firings file; only '.mda' is supported (required)
      - 'download_from': optional download source for the client
      - 'samplerate': required when loading a '.mda' file
    """

    def __init__(self, arg):
        super().__init__()
        self._hash = None
        if isinstance(arg, se.SortingExtractor):
            # Already a sorting extractor -- wrap it and mirror its properties.
            self._sorting = arg
            self.copy_unit_properties(sorting=self._sorting)
        else:
            self._sorting = None
            self._client = MountainClient()
            if 'download_from' in arg:
                self._client.configDownloadFrom(arg['download_from'])
            if 'path' in arg:
                path = arg['path']
                if self._client.isFile(path):
                    file_path = self._client.realizeFile(path=path)
                    if not file_path:
                        # BUG FIX: report the path that failed to realize;
                        # previously this formatted file_path, which is the
                        # falsy result of the failed realizeFile() call.
                        raise Exception(
                            'Unable to realize file: {}'.format(path))
                    self._init_from_file(file_path, original_path=path, kwargs=arg)
                else:
                    raise Exception('Not a file: {}'.format(path))
            else:
                raise Exception('Unable to initialize sorting extractor')

    def _init_from_file(self, path: str, *, original_path: str, kwargs: dict):
        """Build the underlying extractor from a realized local file.

        Raises Exception when 'samplerate' is missing or the original path
        is not a supported ('.mda') format.
        """
        if original_path.endswith('.mda'):
            if 'samplerate' not in kwargs:
                raise Exception('Missing argument: samplerate')
            samplerate = kwargs['samplerate']
            self._sorting = MdaSortingExtractor(firings_file=path, samplerate=samplerate)
            hash0 = self._client.sha1OfObject(dict(
                firings_path=self._client.computeFileSha1(path),
                samplerate=samplerate
            ))
            # NOTE(review): this deliberately shadows the hash() method on
            # this instance with a string; hash() below handles both cases
            # when delegating to a wrapped extractor.
            setattr(self, 'hash', hash0)
        else:
            raise Exception('Unsupported format for {}'.format(original_path))

    def hash(self):
        """Return a content hash, computing and caching it on first use."""
        if not self._hash:
            if hasattr(self._sorting, 'hash'):
                # The wrapped extractor's 'hash' may be a plain string
                # (see _init_from_file) or a callable.
                if isinstance(self._sorting.hash, str):
                    self._hash = self._sorting.hash
                else:
                    self._hash = self._sorting.hash()
            else:
                self._hash = _samplehash(self._sorting)
        return self._hash

    def get_unit_ids(self):
        return self._sorting.get_unit_ids()

    def get_unit_spike_train(self, **kwargs):
        return self._sorting.get_unit_spike_train(**kwargs)

    def get_sampling_frequency(self):
        return self._sorting.get_sampling_frequency()
from copy import deepcopy from mountaintools import client as mt from spikeforest import SFMdaRecordingExtractor from mountaintools import MountainClient from .spikeforest_view_launchers import get_spikeforest_view_launchers from .recordingcontext import RecordingContext local_client = MountainClient() class SpikeForestContext(): def __init__(self, studies=[], recordings=[], sorting_results=[], aggregated_sorting_results=None): self._signal_handlers = dict() self._any_state_change_handlers = [] print('******** FORESTVIEW: Initializing study context') self._studies = studies self._recordings = recordings self._sorting_results = sorting_results self._aggregated_sorting_results = aggregated_sorting_results self._recording_contexts = dict() self._studies_by_name = dict() for stu in self._studies: self._studies_by_name[stu['name']] = stu self._recordings_by_id = dict()