def __init__(self, group):
    """Build the accuracy-summary widget for a spikeforest batch group.

    Loads the batch group object from the key-value store, pulls in each
    named recordings set and processing batch (best-effort, with warnings),
    then constructs the UI controls and renders the initial accuracy table.
    """
    vd.Component.__init__(self)
    self._group = group
    SF = sf.SFData()
    a = kb.loadObject(
        key=dict(name='spikeforest_batch_group', group_name=group))
    for recordings_name in a['recordings_names']:
        try:
            SF.loadRecordings(key=dict(name=recordings_name))
        except Exception:
            # Best-effort: skip recordings that fail to load.
            # BUGFIX: original did `raise print(...)`, which raised the None
            # returned by print() (a TypeError), masking the real exception
            # and never showing the warning. Now matches the batch loop below.
            print('Warning: unable to load recordings: ' + recordings_name)
    for batch_name in a['batch_names']:
        try:
            SF.loadProcessingBatch(batch_name=batch_name)
        except Exception:
            print('Warning: unable to load processing batch: ' + batch_name)
    self._SF_data = SF
    self._accuracy_threshold_input = vd.components.LineEdit(
        value=0.8, dtype=float, style=dict(width='70px'))
    self._update_button = vd.components.Button(
        onclick=self._on_update, class_='button', label='Update')
    self._study_sorter_fig = StudySorterFigure(SF)
    self._study_sorter_table = vd.div()  # placeholder; replaced on update
    vd.devel.loadBootstrap()
    self._update_accuracy_table()
def _on_group_changed(self, value=None):
    """Reload SFData for the selected group and refresh the study selector."""
    group_name = self._SEL_group.value()
    if not group_name:
        return
    # Choose which stored object to read based on the checkbox state.
    if self._CB_use_summarized_recordings.checked():
        object_name = 'summarized_recordings'
    else:
        object_name = 'spikeforest_recording_group'
    obj = kb.loadObject(key=dict(name=object_name, group_name=group_name))
    if not obj:
        print('ERROR: unable to open recording group: ' + group_name)
        return
    if ('recordings' not in obj) or ('studies' not in obj):
        print('ERROR: problem with recording group: ' + group_name)
        return
    data = sf.SFData()
    data.loadStudies(obj['studies'])
    data.loadRecordings2(obj['recordings'])
    self._SF = data
    self._SEL_study.setOptions(data.studyNames())
    self._on_study_changed(value=self._SEL_study.value())
def _on_group_changed(self, value):
    """Load the recording group for the selected name and refresh the study list."""
    name = self._SEL_group.value()
    if not name:
        return
    obj = ca.loadObject(
        key=dict(name='spikeforest_recording_group', group_name=name))
    data = sf.SFData()
    data.loadStudies(obj['studies'])
    data.loadRecordings2(obj['recordings'])
    self._SF = data
    self._SEL_study.setOptions(data.studyNames())
    self._on_study_changed(value=self._SEL_study.value())
def __init__(self, group):
    """Build the accuracy-summary widget for a summarized recording group.

    Loads the group's studies/recordings, then the sorting results for each
    sorter registered for this group, and constructs the UI controls before
    rendering the initial accuracy table.
    """
    vd.Component.__init__(self)
    self._sorter_names = kb.loadObject(key=dict(
        name='spikeforest_sorter_names'))
    group_name = group
    self._group = group_name
    a = kb.loadObject(
        key=dict(name='summarized_recordings', group_name=group_name))
    if not a:
        print('ERROR: unable to open recording group: ' + group_name)
        return
    if ('recordings' not in a) or ('studies' not in a):
        print('ERROR: problem with recording group: ' + group_name)
        return
    studies = a['studies']
    recordings = a['recordings']
    SF = sf.SFData()
    SF.loadStudies(studies)
    SF.loadRecordings2(recordings)
    sorter_names = self._sorter_names[group_name]
    for sorter_name in sorter_names:
        print('Loading sorting results for sorter: ' + sorter_name)
        b = kb.loadObject(key=dict(name='sorting_results',
                                   group_name=group_name,
                                   sorter_name=sorter_name))
        if not b:
            print('WARNING: unable to open sorting results for sorter: ' + sorter_name)
            # BUGFIX: was `break`, which silently skipped every remaining
            # sorter after one failure; a warning should only skip this one.
            continue
        SF.loadSortingResults(b['sorting_results'])
    self._SF_data = SF
    self._accuracy_threshold_input = vd.components.LineEdit(
        value=0.8, dtype=float, style=dict(width='70px'))
    self._update_button = vd.components.Button(
        onclick=self._on_update, class_='button', label='Update')
    self._study_sorter_fig = StudySorterFigure(SF)
    self._study_sorter_table = vd.div()  # placeholder; replaced on update
    vd.devel.loadBootstrap()
    self._update_accuracy_table()
def _on_output_id_changed(self, value):
    """Load the spikeforest results object for the selected output id."""
    output_id = self._SEL_output_id.value()
    if not output_id:
        return
    result = ca.loadObject(
        key=dict(name='spikeforest_results', output_id=output_id))
    if result is None:
        raise Exception(
            'Unable to load spikeforest result: {}'.format(output_id))
    data = sf.SFData()
    data.loadStudies(result['studies'])
    data.loadRecordings2(result['recordings'])
    data.loadSortingResults(result['sorting_results'])
    self._SF = data
    self._SEL_study.setOptions(data.studyNames())
    self._on_study_changed(value=self._SEL_study.value())
def __init__(self, output_id):
    """Build the accuracy-summary widget for a spikeforest results output.

    Loads studies, recordings, and sorting results from the stored results
    object (under subkey *output_id*), then constructs the UI controls and
    renders the initial accuracy table. On a missing or malformed object the
    widget prints an error and stays uninitialized.
    """
    vd.Component.__init__(self)
    self._output_id = output_id
    a = ca.loadObject(key=dict(name='spikeforest_results'),
                      subkey=output_id)
    if not a:
        print('ERROR: unable to open results: ' + output_id)
        return
    if ('recordings' not in a) or ('studies' not in a) or ('sorting_results' not in a):
        print('ERROR: problem with output: ' + output_id)
        return
    studies = a['studies']
    recordings = a['recordings']
    sorting_results = a['sorting_results']
    SF = sf.SFData()
    SF.loadStudies(studies)
    SF.loadRecordings2(recordings)
    SF.loadSortingResults(sorting_results)
    self._SF_data = SF
    self._accuracy_threshold_input = vd.components.LineEdit(
        value=0.8, dtype=float, style=dict(width='70px'))
    self._update_button = vd.components.Button(
        onclick=self._on_update, class_='button', label='Update')
    self._study_sorter_fig = StudySorterFigure(SF)
    self._study_sorter_table = vd.div()  # placeholder; replaced on update
    vd.devel.loadBootstrap()
    self._update_accuracy_table()
def _on_group_changed(self, value):
    """Reload SFData for the selected batch group and refresh the study list.

    Recordings and processing batches are loaded best-effort: failures are
    reported as warnings and skipped.
    """
    group_name = self._SEL_group.value()
    # Guard against an empty selection, consistent with the sibling handlers.
    if not group_name:
        return
    a = kb.loadObject(
        key=dict(name='spikeforest_batch_group', group_name=group_name))
    SF = sf.SFData()
    for recordings_name in a['recordings_names']:
        try:
            SF.loadRecordings(key=dict(name=recordings_name))
        # Narrowed from bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception:
            print('Warning: unable to load recordings: '+recordings_name)
    for batch_name in a['batch_names']:
        try:
            SF.loadProcessingBatch(batch_name=batch_name)
        except Exception:
            print('Warning: unable to load processing batch: '+batch_name)
    self._SF = SF
    self._SEL_study.setOptions(SF.studyNames())
    self._on_study_changed(value=self._SEL_study.value())
def _on_group_changed(self, value):
    """Reload SFData (with sorting results) for the selected group.

    Loads the summarized recording group, then the sorting results for every
    sorter registered for this group, and refreshes the study selector.
    """
    group_name = self._SEL_group.value()
    if not group_name:
        return
    a = kb.loadObject(
        key=dict(name='summarized_recordings', group_name=group_name))
    if not a:
        print('ERROR: unable to open recording group: ' + group_name)
        return
    if ('recordings' not in a) or ('studies' not in a):
        print('ERROR: problem with recording group: ' + group_name)
        return
    studies = a['studies']
    recordings = a['recordings']
    SF = sf.SFData()
    SF.loadStudies(studies)
    SF.loadRecordings2(recordings)
    sorter_names = self._sorter_names[group_name]
    for sorter_name in sorter_names:
        print('Loading sorting results for sorter: ' + sorter_name)
        b = kb.loadObject(key=dict(name='sorting_results',
                                   group_name=group_name,
                                   sorter_name=sorter_name))
        if not b:
            print('WARNING: unable to open sorting results for sorter: ' + sorter_name)
            # BUGFIX: was `break`, which silently skipped every remaining
            # sorter after one failure; a warning should only skip this one.
            continue
        SF.loadSortingResults(b['sorting_results'])
    self._SF = SF
    self._SEL_study.setOptions(SF.studyNames())
    self._on_study_changed(value=self._SEL_study.value())
def aggregate_sorting_results(studies, recordings, sorting_results):
    """Aggregate per-unit comparison-with-truth stats across studies and sorters.

    Builds an SFData object from the given studies/recordings/sorting results,
    then for every (study, sorter) pair collects per-true-unit counts
    (matches, false positives, false negatives) plus SNR and firing rate,
    both per-recording and pooled per-study.

    Returns a dict with two lists:
      - 'recording_sorting_results': one entry per (study, recording, sorter)
        whose comparison had complete count fields.
      - 'study_sorting_results': one entry per (study, sorter), concatenating
        the per-recording arrays with a parallel recording-index array.
    """
    SF = sf.SFData()
    SF.loadStudies(studies=studies)
    SF.loadRecordings2(recordings=recordings)
    SF.loadSortingResults(sorting_results=sorting_results)
    aggregated_sorting_results = dict(
        recording_sorting_results=[],
        study_sorting_results=[]
    )
    for study_name in SF.studyNames():
        print('study: '+study_name)
        S = SF.study(study_name)
        # The sorter list is taken from the first recording; assumes every
        # recording in the study was processed by the same sorters — TODO confirm.
        first_recording = S.recording(S.recordingNames()[0])
        sorter_names = first_recording.sortingResultNames()
        for srname in sorter_names:
            print('sorter: '+srname)
            # Study-level accumulators: parallel arrays, one slot per true unit.
            study_results0=dict(
                recording_indices = [],
                true_unit_ids = [],
                true_unit_snrs = [],
                true_unit_firing_rates = [],
                num_matches = [],
                num_false_positives = [],
                num_false_negatives = []
            )
            for recording_index,rname in enumerate(S.recordingNames()):
                print('recording: '+rname)
                rec = S.recording(rname)
                true_units_info = rec.trueUnitsInfo(format='json')
                # Index ground-truth unit info by unit id for O(1) lookup below.
                true_units_info_by_id = dict()
                for true_unit in true_units_info:
                    true_units_info_by_id[true_unit['unit_id']] = true_unit
                SR = rec.sortingResult(srname)
                comparison = SR.comparisonWithTruth(format='json')
                # Per-recording accumulators, same parallel-array layout.
                recording_results0=dict(
                    true_unit_ids = [],
                    true_unit_snrs = [],
                    true_unit_firing_rates = [],
                    num_matches = [],
                    num_false_positives = [],
                    num_false_negatives = []
                )
                ok = True
                for i in comparison:
                    unit = comparison[i]
                    best_unit = unit['best_unit']
                    unit_id = unit['unit_id']
                    true_unit = true_units_info_by_id[unit_id]
                    recording_results0['true_unit_ids'].append(unit_id)
                    recording_results0['true_unit_snrs'].append(round(true_unit['snr'], 3))
                    recording_results0['true_unit_firing_rates'].append(
                        round(true_unit['firing_rate'], 3))
                    if 'num_false_positives' in unit:
                        recording_results0['num_matches'].append(unit['num_matches'])
                        recording_results0['num_false_positives'].append(unit['num_false_positives'])
                        recording_results0['num_false_negatives'].append(unit['num_false_negatives'])
                    else:
                        # Incomplete comparison entry: mark the whole recording
                        # as unusable and stop scanning its units.
                        ok = False
                        error = 'missing field: num_false_positives'
                        break
                if ok:
                    recording_sorting_result = dict(
                        study=study_name,
                        recording=rname,
                        sorter=srname,
                        true_unit_ids=recording_results0['true_unit_ids'],
                        true_unit_snrs=recording_results0['true_unit_snrs'],
                        true_unit_firing_rates=recording_results0['true_unit_firing_rates'],
                        num_matches=recording_results0['num_matches'],
                        num_false_positives=recording_results0['num_false_positives'],
                        num_false_negatives=recording_results0['num_false_negatives']
                    )
                    aggregated_sorting_results['recording_sorting_results'].append(recording_sorting_result)
                else:
                    print('Warning: '+error)
                # NOTE(review): these extends run even when ok is False, so on
                # an aborted recording the id/snr/rate arrays may be longer than
                # the count arrays in the study-level aggregate — confirm this
                # length mismatch is acceptable to downstream consumers.
                study_results0['recording_indices'].extend([recording_index]*len(recording_results0['true_unit_ids']))
                study_results0['true_unit_ids'].extend(recording_results0['true_unit_ids'])
                study_results0['true_unit_snrs'].extend(recording_results0['true_unit_snrs'])
                study_results0['true_unit_firing_rates'].extend(recording_results0['true_unit_firing_rates'])
                study_results0['num_matches'].extend(recording_results0['num_matches'])
                study_results0['num_false_positives'].extend(recording_results0['num_false_positives'])
                study_results0['num_false_negatives'].extend(recording_results0['num_false_negatives'])
            study_sorting_result = dict(
                study=study_name,
                sorter=srname,
                true_unit_recording_indices=study_results0['recording_indices'],
                true_unit_ids=study_results0['true_unit_ids'],
                true_unit_snrs=study_results0['true_unit_snrs'],
                true_unit_firing_rates=study_results0['true_unit_firing_rates'],
                num_matches=study_results0['num_matches'],
                num_false_positives=study_results0['num_false_positives'],
                num_false_negatives=study_results0['num_false_negatives']
            )
            aggregated_sorting_results['study_sorting_results'].append(study_sorting_result)
    return aggregated_sorting_results