def download_recordings(*, batch_name):
    # Load the batch object by name and realize (download/cache) the raw
    # recording file for each job in the batch.
    batch = kb.loadObject(key=dict(batch_name=batch_name))
    jobs = batch['jobs']
    for job in jobs:
        print('DOWNLOADING: ' + job['label'])
        dsdir = job['recording']['directory']
        kb.realizeFile(dsdir + '/raw.mda')
def sf_batch_prepare(config, *, clear_all=False):
    login(config)
    study_obj = kb.loadObject(key=dict(name='spikeforest_recordings'))
    recordings = select_recordings(study_obj, config)
    sorters = config['sorters']
    clear_in_process_only = (not clear_all)
    for ds in recordings:
        print('PREPARE: {}/{}'.format(ds['study'], ds['name']))
        print('Downloading raw.mda')
        dsdir = ds['directory']
        kb.realizeFile(dsdir + '/raw.mda')
        # Clear any stale summarize_recording result for this recording.
        if config.get('summarize_recordings', None):
            key = dict(
                name='summarize_recording',
                batch_name=config['name'],
                study_name=ds['study'],
                recording_name=ds['name']
            )
            clear_result_for_key(key=key, in_process_only=clear_in_process_only)
        # Clear any stale sort_recording results, one per configured sorter.
        for sorter in sorters:
            key = dict(
                name='sort_recording',
                batch_name=config['name'],
                study_name=ds['study'],
                recording_name=ds['name'],
                sorter_name=sorter['name'],
                sorter_params=sorter['params']
            )
            clear_result_for_key(key=key, in_process_only=clear_in_process_only)
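# Hedged sketch of the config shape sf_batch_prepare reads, inferred only from the
# fields accessed above ('name', 'sorters' with 'name'/'params', and the optional
# 'summarize_recordings' flag). The sorter name and parameter values are illustrative
# assumptions, and select_recordings() / login() may expect additional keys not shown.
example_config = dict(
    name='example_batch',       # used as batch_name in the result keys
    sorters=[
        dict(name='example_sorter', params=dict(detect_sign=-1)),
    ],
    summarize_recordings=True,  # when truthy, summarize_recording results are also cleared
)
# sf_batch_prepare(example_config, clear_all=False)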
def displayResult(result, summary=True, comparison=False, summary_plots=False):
    if summary:
        # Show a one-column table of basic metadata about the sorting result.
        rows = []
        rows.append(result['dataset_name'])
        rows.append(result['dataset_dir'])
        rows.append(result['sorting_processor_name'])
        rows.append(result['sorting_processor_version'])
        rows.append(result['sorting_params'])
        df = pd.DataFrame(rows, index=[
            'Dataset', 'Directory', 'Sorting processor',
            'Sorting version', 'Sorting parameters'
        ], columns=[''])

        def stylefunc(val):
            return 'text-align: left'

        s = df.style.applymap(stylefunc)
        display(s)
    if comparison:
        # Show the comparison-with-ground-truth table.
        table = _read_json_file(
            kb.realizeFile(result['comparison_with_truth']['json']))
        df = pd.DataFrame(table).transpose()
        display(df)
    if summary_plots:
        # Show each summary plot under a heading with its name.
        obj = result['summary']['plots']
        for key in obj:
            display(HTML('<h3>{}</h3>'.format(key)))
            path = obj[key]
            path = kb.realizeFile(path)
            display(Image(path, format='jpeg'))
def download_datasets(run_code=default_run_code):
    tasks = load_tasks(run_code=run_code)
    for i, task in enumerate(tasks):
        ds = task.dataset()
        print('Download task {} of {}: {}'.format(i + 1, len(tasks), ds['name']))
        dsdir = ds['directory']
        kb.realizeFile(dsdir + '/raw.mda')
def _download_recordings(*, jobs):
    for ii, job in enumerate(jobs):
        val = pa.get(key=job)
        if not val:
            if ('recording' in job) and ('directory' in job['recording']):
                dsdir = job['recording']['directory']
                fname = dsdir + '/raw.mda'
                print('REALIZING FILE: ' + fname)
                kb.realizeFile(fname)
def spikeforest_sort(
        recording_dirname,  # Path to the recording directory (containing raw.mda)
        sorter,
        sorting_params,
        _force_run=False,
        _force_save=False
):
    # Build a cache key from the sorter identity, the recording contents, and the parameters.
    recording_signature = kb.computeDirHash(recording_dirname)
    signature_obj = dict(
        sorter_name=sorter.name,
        sorter_version=sorter.version,
        recording=recording_signature,
        sorting_params=sorting_params
    )
    if not _force_run:
        print('Looking up in cache...')
        firings = kb.realizeFile(key=signature_obj)
        if firings:
            print('Found')
            if _force_save:
                print('Saving')
                kb.saveFile(fname=firings, key=signature_obj)
            return si.MdaSortingExtractor(firings_file=firings)
    # Cache miss (or forced run): run the sorter and store the result under the cache key.
    recording = si.MdaRecordingExtractor(recording_dirname)
    sorting = sorter(recording=recording, **sorting_params)
    si.MdaSortingExtractor.writeSorting(sorting=sorting, save_path='tmp_firings.mda')
    kb.saveFile(fname='tmp_firings.mda', key=signature_obj)
    return sorting
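# Hedged usage sketch for spikeforest_sort. The directory path is a placeholder,
# and the sorter is assumed to be any callable exposing .name and .version that
# accepts recording= plus keyword parameters and returns a sorting object, as
# implied by the function body above.
#
# sorting = spikeforest_sort(
#     '/path/to/recording_dir',          # hypothetical directory containing raw.mda
#     sorter=my_sorter,                  # assumed sorter callable with .name / .version
#     sorting_params=dict(detect_sign=-1),
#     _force_run=False                   # set True to bypass the cache lookup
# )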
def compute_recording_info(recording):
    out = ComputeRecordingInfo.execute(
        recording_dir=recording['directory'],
        channels=recording.get('channels', []),
        json_out={'ext': '.json'}
    ).outputs['json_out']
    kb.saveFile(out)
    return read_json_file(kb.realizeFile(out))
def _download_recording_if_needed(job):
    if 'recording' in job:
        if 'directory' in job['recording']:
            dsdir = job['recording']['directory']
            fname = dsdir + '/raw.mda'
            print('REALIZING FILE: ' + fname)
            fname2 = kb.realizeFile(fname)
            if not fname2:
                raise Exception('Unable to realize file: ' + fname)
def compute_units_info(*, recording_dir, firings, channel_ids=[], unit_ids=[], return_format='json'):
    out = ComputeUnitsInfo.execute(
        recording_dir=recording_dir,
        firings=firings,
        unit_ids=unit_ids,
        channel_ids=channel_ids,
        json_out={'ext': '.json'}
    ).outputs
    fname = out['json_out']
    if return_format == 'filename':
        # Return the output file handle without realizing it locally.
        return fname
    else:
        # Realize the JSON output locally and return the parsed object.
        fname = kb.realizeFile(fname)
        with open(fname) as f:
            return json.load(f)
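# Hedged usage sketch for compute_units_info; the paths are placeholders. With the
# default return_format='json' the parsed JSON is returned; 'filename' returns the
# unrealized output file instead.
#
# units_info = compute_units_info(
#     recording_dir='/path/to/recording_dir',
#     firings='/path/to/firings.mda',
#     return_format='json'
# )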
def _on_view_true_unit_autocorrelograms(self):
    dirname = self._recording.directory()
    img = PlotAutoCorrelograms.execute(
        recording_dir=dirname,
        channels=[],
        firings=dirname + '/firings_true.mda',
        plot_out={'ext': '.jpg'}
    ).outputs['plot_out']
    img = kb.realizeFile(img)
    self._view = ImageView(img)
    self.refresh()
def plot(self, name, format='image'):
    # Look up the named plot in the result's summary and return it in the
    # requested format: 'url' (unresolved), 'path' (realized local file),
    # or 'image' (PIL image, the default).
    plots = self._obj['summary'].get('plots', dict())
    url = plots[name]
    if format == 'url':
        return url
    else:
        path = kb.realizeFile(url)
        if format == 'image':
            return Image.open(path)
        elif format == 'path':
            return path
        else:
            raise Exception('Invalid format: ' + format)
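# Hedged usage sketch for plot(). 'result' stands for whatever object this method
# belongs to, and 'autocorrelograms' is a hypothetical plot name; the real keys
# come from self._obj['summary']['plots'].
#
# img = result.plot('autocorrelograms')                        # PIL image (default)
# local_path = result.plot('autocorrelograms', format='path')  # realized local path
# remote_url = result.plot('autocorrelograms', format='url')   # unresolved URL/key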
def kb_read_text_file(fname):
    fname = kb.realizeFile(fname)
    with open(fname, 'r') as f:
        return f.read()
def kb_read_json_file(fname):
    fname_realized = kb.realizeFile(fname)
    if not fname_realized:
        # Report the original path; fname_realized is empty/None at this point.
        raise Exception('Unable to realize file: ' + fname)
    with open(fname_realized, 'r') as f:
        return json.load(f)
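# Hedged usage sketch: both helpers accept a path that kb.realizeFile can resolve
# (a local path or a remote/content-addressed reference) and read it once realized.
# The arguments below are placeholders.
#
# obj = kb_read_json_file('/path/to/summary.json')
# text = kb_read_text_file('/path/to/notes.txt')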
def compute_dataset_info(dataset):
    out = ComputeDatasetInfo.execute(
        recording_dir=dataset['directory'],
        json_out={'ext': '.json'}
    ).outputs['json_out']
    kb.saveFile(out)
    return read_json_file(kb.realizeFile(out))