Example No. 1
    def _on_group_changed(self, value=None):
        group_name = self._SEL_group.value()
        if not group_name:
            return
        if self._CB_use_summarized_recordings.checked():
            a = kb.loadObject(
                key=dict(name='summarized_recordings', group_name=group_name)
            )
        else:
            a = kb.loadObject(
                key=dict(name='spikeforest_recording_group',
                         group_name=group_name)
            )
        if not a:
            print('ERROR: unable to open recording group: ' + group_name)
            return

        if ('recordings' not in a) or ('studies' not in a):
            print('ERROR: problem with recording group: ' + group_name)
            return

        studies = a['studies']
        recordings = a['recordings']

        SF = sf.SFData()
        SF.loadStudies(studies)
        SF.loadRecordings2(recordings)

        self._SF = SF
        self._SEL_study.setOptions(SF.studyNames())
        self._on_study_changed(value=self._SEL_study.value())
Example No. 2
 def initialize(self):
     self._group_names = kb.loadObject(key=dict(
         name='spikeforest_recording_group_names'))
     self._sorter_names = kb.loadObject(key=dict(
         name='spikeforest_sorter_names'))
     self._SEL_group.setOptions([''] + self._group_names)
     self._SEL_group.setValue('magland_synth_test')
     self._on_group_changed(value=self._SEL_group.value())
Example No. 3
    def __init__(self, group):
        vd.Component.__init__(self)

        self._sorter_names = kb.loadObject(key=dict(
            name='spikeforest_sorter_names'))

        group_name = group
        self._group = group_name

        a = kb.loadObject(
            key=dict(name='summarized_recordings', group_name=group_name))
        if not a:
            print('ERROR: unable to open recording group: ' + group_name)
            return

        if ('recordings' not in a) or ('studies' not in a):
            print('ERROR: problem with recording group: ' + group_name)
            return

        studies = a['studies']
        recordings = a['recordings']

        SF = sf.SFData()
        SF.loadStudies(studies)
        SF.loadRecordings2(recordings)

        sorter_names = self._sorter_names[group_name]
        for sorter_name in sorter_names:
            print('Loading sorting results for sorter: ' + sorter_name)
            b = kb.loadObject(key=dict(name='sorting_results',
                                       group_name=group_name,
                                       sorter_name=sorter_name))
            if not b:
                print('WARNING: unable to open sorting results for sorter: ' +
                      sorter_name)
                continue  # skip this sorter but keep loading the others
            SF.loadSortingResults(b['sorting_results'])

        self._SF_data = SF

        self._accuracy_threshold_input = vd.components.LineEdit(
            value=0.8, dtype=float, style=dict(width='70px'))
        self._update_button = vd.components.Button(onclick=self._on_update,
                                                   class_='button',
                                                   label='Update')
        self._study_sorter_fig = StudySorterFigure(SF)
        self._study_sorter_table = vd.div()  # dummy

        vd.devel.loadBootstrap()

        self._update_accuracy_table()
Example No. 4
def assemble_job_results(*, batch_name):
    batch = kb.loadObject(key=dict(batch_name=batch_name))
    jobs = batch['jobs']
    job_results = []
    for job in jobs:
        print('ASSEMBLING: ' + job['label'])
        result = kb.loadObject(key=job)
        if not result:
            raise Exception('Unable to load object for job: ' + job['label'])
        job_results.append(dict(job=job, result=result))
    print('Saving results...')
    kb.saveObject(key=dict(name='job_results', batch_name=batch_name),
                  object=dict(job_results=job_results))
    print('Done.')
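Examples No. 4, 8, and 9 together form a prepare/run/assemble cycle over the same batch object. A minimal driver sketch, assuming all three helpers are importable into one script, that kb has already been configured (e.g. via sf.kbucketConfigRemote), and that 'my_batch' is a hypothetical batch name:

def process_batch(batch_name='my_batch'):
    # Clear incomplete results and download recordings (Example No. 9).
    prepareBatch(batch_name=batch_name)
    # Execute every job in the batch (Example No. 8).
    runBatch(batch_name=batch_name)
    # Gather per-job results into a single saved object (Example No. 4).
    assemble_job_results(batch_name=batch_name)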
Example No. 5
def sf_batch_prepare(config, *, clear_all=False):
    login(config)
    study_obj = kb.loadObject(key=dict(name='spikeforest_recordings'))
    recordings = select_recordings(study_obj, config)
    sorters = config['sorters']

    clear_in_process_only = (not clear_all)
    for ds in recordings:
        print('PREPARE: {}/{}'.format(ds['study'], ds['name']))
        print('Downloading raw.mda')
        dsdir = ds['directory']
        kb.realizeFile(dsdir + '/raw.mda')

        if config.get('summarize_recordings', None):
            key = dict(name='summarize_recording',
                       batch_name=config['name'],
                       study_name=ds['study'],
                       recording_name=ds['name'])
            clear_result_for_key(key=key,
                                 in_process_only=clear_in_process_only)

        for sorter in sorters:
            key = dict(name='sort_recording',
                       batch_name=config['name'],
                       study_name=ds['study'],
                       recording_name=ds['name'],
                       sorter_name=sorter['name'],
                       sorter_params=sorter['params'])
            clear_result_for_key(key=key,
                                 in_process_only=clear_in_process_only)
Example No. 6
def download_recordings(*, batch_name):
    batch = kb.loadObject(key=dict(batch_name=batch_name))
    jobs = batch['jobs']
    for job in jobs:
        print('DOWNLOADING: ' + job['label'])
        dsdir = job['recording']['directory']
        kb.realizeFile(dsdir + '/raw.mda')
Example No. 7
    def __init__(self, group):
        vd.Component.__init__(self)

        self._group = group
        SF = sf.SFData()
        a = kb.loadObject(
            key=dict(name='spikeforest_batch_group', group_name=group))
        for recordings_name in a['recordings_names']:
            try:
                SF.loadRecordings(key=dict(name=recordings_name))
            except Exception:
                print('Warning: unable to load recordings: ' + recordings_name)
        for batch_name in a['batch_names']:
            try:
                SF.loadProcessingBatch(batch_name=batch_name)
            except Exception:
                print('Warning: unable to load processing batch: ' +
                      batch_name)
        self._SF_data = SF
        self._accuracy_threshold_input = vd.components.LineEdit(
            value=0.8, dtype=float, style=dict(width='70px'))
        self._update_button = vd.components.Button(onclick=self._on_update,
                                                   class_='button',
                                                   label='Update')
        self._study_sorter_fig = StudySorterFigure(SF)
        self._study_sorter_table = vd.div()  # dummy

        vd.devel.loadBootstrap()

        self._update_accuracy_table()
Example No. 8
def runBatch(*, batch_name):
    batch = kb.loadObject(key=dict(batch_name=batch_name))
    jobs = batch['jobs']

    print('Running batch {} with {} jobs'.format(batch_name, len(jobs)))
    for job in jobs:
        _run_job(job)
Example No. 9
def prepareBatch(*, batch_name):
    batch = kb.loadObject(key=dict(batch_name=batch_name))
    jobs = batch['jobs']

    print('Preparing batch {} with {} jobs'.format(batch_name, len(jobs)))
    _clear_job_results(jobs=jobs, incomplete_only=True)
    _download_recordings(jobs=jobs)
Example No. 10
 def __init__(self):
     vd.Component.__init__(self)
     self._groups = kb.loadObject(key=dict(
         name='spikeforest_batch_group_names'))
     self._SEL_group = vd.components.SelectBox(
         options=self._groups['batch_group_names'])
     self._SEL_group.onChange(self._on_group_changed)
     self._BMW = sf.BatchMonitorWidget([], height=600)
     self._on_group_changed(value=self._SEL_group.value())
Example No. 11
 def loadResult(self):
   val=pa.get(self._key)
   if val:
     if val.startswith('in-process'):
       print('Unable to load result... it is in process.')
       return None
     else:
       return kb.loadObject(key=self._key)
   else:
     return None
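The 'in-process' prefix checked above is the lock marker written while a job is still running (see Example No. 14), so loadResult() returning None can mean either "not started" or "still running". A hedged polling sketch built on top of loadResult(); the task object and the 30-second interval are illustrative assumptions, not part of the original API:

import time

def wait_for_result(task, poll_interval=30):
    # Poll until the in-process marker clears and the object loads.
    while True:
        result = task.loadResult()  # returns None while locked or absent
        if result is not None:
            return result
        time.sleep(poll_interval)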
Example No. 12
def runBatch(*,batch_name, test_one=False):
  print('Loading batch object...')
  batch=kb.loadObject(key=dict(batch_name=batch_name))
  jobs=batch['jobs']

  if test_one and (len(jobs)>0):
    jobs=[jobs[0]]

  print('Running batch with {} jobs...'.format(len(jobs)))
  for job in jobs:
    _run_job(job)
Example No. 13
def sf_batch_assemble(config):
    login(config)
    study_obj = kb.loadObject(key=dict(name='spikeforest_recordings'))
    recordings = select_recordings(study_obj, config)
    sorters = config['sorters']

    batch_output = dict(recordings=recordings,
                        sorters=sorters,
                        summarize_recording_results=[],
                        sorting_results=[])
    for ds in recordings:
        if config.get('summarize_recordings', None):
            print('ASSEMBLE: {}/{}'.format(ds['study'], ds['name']))
            key = dict(name='summarize_recording',
                       batch_name=config['name'],
                       study_name=ds['study'],
                       recording_name=ds['name'])
            result0 = kb.loadObject(key=key)
            if not result0:
                raise Exception(
                    'Problem loading summarize_recording result {}'.format(
                        json.dumps(key)))
            batch_output['summarize_recording_results'].append(result0)

        for sorter in sorters:
            print('ASSEMBLE: {} {}/{}'.format(sorter['name'], ds['study'],
                                              ds['name']))
            key = dict(name='sort_recording',
                       batch_name=config['name'],
                       study_name=ds['study'],
                       recording_name=ds['name'],
                       sorter_name=sorter['name'],
                       sorter_params=sorter['params'])
            result0 = kb.loadObject(key=key)
            if not result0:
                raise Exception(
                    'Problem loading sort_recording result {}'.format(
                        json.dumps(key)))
            batch_output['sorting_results'].append(result0)

    kb.saveObject(key=dict(batch_name=config['name']), object=batch_output)
Example No. 14
def sf_batch_run(config):
    login(config)
    study_obj = kb.loadObject(key=dict(name='spikeforest_recordings'))
    recordings = select_recordings(study_obj, config)
    sorters = config['sorters']

    code = ''.join(random.choice(string.ascii_uppercase) for x in range(10))
    for i, ds in enumerate(recordings):
        if config.get('summarize_recordings', None):
            key = dict(name='summarize_recording',
                       batch_name=config['name'],
                       study_name=ds['study'],
                       recording_name=ds['name'])
            if acquire_lock_for_key(key=key, code=code):
                try:
                    print(
                        '========= Summarizing recording {}/{}: {}/{}'.format(
                            i + 1, len(recordings), ds['study'], ds['name']))
                    result0 = sf_summarize_recording(ds)
                except:
                    if check_consistent_code(key=key, code=code):
                        pa.set(key=key, value='error-' + code)
                    raise
                if check_consistent_code(key=key, code=code):
                    kb.saveObject(key=key, object=result0)
                else:
                    print('Warning: inconsistent code for {}'.format(
                        json.dumps(key)))

        for sorter in sorters:
            key = dict(name='sort_recording',
                       batch_name=config['name'],
                       study_name=ds['study'],
                       recording_name=ds['name'],
                       sorter_name=sorter['name'],
                       sorter_params=sorter['params'])
            if acquire_lock_for_key(key=key, code=code):
                try:
                    print(
                        '========= Sorting recording {}/{}: {} - {}/{}'.format(
                            i + 1, len(recordings), sorter['name'],
                            ds['study'], ds['name']))
                    result0 = sf_sort_recording(sorter, ds)
                except:
                    if check_consistent_code(key=key, code=code):
                        pa.set(key=key, value='error-' + code)
                    raise
                if check_consistent_code(key=key, code=code):
                    kb.saveObject(key=key, object=result0)
                else:
                    print('Warning: inconsistent code for {}'.format(
                        json.dumps(key)))
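This function leans on two helpers that are not shown: acquire_lock_for_key, which claims a key by writing an in-process marker tagged with this run's random code, and check_consistent_code, which verifies the marker still carries that code before results are saved. A plausible sketch of both, assuming pa.get/pa.set are plain key-value operations; these bodies are guesses for illustration, not the project's actual implementations:

def acquire_lock_for_key(*, key, code):
    # Hypothetical: claim the key only if nobody else has marked it.
    val = pa.get(key)
    if val:
        return False  # already in process, completed, or errored elsewhere
    pa.set(key=key, value='in-process-' + code)
    return True

def check_consistent_code(*, key, code):
    # Hypothetical: confirm the marker still belongs to this run.
    val = pa.get(key)
    return bool(val) and val.endswith(code)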
Example No. 15
    def _on_group_changed(self, value):
        group_name = self._SEL_group.value()
        if not group_name:
            return
        a = kb.loadObject(
            key=dict(name='summarized_recordings', group_name=group_name))
        if not a:
            print('ERROR: unable to open recording group: ' + group_name)
            return

        if ('recordings' not in a) or ('studies' not in a):
            print('ERROR: problem with recording group: ' + group_name)
            return

        studies = a['studies']
        recordings = a['recordings']

        SF = sf.SFData()
        SF.loadStudies(studies)
        SF.loadRecordings2(recordings)

        sorter_names = self._sorter_names[group_name]
        for sorter_name in sorter_names:
            print('Loading sorting results for sorter: ' + sorter_name)
            b = kb.loadObject(key=dict(name='sorting_results',
                                       group_name=group_name,
                                       sorter_name=sorter_name))
            if not b:
                print('WARNING: unable to open sorting results for sorter: ' +
                      sorter_name)
                continue  # skip this sorter but keep loading the others
            SF.loadSortingResults(b['sorting_results'])

        self._SF = SF
        self._SEL_study.setOptions(SF.studyNames())
        self._on_study_changed(value=self._SEL_study.value())
Example No. 16
def clearBatch(*,batch_name, test_one=False):
  batch=kb.loadObject(key=dict(batch_name=batch_name))
  jobs=batch['jobs']

  if test_one and (len(jobs)>0):
    jobs=[jobs[0]]

  setBatchStatus(
    batch_name=batch_name,
    status='clearing_batch'
  )
  _clear_job_results(jobs=jobs,incomplete_only=False)
  setBatchStatus(
    batch_name=batch_name,
    status='finished_clearing_batch'
  )
Example No. 17
    def __init__(self):
        vd.Component.__init__(self)
        self._groups = kb.loadObject(
            key=dict(name='spikeforest_batch_group_names'))
        self._SEL_group = vd.components.SelectBox(
            options=self._groups['batch_group_names'])
        self._SEL_group.onChange(self._on_group_changed)
        self._SEL_study = vd.components.SelectBox(options=[])
        self._SEL_study.onChange(self._on_study_changed)
        self._SEL_recording = vd.components.SelectBox(options=[])
        self._SEL_recording.onChange(self._on_recording_changed)
        self._recording_widget = SFW.SFRecordingWidget()

        self._on_group_changed(value=self._SEL_group.value())

        vd.devel.loadBootstrap()
Example No. 18
 def loadRecordings(self, *, key=None):
     if key is None:
         key = dict(name='spikeforest_studies_processed')
     obj = kb.loadObject(key=key)
     studies = obj['studies']
     for study in studies:
         name = study['name']
         if name in self._studies_by_name:
             print('Study already loaded: ' + name)
         else:
             self._study_names.append(study['name'])
             S = SFStudy(study)
             self._studies_by_name[name] = S
     recordings = obj['recordings']
     for ds in recordings:
         study = ds['study']
         self._studies_by_name[study].addRecording(ds)
Example No. 19
 def _on_group_changed(self, value):
     group_name = self._SEL_group.value()
     a = kb.loadObject(
         key=dict(name='spikeforest_batch_group', group_name=group_name))
     SF = sf.SFData()
     for recordings_name in a['recordings_names']:
         try:
             SF.loadRecordings(key=dict(name=recordings_name))
         except Exception:
             print('Warning: unable to load recordings: '+recordings_name)
     for batch_name in a['batch_names']:
         try:
             SF.loadProcessingBatch(batch_name=batch_name)
         except Exception:
             print('Warning: unable to load processing batch: '+batch_name)
     self._SF = SF
     self._SEL_study.setOptions(SF.studyNames())
     self._on_study_changed(value=self._SEL_study.value())
Example No. 20
def load_tasks(run_code):
  obj=kb.loadObject(
    key=dict(name='spikeforest_studies'),
    share_ids=['spikeforest.spikeforest1']
  )
  
  datasets=obj['datasets']
  tasks=[]
  for ds in datasets:
    key=dict(
      script='process_datasets',
      study_name=ds['study'],
      dataset_name=ds['name'],
      run_code=run_code
    )
    tasks.append(ProcessDatasetTask(key,ds))
  return tasks
Example No. 21
    def __init__(self):
        vd.Component.__init__(self)
        self._group_names = kb.loadObject(
            key=dict(name='spikeforest_recording_group_names')
        )
        self._CB_use_summarized_recordings = CheckBox(
            label='Use summarized recordings', checked=True)
        self._CB_use_summarized_recordings.onChange(self._on_group_changed)
        self._SEL_group = vd.components.SelectBox(
            options=['']+self._group_names)
        self._SEL_group.onChange(self._on_group_changed)
        self._SEL_study = vd.components.SelectBox(options=[])
        self._SEL_study.onChange(self._on_study_changed)
        self._SEL_recording = vd.components.SelectBox(options=[])
        self._SEL_recording.onChange(self._on_recording_changed)
        self._recording_widget = SFW.SFRecordingWidget()

        self._on_group_changed(value=self._SEL_group.value())

        vd.devel.loadBootstrap()
Example No. 22
 def loadProcessingBatch(self, *, key):
     obj = kb.loadObject(key=key)
     job_results = obj['job_results']
     num_sorting_results = 0
     num_recording_summary_results = 0
     for X in job_results:
         if X['job']['command'] == 'sort_recording':
             study_name = X['job']['recording']['study']
             recording_name = X['job']['recording']['name']
             sorter_name = X['job']['sorter']['name']
             result = X['result']
             S = self.study(study_name)
             if S:
                 D = S.recording(recording_name)
                 if D:
                     num_sorting_results = num_sorting_results + 1
                     D.addSortingResult(result)
                 else:
                     print('Warning: recording not found: ' +
                           recording_name)
             else:
                 print('Warning: study not found: ' + study_name)
         elif X['job']['command'] == 'summarize_recording':
             study_name = X['job']['recording']['study']
             recording_name = X['job']['recording']['name']
             result = X['result']
             S = self.study(study_name)
             if S:
                 D = S.recording(recording_name)
                 if D:
                     num_recording_summary_results = num_recording_summary_results + 1
                     D.setSummaryResult(result)
                 else:
                     print('Warning: recording not found: ' +
                           recording_name)
             else:
                 print('Warning: study not found: ' + study_name)
         else:
             pass
     print('Loaded {} sorting results and {} recording summary results'.
           format(num_sorting_results, num_recording_summary_results))
Example No. 23
def assemble_dataset_results(run_code=default_run_code):
  tasks=load_tasks(run_code=run_code)

  results=[]
  for i,task in enumerate(tasks):
    print('Loading result for task {} of {}: {}/{}'.format(i+1,len(tasks),task.dataset()['study'],task.dataset()['name']))
    result=task.loadResult()
    if not result:
      raise Exception('Unable to load result for task.')
    results.append(result)

  key1=dict(name='spikeforest_studies')
  key2=dict(name='spikeforest_studies_processed')
  print('Saving results to key={}'.format(json.dumps(key2)))
  obj=kb.loadObject(
    key=key1,
    share_ids=['spikeforest.spikeforest1']
  )
  obj['datasets']=results
  for ds in results:
    print(ds['study'],ds['name'])
  kb.saveObject(key=key2,object=obj)
Example No. 24
def prepareBatch(*,batch_name, test_one=False):
  batch=kb.loadObject(key=dict(batch_name=batch_name))
  jobs=batch['jobs']

  if test_one and (len(jobs)>0):
    jobs=[jobs[0]]

  setBatchStatus(
    batch_name=batch_name,
    status='preparing_batch'
  )
  _clear_job_results(jobs=jobs,incomplete_only=True)

  setBatchStatus(
    batch_name=batch_name,
    status='downloading_recordings'
  )
  _download_recordings(jobs=jobs)

  setBatchStatus(
    batch_name=batch_name,
    status='finished_preparing_batch'
  )
Example No. 25
def load_tasks(study_name,run_code=default_run_code):
    obj=kb.loadObject(
        key=dict(name='spikeforest_studies_processed'),
        share_ids=['spikeforest.spikeforest1']
    )
    
    datasets=obj['datasets']
    
    tasks=[]
    sorters=load_sorters()
    for ds in datasets:
        if ds['study']==study_name:
            for sorter in sorters:
                key=dict(
                    script='sort_datasets',
                    study_name=ds['study'],
                    dataset_name=ds['name'],
                    sorter_name=sorter['name'],
                    run_code=run_code
                )
                task=SortDatasetTask(dataset=ds,sorter=sorter,key=key)
                tasks.append(task)
    return tasks
Example No. 26
 def initialize(self):
     self._groups = kb.loadObject(
         key=dict(name='spikeforest_batch_group_names'))
     self._SEL_group.setOptions(['']+self._groups['batch_group_names'])
     self._SEL_group.setValue('magland_synth')
     self._on_group_changed(value=self._SEL_group.value())
Example No. 27
 def _on_group_changed(self, value):
     group_name = self._SEL_group.value()
     a = kb.loadObject(
         key=dict(name='spikeforest_batch_group', group_name=group_name))
     self._BMW.setBatchNames(a['batch_names'])
Example No. 28
    parser = argparse.ArgumentParser(
        description='Run SpikeForest batch processing')
    parser.add_argument('command', help='clear, prepare, run, assemble')
    parser.add_argument('batch_name', help='Name of the batch')
    args = parser.parse_args()

    batch_name = args.batch_name

    spikeforest_password = os.environ.get('SPIKEFOREST_PASSWORD', '')
    if not spikeforest_password:
        raise Exception('Environment variable not set: SPIKEFOREST_PASSWORD')

    print('Loading batch: ' + batch_name)
    sf.kbucketConfigRemote(name='spikeforest1-readwrite',
                           password=spikeforest_password)
    obj = kb.loadObject(key=dict(batch_name=batch_name))
    if not obj:
        raise Exception('Unable to find batches object.')

    command = args.command
    if command == 'clear':
        sf.sf_batch.clear_job_results(batch_name=batch_name,
                                      incomplete_only=False)
    elif command == 'prepare':
        sf.sf_batch.download_recordings(batch_name=batch_name)
        sf.sf_batch.clear_job_results(batch_name=batch_name,
                                      incomplete_only=True)
    elif command == 'run':
        sf.sf_batch.run_jobs(batch_name=batch_name)
    elif command == 'assemble':
        sf.sf_batch.assemble_job_results(batch_name=batch_name)
    else:
        raise Exception('Unrecognized command: ' + command)
Example No. 29
def run_jobs(*, batch_name):
    batch = kb.loadObject(key=dict(batch_name=batch_name))
    jobs = batch['jobs']
    for job in jobs:
        run_job(job)
Example No. 30
def clear_job_results(*, batch_name, incomplete_only=True):
    batch = kb.loadObject(key=dict(batch_name=batch_name))
    jobs = batch['jobs']
    for job in jobs:
        clear_job_result(job, incomplete_only=incomplete_only)
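Across all of these examples, kb.loadObject and kb.saveObject behave as a key-value object store in which the key is an arbitrary dict and a falsy return value means the object is absent. A minimal round-trip sketch, assuming kb is already configured; the key contents here are made up:

key = dict(name='demo_object', batch_name='my_batch')
kb.saveObject(key=key, object=dict(jobs=[]))
obj = kb.loadObject(key=key)  # the saved dict, or a falsy value if absent
if not obj:
    print('Object not found for key')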