def compute_recording_info(recording):
    out = ComputeRecordingInfo.execute(
        recording_dir=recording['directory'],
        channels=recording.get('channels', []),
        json_out={'ext': '.json'}
    ).outputs['json_out']
    ca.saveFile(path=out)
    return read_json_file(ca.realizeFile(path=out))


def create_timeseries_plot(recording):
    out = CreateTimeseriesPlot.execute(
        recording_dir=recording['directory'],
        channels=recording.get('channels', []),
        jpg_out={'ext': '.jpg'}
    ).outputs['jpg_out']
    return ca.saveFile(path=out, basename='timeseries.jpg')
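A minimal usage sketch, assuming a hypothetical recording dict and that ComputeRecordingInfo, CreateTimeseriesPlot, read_json_file, and the cairio client ca are imported as in the snippets above:

recording = {
    'directory': 'kbucket://some_share/some_recording',  # hypothetical recording directory
    'channels': [0, 1, 2, 3],
}
info = compute_recording_info(recording)            # dict parsed from the json_out file
timeseries_jpg = create_timeseries_plot(recording)  # sha1:// path to timeseries.jpg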
Example #3
def sf_sort_recording(sorter, recording):
    dsdir = recording['directory']
    sorting_params = sorter['params']
    processor_name = sorter['processor_name']
    if processor_name in Processors:
        SS = Processors[processor_name]
    else:
        raise Exception('No such sorter: ' + processor_name)

    outputs = SS.execute(recording_dir=dsdir,
                         channels=recording.get('channels', []),
                         firings_out=dict(ext='.mda'),
                         **sorting_params).outputs
    firings_out = ca.saveFile(path=outputs['firings_out'])
    firings_true_path = recording['directory'] + '/firings_true.mda'
    if not ca.realizeFile(path=firings_true_path):
        firings_true_path = None
    result = dict(recording_name=recording['name'],
                  study_name=recording['study'],
                  sorter_name=sorter['name'],
                  recording_dir=dsdir,
                  channels=recording.get('channels', []),
                  units_true=recording.get('units_true', []),
                  firings_true=firings_true_path,
                  sorting_params=sorting_params,
                  sorting_processor_name=SS.NAME,
                  sorting_processor_version=SS.VERSION,
                  firings=firings_out)
    result['summary'] = sf.summarizeSorting(result)
    if result.get('firings_true', None):
        result['comparison_with_truth'] = sf.compareWithTruth(result)

    return result
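A hedged usage sketch; the sorter and recording dicts below are hypothetical, and Processors, ca, and sf must already be defined as in the function above:

sorter = {
    'name': 'MountainSort4',            # hypothetical display name
    'processor_name': 'MountainSort4',  # must be a key of the Processors dict
    'params': {'detect_sign': -1, 'adjacency_radius': 50},  # example sorter parameters
}
recording = {
    'name': 'rec1',
    'study': 'demo_study',
    'directory': 'kbucket://some_share/rec1',  # hypothetical recording directory
    'channels': [],
    'units_true': [],
}
result = sf_sort_recording(sorter, recording)
print(result['firings'])  # sha1:// path of the sorted firings file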
def create_waveforms_plot(recording, firings):
    out = CreateWaveformsPlot.execute(
        recording_dir=recording['directory'],
        channels=recording.get('channels', []),
        units=recording.get('units_true', []),
        firings=firings,
        jpg_out={'ext': '.jpg'}
    ).outputs['jpg_out']
    return ca.saveFile(path=out, basename='waveforms.jpg')


def sf_summarize_recording(recording):
    ret = deepcopy(recording)
    ret['computed_info'] = compute_recording_info(recording)
    firings_true_path = recording['directory'] + '/firings_true.mda'
    ret['plots'] = dict(
        timeseries=create_timeseries_plot(recording)
    )
    channels = recording.get('channels', None)
    units = recording.get('units_true', None)
    if ca.realizeFile(path=firings_true_path):
        ret['firings_true'] = firings_true_path
        ret['plots']['waveforms_true'] = create_waveforms_plot(recording, ret['firings_true'])
        true_units_info_fname = compute_units_info(
            recording_dir=recording['directory'],
            firings=firings_true_path,
            return_format='filename',
            channel_ids=channels,
            unit_ids=units)
        ret['true_units_info'] = ca.saveFile(
            path=true_units_info_fname,
            basename='true_units_info.json')
    return ret
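And a sketch of calling the summarizer itself, again with a hypothetical recording; deepcopy comes from the standard copy module, and compute_units_info plus the plotting helpers above must be importable:

from copy import deepcopy  # required by sf_summarize_recording

summary = sf_summarize_recording({
    'directory': 'kbucket://some_share/rec1',  # hypothetical recording directory
    'channels': [],
    'units_true': [],
})
print(summary['computed_info'])
print(summary['plots'])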
Example #6
#!/usr/bin/env python

from cairio import client as ca

ca.autoConfig(collection='spikeforest',
              key='spikeforest2-readwrite',
              ask_password=True)
# Upload the SpyKING CIRCUS container image and print its content-addressed sha1:// path
sha1_path = ca.saveFile('spyking_circus.simg')
print(sha1_path)
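The printed sha1:// path can later be used to fetch the uploaded image again; a small sketch using the realizeFile call that appears in the other snippets:

local_path = ca.realizeFile(path=sha1_path)  # download (or locate in the local cache) the image
print(local_path)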
Example #7
#!/usr/bin/env python

from cairio import client as ca

ca.autoConfig(collection='spikeforest',
              key='spikeforest2-readwrite',
              ask_password=True)
sha1_path = ca.saveFile('yass.simg')
print(sha1_path)
Example #8
def _set_job_console_output(*, batch_name, job_index, file_name):
    # Store the job's console output file in the cairio database under a
    # structured key so it can be looked up later by batch name and job index.
    key = dict(name='batcho_job_console_output',
               batch_name=batch_name, job_index=job_index)
    return ca.saveFile(key=key, path=file_name)
Example #9

#!/usr/bin/env python

from cairio import client as ca

ca.autoConfig(collection='spikeforest',
              key='spikeforest2-readwrite',
              ask_password=True)
sha1_path = ca.saveFile('mountaintools_basic.simg')
print(sha1_path)
Example #10
def summarize_recordings(recordings, compute_resource=None):
    print('>>>>>> summarize recordings')
    jobs_info = []
    jobs_timeseries_plot = []
    jobs_units_info = []
    for recording in recordings:
        print('Creating jobs for recording: {}/{}'.format(
            recording.get('study', ''), recording.get('name', '')))
        raw_path = recording['directory'] + '/raw.mda'
        firings_true_path = recording['directory'] + '/firings_true.mda'
        channels = recording.get('channels', None)
        units = recording.get('units_true', None)

        if not ca.findFile(path=firings_true_path):
            raise Exception('firings_true file not found: ' +
                            firings_true_path)
        job = ComputeRecordingInfo.createJob(
            recording_dir=recording['directory'],
            channels=recording.get('channels', []),
            json_out={
                'ext': '.json',
                'upload': True
            },
            _container='default')
        job['files_to_realize'] = [raw_path, firings_true_path]
        jobs_info.append(job)
        # job=CreateTimeseriesPlot.createJob(
        #     recording_dir=recording['directory'],
        #     channels=recording.get('channels',[]),
        #     jpg_out={'ext':'.jpg','upload':True},
        #     _container='default'
        # )
        # jobs_timeseries_plot.append(job)
        job = ComputeUnitsInfo.createJob(recording_dir=recording['directory'],
                                         firings=recording['directory'] +
                                         '/firings_true.mda',
                                         unit_ids=units,
                                         channel_ids=channels,
                                         json_out={
                                             'ext': '.json',
                                             'upload': True
                                         },
                                         _container='default')
        jobs_units_info.append(job)

    # all_jobs=jobs_info+jobs_timeseries_plot+jobs_units_info
    all_jobs = jobs_info + jobs_units_info
    label = 'Summarize recordings'
    mlpr.executeBatch(jobs=all_jobs,
                      label=label,
                      num_workers=None,
                      compute_resource=compute_resource)

    summarized_recordings = []
    for i, recording in enumerate(recordings):
        firings_true_path = recording['directory'] + '/firings_true.mda'

        summary = dict()

        result0 = jobs_info[i]['result']
        summary['computed_info'] = ca.loadObject(
            path=result0['outputs']['json_out'])

        # result0=jobs_timeseries_plot[i]['result']
        # summary['plots']=dict(
        #     timeseries=ca.saveFile(path=result0['outputs']['jpg_out'],basename='timeseries.jpg')
        # )
        summary['plots'] = dict()

        result0 = jobs_units_info[i]['result']
        summary['true_units_info'] = ca.saveFile(
            path=result0['outputs']['json_out'],
            basename='true_units_info.json')

        rec2 = deepcopy(recording)
        rec2['summary'] = summary
        summarized_recordings.append(rec2)

    return summarized_recordings
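A hedged usage sketch; the recordings list below is hypothetical, and ComputeRecordingInfo, ComputeUnitsInfo, mlpr, ca, and deepcopy are assumed to be in scope as in the function above:

recordings = [{
    'study': 'demo_study',
    'name': 'rec1',
    'directory': 'kbucket://some_share/rec1',  # must contain raw.mda and firings_true.mda
    'channels': [],
    'units_true': [],
}]
# compute_resource=None is the default; passing the name of a configured compute
# resource (an assumption about the deployment, not shown here) would dispatch the
# jobs remotely via mlpr.executeBatch.
summarized = summarize_recordings(recordings, compute_resource=None)
print(summarized[0]['summary']['computed_info'])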
Example #11

#!/usr/bin/env python

from cairio import client as ca

ca.autoConfig(collection='spikeforest',
              key='spikeforest2-readwrite',
              ask_password=True)
sha1_path = ca.saveFile('mountainsort4.simg')
print(sha1_path)
Example #12
# Similarly, we can store Python dicts as JSON content
path = ca.saveObject(dict(some='object'), basename='object.json')
print(path)
# Output: sha1://b77fdda467b03d7a0c3e06f6f441f689ac46e817/object.json

retrieved_object = ca.loadObject(path=path)
print(retrieved_object)

# Or store objects by key
ca.saveObject(object=dict(some_other='object'), key=dict(some='key'))
obj = ca.loadObject(key=dict(some='key'))
print(obj)

print('------------------------------------------------')

# You can do the same with files
with open('test___.txt', 'w') as f:
    f.write('some file content')
path = ca.saveFile('test___.txt')
print(path)
# Output: sha1://ee025361a15e3e8074e9c0b44b4f98aabc829b3d/test___.txt

# Then load the text of the file at a later time
txt = ca.loadText(path=path)
print(txt)

# REMOTE DATABASE

# The interesting part comes when we connect to a remote cairio database
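# A minimal sketch of such a connection, modeled on the autoConfig() calls in the
# upload scripts above (the collection and key names are copied from those scripts
# and stand in for whatever remote collection you have access to):
#
#     ca.autoConfig(collection='spikeforest',
#                   key='spikeforest2-readwrite',
#                   ask_password=True)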