Example #1
0
    def __init__(self, dataset_directory, *, download=True):
        """Load an MDA recording dataset from *dataset_directory*.

        Expects the directory to contain ``raw.mda`` (the timeseries),
        ``geom.csv`` (channel geometry) and ``params.json`` (must provide
        ``samplerate``).

        Parameters
        ----------
        dataset_directory : str
            Local path or kbucket URL of the dataset directory.
        download : bool, keyword-only
            If True (default), a remote timeseries is downloaded to the
            local cache; otherwise only a (possibly remote) reference is kept.
        """
        # `ca` is the cairio/kbucket client returned by the lazy loader.
        ca = _load_required_modules()

        RecordingExtractor.__init__(self)
        self._dataset_directory = dataset_directory
        timeseries0 = dataset_directory + '/raw.mda'
        self._dataset_params = read_dataset_params(dataset_directory)
        # Multiply by 1.0 so the sample rate is always a float.
        self._samplerate = self._dataset_params['samplerate'] * 1.0
        if is_kbucket_url(timeseries0):
            # findFile resolves the kbucket URL; if the result is still a
            # URL the file is not in the local cache and needs downloading.
            download_needed = is_url(ca.findFile(path=timeseries0))
        else:
            download_needed = is_url(timeseries0)
        if download and download_needed:
            print('Downloading file: ' + timeseries0)
            # realizeFile downloads (if necessary) and returns a local path.
            self._timeseries_path = ca.realizeFile(path=timeseries0)
            print('Done.')
        else:
            # No download requested or needed: keep whatever findFile resolves.
            self._timeseries_path = ca.findFile(path=timeseries0)
        geom0 = dataset_directory + '/geom.csv'
        self._geom_fname = ca.realizeFile(path=geom0)
        # One CSV row per channel: that channel's spatial coordinates.
        self._geom = np.genfromtxt(self._geom_fname, delimiter=',')
        X = DiskReadMda(self._timeseries_path)
        # Sanity check: geometry rows must match the channel count (N1).
        if self._geom.shape[0] != X.N1():
            raise Exception(
                'Incompatible dimensions between geom.csv and timeseries file {} <> {}'.format(self._geom.shape[0],
                                                                                               X.N1()))
        self._num_channels = X.N1()
        self._num_timepoints = X.N2()
        for m in range(self._num_channels):
            self.setChannelProperty(m, 'location', self._geom[m, :])
Example #2
0
def _download_recordings(*, jobs):
    """Ensure the raw timeseries for each pending job is cached locally.

    For every job whose result is not already stored (``ca.getValue``
    returns falsy) and that names a recording directory, realize
    ``<directory>/raw.mda`` through the global ``ca`` client, which
    downloads the file if necessary.
    """
    # The original enumerate() index was unused; iterate jobs directly and
    # use guard clauses instead of three levels of nesting.
    for job in jobs:
        if ca.getValue(key=job):
            continue  # result already stored; nothing to fetch
        rec = job.get('recording')
        if not rec or 'directory' not in rec:
            continue  # no recording directory to realize
        fname = rec['directory'] + '/raw.mda'
        print('REALIZING FILE: ' + fname)
        ca.realizeFile(path=fname)
Example #3
0
def sf_sort_recording(sorter, recording):
    """Run one spike sorter on one recording and summarize the result.

    Looks up the sorter's processor in the global ``Processors`` registry,
    executes it on the recording directory, saves the firings output, and
    returns a result dict with a summary attached (plus a comparison with
    ground truth when ``firings_true.mda`` exists).
    """
    recording_dir = recording['directory']
    params = sorter['params']
    name = sorter['processor_name']
    # Guard clause instead of if/else around the registry lookup.
    if name not in Processors:
        raise Exception('No such sorter: ' + name)
    SS = Processors[name]

    outputs = SS.execute(recording_dir=recording_dir,
                         channels=recording.get('channels', []),
                         firings_out=dict(ext='.mda'),
                         **params).outputs
    firings_out = ca.saveFile(path=outputs['firings_out'])

    # Ground-truth firings are optional; record None when unavailable.
    firings_true_path = recording['directory'] + '/firings_true.mda'
    if not ca.realizeFile(path=firings_true_path):
        firings_true_path = None

    result = {
        'recording_name': recording['name'],
        'study_name': recording['study'],
        'sorter_name': sorter['name'],
        'recording_dir': recording_dir,
        'channels': recording.get('channels', []),
        'units_true': recording.get('units_true', []),
        'firings_true': firings_true_path,
        'sorting_params': params,
        'sorting_processor_name': SS.NAME,
        'sorting_processor_version': SS.VERSION,
        'firings': firings_out,
    }
    result['summary'] = sf.summarizeSorting(result)
    if result['firings_true']:
        result['comparison_with_truth'] = sf.compareWithTruth(result)

    return result
def compute_recording_info(recording):
    """Compute summary info for *recording* via the ComputeRecordingInfo processor.

    Runs the processor on the recording's directory, registers the JSON
    output with the ``ca`` content store, then loads and returns it as a dict.
    """
    # Normalized to the file's 4-space indentation convention.
    out = ComputeRecordingInfo.execute(
        recording_dir=recording['directory'],
        channels=recording.get('channels', []),
        json_out={'ext': '.json'}
    ).outputs['json_out']
    # saveFile is called for its side effect (registering the output);
    # its return value is deliberately ignored here.
    ca.saveFile(path=out)
    return read_json_file(ca.realizeFile(path=out))
Example #5
0
    def __init__(self, firings_file):
        """Load an MDA firings file (spike event times and unit labels).

        Parameters
        ----------
        firings_file : str
            Local path, URL, or kbucket URL of a ``firings.mda`` file.
        """
        # `ca` is the cairio/kbucket client returned by the lazy loader.
        ca = _load_required_modules()

        SortingExtractor.__init__(self)
        if is_kbucket_url(firings_file):
            # findFile resolves the kbucket URL; a URL result means the
            # file is not cached locally yet.
            download_needed = is_url(ca.findFile(path=firings_file))
        else:
            download_needed = is_url(firings_file)
        if download_needed:
            print('Downloading file: ' + firings_file)
            self._firings_path = ca.realizeFile(path=firings_file)
            print('Done.')
        else:
            # NOTE(review): both branches call realizeFile — the branch only
            # controls the progress messages. The analogous recording loader
            # uses findFile in its else-branch; confirm which is intended.
            self._firings_path = ca.realizeFile(path=firings_file)
        self._firings = readmda(self._firings_path)
        # Firings array rows: presumably 0 = primary channel, 1 = event
        # times, 2 = unit labels — TODO confirm against the MDA convention.
        self._times = self._firings[1, :]
        self._labels = self._firings[2, :]
        self._unit_ids = np.unique(self._labels).astype(int)
Example #6
0
def read_dataset_params(dsdir):
    """Read and parse ``params.json`` from dataset directory *dsdir*.

    The file is resolved through the global ``ca`` client, so *dsdir* may
    refer to a remote (kbucket) location.

    Raises
    ------
    Exception
        If the parameter file cannot be found or realized locally.
    """
    # (Removed dead commented-out loader line; the module-level `ca` is used.)
    fname1 = dsdir + '/params.json'
    fname2 = ca.realizeFile(path=fname1)
    if not fname2:
        raise Exception('Unable to find file: ' + fname1)
    if not os.path.exists(fname2):
        raise Exception('Dataset parameter file does not exist: ' + fname2)
    with open(fname2) as f:
        return json.load(f)
Example #7
0
 def _on_view_true_unit_autocorrelograms(self):
     """Render autocorrelograms of the ground-truth units and display them.

     Runs the PlotAutoCorrelograms processor on the current recording's
     directory and its ``firings_true.mda``, then shows the resulting
     JPEG in an ImageView and refreshes the widget.
     """
     dirname = self._recording.directory()
     img = PlotAutoCorrelograms.execute(recording_dir=dirname,
                                        channels=[],
                                        firings=dirname +
                                        '/firings_true.mda',
                                        plot_out={
                                            'ext': '.jpg'
                                        }).outputs['plot_out']
     # Resolve the processor output to a local file path for display.
     img = ca.realizeFile(img)
     self._view = ImageView(img)
     self.refresh()
Example #8
0
 def plot(self, name, format='image'):
     """Return the named summary plot.

     ``format`` selects the return type: 'image' (default) opens the
     realized file as a PIL Image, 'path' returns the local file path,
     'url' returns the stored URL without realizing the file. Raises
     KeyError for an unknown plot name and Exception for an unknown
     format.
     """
     url = self._obj['summary'].get('plots', dict())[name]
     # Early returns instead of a nested if/elif chain.
     if format == 'url':
         return url
     path = ca.realizeFile(url)
     if format == 'image':
         return Image.open(path)
     if format == 'path':
         return path
     raise Exception('Invalid format: ' + format)
def sf_summarize_recording(recording):
    """Return a copy of *recording* enriched with computed info and plots.

    Adds ``computed_info``, a ``plots`` dict (timeseries plot, plus true
    waveforms when ground truth exists) and — when ``firings_true.mda``
    is present — ``firings_true`` and ``true_units_info``.
    """
    # Normalized to the file's 4-space indentation convention.
    ret = deepcopy(recording)
    ret['computed_info'] = compute_recording_info(recording)
    firings_true_path = recording['directory'] + '/firings_true.mda'
    ret['plots'] = dict(
        timeseries=create_timeseries_plot(recording)
    )
    channels = recording.get('channels', None)
    units = recording.get('units_true', None)
    # Ground truth is optional; only attach truth-derived fields when the
    # firings_true file can actually be realized.
    if ca.realizeFile(path=firings_true_path):
        ret['firings_true'] = firings_true_path
        ret['plots']['waveforms_true'] = create_waveforms_plot(
            recording, ret['firings_true'])
        true_units_info_fname = compute_units_info(
            recording_dir=recording['directory'],
            firings=firings_true_path,
            return_format='filename',
            channel_ids=channels,
            unit_ids=units)
        ret['true_units_info'] = ca.saveFile(
            path=true_units_info_fname, basename='true_units_info.json')
    return ret
Example #10
0
def compute_units_info(*, recording_dir, firings,
                       channel_ids=None, unit_ids=None,
                       return_format='json'):
    """Run the ComputeUnitsInfo processor and return its JSON output.

    Parameters
    ----------
    recording_dir : str
        Directory of the recording to analyze.
    firings : str
        Path to the firings file.
    channel_ids, unit_ids : list, optional
        Restrict the computation; an empty list (the effective default)
        means all channels/units.
    return_format : str
        'json' (default) returns the parsed dict; 'filename' returns the
        path of the JSON output file.
    """
    # Defaults were mutable lists ([]); use None sentinels to avoid the
    # shared-mutable-default pitfall while keeping behavior identical.
    if channel_ids is None:
        channel_ids = []
    if unit_ids is None:
        unit_ids = []
    out = ComputeUnitsInfo.execute(
        recording_dir=recording_dir,
        firings=firings,
        unit_ids=unit_ids,
        channel_ids=channel_ids,
        json_out={'ext': '.json'},
        _container='default'
    ).outputs
    fname = out['json_out']
    if return_format == 'filename':
        return fname
    fname = ca.realizeFile(path=fname)
    with open(fname) as f:
        return json.load(f)
Exemple #11
0
 def realizeRecordingFile(self):
     """Return a local path to this recording's raw.mda, realizing it if needed."""
     return ca.realizeFile(self.directory() + '/raw.mda')
Exemple #12
0
 def realizeFiringsTrueFile(self):
     """Return a local path to this recording's firings_true.mda, realizing it if needed."""
     return ca.realizeFile(self.directory() + '/firings_true.mda')
Exemple #13
0
# Demo: key/value and content-addressable storage through the `ca` client.
print(ca.getValue(key='some_key2', password='******'))

print('------------------------------------------------')

# Local storage of data and files, retrievable by SHA-1 hash

# saveText stores the text and returns a sha1:// address for it.
path = ca.saveText('This is some text', basename='test.txt')
print(path)
# Output: sha1://482cb0cfcbed6740a2bcb659c9ccc22a4d27b369/test.txt

# Later we can use this to retrieve the text
txt = ca.loadText(path=path)
print(txt)

# ... or retrieve the path to a local file containing the text
fname = ca.realizeFile(path=path)
print(fname)
# Output: /tmp/sha1-cache/4/82/482cb0cfcbed6740a2bcb659c9ccc22a4d27b369

# Or we can store some large text by key and retrieve it later
ca.saveText(key=dict(name='key-for-repeating-text'),
            text='some large repeating text'*100)
txt = ca.loadText(key=dict(name='key-for-repeating-text'))
print(len(txt))  # Output: 2500

print('------------------------------------------------')

# Similarly we can store python dicts via json content
path = ca.saveObject(dict(some='object'), basename='object.json')
print(path)
# Output: sha1://b77fdda467b03d7a0c3e06f6f441f689ac46e817/object.json