Code example #1
def add_sorting(self, *, recording_id: str, label: str,
                sorting: se.SortingExtractor):
    # Generate a unique ID for the new sorting and validate inputs
    sorting_id = 'S-' + _random_id()
    if recording_id not in self._recordings:
        raise Exception(f'Recording not found: {recording_id}')
    if sorting_id in self._sortings:
        raise Exception(f'Duplicate sorting ID: {sorting_id}')
    le_recording = self._recordings[recording_id]
    # Store the sorting object in kachery and assemble the record,
    # carrying along the parent recording's path and object
    x = {
        'sortingId': sorting_id,
        'sortingLabel': label,
        'sortingPath': kp.store_object(sorting.object(),
                                       basename=f'{label}.json'),
        'sortingObject': sorting.object(),
        'recordingId': recording_id,
        'recordingPath': le_recording['recordingPath'],
        'recordingObject': le_recording['recordingObject'],
        'description': f'Imported from Python: {label}'
    }
    # Record the import on the workspace's 'sortings' subfeed
    sortings_subfeed = self._feed.get_subfeed(
        dict(workspaceName=self._workspace_name, key='sortings'))
    _import_le_sorting(sortings_subfeed, x)
    self._sortings[sorting_id] = x
    return x
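
The helper _random_id is not shown in this listing; a minimal sketch of what such a helper could look like (hypothetical, assuming short lowercase alphanumeric ID suffixes):

import random


def _random_id(num_chars: int = 10) -> str:
    # Hypothetical helper (not from the source): generate a short random
    # alphanumeric string to serve as the ID suffix, e.g. 'k3f9c1a2b7'.
    choices = 'abcdefghijklmnopqrstuvwxyz0123456789'
    return ''.join(random.choice(choices) for _ in range(num_chars))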
Code example #2
import json

import kachery_p2p as kp


def main():
    # Example: configure a labbox deployment.
    # Run this script and then follow the instructions it prints.

    # # Replace this with some name for your config
    # config_name = 'dubb'
    # # Replace this with the URI of the compute resource
    # crfeed_uri = 'feed://4dd6d6aa9e1d7be35e7374e6b35315bffefcfec27a9af36fa2e30bfd6753c5dc?name=dubb'

    # Replace this with some name for your config
    config_name = 'ephys1'
    # Replace this with the URI of the compute resource
    crfeed_uri = 'feed://09b27ce6c71add9fe6effaf351fce98d867d6fa002333a8b06565b0a108fb0ba?name=ephys1'

    config = {
        'job_handlers': {
            'local': {
                'type': 'local'
            },
            'partition1': {
                'type': 'remote',
                'uri': crfeed_uri,
                'cr_partition': 'partition1'
            },
            'partition2': {
                'type': 'remote',
                'uri': crfeed_uri,
                'cr_partition': 'partition2'
            },
            'partition3': {
                'type': 'remote',
                'uri': crfeed_uri,
                'cr_partition': 'partition3'
            },
            'timeseries': {
                'type': 'local'
            }
        }
    }
    config_uri = kp.store_object(config,
                                 basename=f'labbox_config_{config_name}.json')
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print(json.dumps(config, indent=4))
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('')
    print(f'Configuration URI: {config_uri}')
    print('')
    print('To use this config, set the following environment variable in the deployed container:')
    print(f'LABBOX_CONFIG_URI={config_uri}')
    print('')
    print(
        'You must also ensure that the kachery-p2p node of the deployed container has access to the compute resource'
    )
Code example #3
import json
import random
from typing import Any

import kachery_p2p as kp


def self_tag(myobject: Any, filename: str):
    myobject.pop('self_reference', None) # delete the self_reference key if it exists
    if DRY_RUN:
        new_ref = str(random.randrange(100000, 9999990)) + ' (pretend)'
    else:
        if VERBOSE: print(f'kachery-storing object {filename}.')
        new_ref = kp.store_object(myobject, basename=filename)
    myobject['self_reference'] = new_ref

    if VERBOSE: print(f'Persisting object with self-reference {new_ref}...')
    if VERY_VERBOSE: print(f'Persist object {myobject} with self-reference {new_ref}')
    if not DRY_RUN:
        with open(filename, 'w') as f:
            f.write(json.dumps(myobject, indent=4))
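
A possible round trip for self_tag, with the module globals it references stubbed in (the values here are illustrative, not from the source):

DRY_RUN = True
VERBOSE = True
VERY_VERBOSE = False

obj = {'name': 'example-study', 'self_reference': 'stale-ref'}
self_tag(obj, 'example-study.json')
# In a dry run the stale reference is replaced with a pretend one and
# nothing is written to disk or stored in kachery.
print(obj['self_reference'])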
Code example #4
import os

import numpy as np
import kachery as ka
import kachery_p2p as kp


def main():
    test1()  # defined elsewhere in the same test script

    f = kp.load_feed('feed://' + os.environ['FEED_ID'])
    N1 = 10000
    N2 = 1000
    a = kp.store_npy(np.meshgrid(np.arange(N1), np.arange(N2))[0])
    sf = f.get_subfeed('sf1')
    sf.append_message({'a': a, 'N1': N1, 'N2': N2})

    # test invalid manifest
    b = kp.store_npy(np.meshgrid(np.arange(N1 + 1), np.arange(N2))[0])
    invalid_manifest = kp.store_object({'invalid': True})
    b_invalid_manifest = b.split('?')[0] + '?manifest=' + ka.get_file_hash(
        invalid_manifest)
    sf.append_message({'b_invalid_manifest': b_invalid_manifest})
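
A consuming process might verify the round trip along these lines (a sketch; it assumes the kachery-p2p subfeed reader API get_next_messages, and that the np.meshgrid result has shape (N2, N1)):

# Sketch of a reader: load the same feed, pull the messages back, and
# fetch the stored array from the kachery network.
f2 = kp.load_feed('feed://' + os.environ['FEED_ID'])
sf2 = f2.get_subfeed('sf1')
for msg in sf2.get_next_messages(wait_msec=1000):
    if 'a' in msg:
        arr = kp.load_npy(msg['a'])
        assert arr.shape == (msg['N2'], msg['N1'])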
Code example #5
def add_recording(self, *, label: str, recording: se.RecordingExtractor):
    recording_id = 'R-' + _random_id()
    if recording_id in self._recordings:
        raise Exception(f'Duplicate recording ID: {recording_id}')
    x = {
        'recordingId': recording_id,
        'recordingLabel': label,
        'recordingPath': kp.store_object(recording.object(),
                                         basename=f'{label}.json'),
        'recordingObject': recording.object(),
        'description': f'Imported from Python: {label}'
    }
    recordings_subfeed = self._feed.get_subfeed(
        dict(workspaceName=self._workspace_name, key='recordings'))
    _import_le_recording(recordings_subfeed, x)
    self._recordings[recording_id] = x
    return recording_id
Code example #6
def prepare_mh_sortings(le_recordings_by_id):
    x = [
        {
            'recording_id': 'allen_mouse419112_probeE',
            'sorter_name': 'hdsort',
            'npz_uri': 'sha1://28efb237ea07041eb94993a316c53c1f22f59c64/hdsort.npz?manifest=32dc916a479afa8fc7932c12818750bc6b3b9956'
        },
        {
            'recording_id': 'allen_mouse419112_probeE',
            'sorter_name': 'herdingspikes',
            'npz_uri': 'sha1://ad8ecc05529ca124ba204a9110c7a50d3f0916e0/herdingspikes.npz?manifest=640cd612d3791a4dd18b0fd704716a021ac6170b'
        },
        {
            'recording_id': 'allen_mouse419112_probeE',
            'sorter_name': 'ironclust',
            'npz_uri': 'sha1://63f8577ee830f6e854fa37fa6dc9f300ddf5dcd2/ironclust.npz?manifest=63da49e17999ecf6082a1b7b1fcd50574a83ff57'
        },
        {
            'recording_id': 'allen_mouse419112_probeE',
            'sorter_name': 'kilosort2',
            'npz_uri': 'sha1://b5cc1eed184a9cb544cd11f49141fe59e12d473c/kilosort2.npz?manifest=7d4ec32d692c9ed3b72aaefcf0c31aa0352ec95b'
        },
        {
            'recording_id': 'allen_mouse419112_probeE',
            'sorter_name': 'spykingcircus',
            'npz_uri': 'sha1://cb07e45b1b969ebfa5a29faf7156585365104349/spykingcircus.npz?manifest=1fdb0dd7642a816db185e975bf43c85fa9bb6578'
        },
        {
            'recording_id': 'svoboda-SC026_080619_g0_tcat_imec0',
            'sorter_name': 'hdsort',
            'npz_uri': 'sha1://dda1bfa8074c4a391bd941e6a341e493a0737768/hdsort.npz?manifest=6b0b78fe3508d1ddfed26b8666df1b7d94231c69'
        },
        {
            'recording_id': 'svoboda-SC026_080619_g0_tcat_imec0',
            'sorter_name': 'herdingspikes',
            'npz_uri': 'sha1://6136940a5e7d2beca95c35f3e000d38ce4d5e596/herdingspikes.npz?manifest=8ab6d0e2050d07f0c39e6dfb391c85513803e5ca'
        },
        {
            'recording_id': 'svoboda-SC026_080619_g0_tcat_imec0',
            'sorter_name': 'ironclust',
            'npz_uri': 'sha1://9bd3a55848d0ca9e98f899653e9554d965dbf6f1/ironclust.npz?manifest=dbf4ed27e7ce9e3fb4e6f00166423765c16bb161'
        },
        {
            'recording_id': 'svoboda-SC026_080619_g0_tcat_imec0',
            'sorter_name': 'kilosort2',
            'npz_uri': 'sha1://7c5100ee4cb77969a4697b524d12727315ac8f1e/kilosort2.npz?manifest=e36011dee42181fb4dcd76764658b848060a51f1'
        },
        {
            'recording_id': 'svoboda-SC026_080619_g0_tcat_imec0',
            'sorter_name': 'tridesclous',
            'npz_uri': 'sha1://20ac56455bc10c1c42c266d1773a4a58b258786f/tridesclous.npz?manifest=400f5b9a20d0bb3575f8e98859440db38aaccca7'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'hdsort',
            'npz_uri': 'sha1://d809e0ced7b37c059ee57fbda2f988a5b8dc1a55/hdsort.npz?manifest=fce43cc1a2850e0e7805a98539f24c0816a218e3'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'herdingspikes',
            'npz_uri': 'sha1://6b551be075b72dfa5c8df9a43541219630821197/herdingspikes.npz?manifest=b8ece277f8520feae2056f308e3269b6bd32e7a0'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'ironclust',
            'npz_uri': 'sha1://dfd2eaa009f6bc5b5c3f7eb979d0335f412cd575/ironclust.npz?manifest=0d9cedcf83a0de06be1a620777b2a5838e3c0d12'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'kilosort2',
            'npz_uri': 'sha1://3cf9943dedeb5f39344672ff701eebf12830d075/kilosort2.npz?manifest=8bbe8e6a536e63a274a3bd2e05ecc03116840855'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'spykingcircus',
            'npz_uri': 'sha1://d855d5314f36470719da17e4e5d2f48c808e65d3/spykingcircus.npz?manifest=af8e6189126b228ecde19237fb7a21807c7e2feb'
        },
        {
            'recording_id': 'cortexlab-single-phase-3',
            'sorter_name': 'tridesclous',
            'npz_uri': 'sha1://927721485f61cc9322536a8e9b457088b9dc16c7/tridesclous.npz?manifest=ee127bacf3d27de75b69313920af4691dd09c309'
        },
    ]
    le_sortings = []
    for a in x:
        recording_id = a['recording_id']
        sorter_name = a['sorter_name']
        print(f'{recording_id} {sorter_name}')
        npz_uri = a['npz_uri']
        d = kp.load_npy(npz_uri)
        print(d)
        sorting_object = {
            'sorting_format': 'npy2',
            'data': {
                'npz_uri': npz_uri,
                'unit_ids': d['unit_ids'].tolist(),
                'sampling_frequency': float(d['sampling_frequency'])
            }
        }
        sorting_path = kp.store_object(sorting_object, basename=recording_id + '--' + sorter_name + '.json')
        le_recording = le_recordings_by_id[recording_id]
        print(sorting_path)
        le_sortings.append(dict(
            sortingId=recording_id + ':mh-' + sorter_name,
            sortingLabel=recording_id + ':mh-' + sorter_name,
            sortingPath=sorting_path,
            sortingObject=sorting_object,

            recordingId=recording_id,
            recordingPath=le_recording['recordingPath'],
            recordingObject=le_recording['recordingObject'],

            tags=['contributed'],

            description=f'''
            {sorter_name} applied to {recording_id} (contributed by M. Hennig)
            '''.strip()
        ))
    return le_sortings
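
prepare_mh_sortings expects le_recordings_by_id to map each recording ID to a record with at least recordingPath and recordingObject; one plausible way to build that mapping from a list of recording entries like those created in the other examples:

# Sketch: index recording entries by their recordingId so the sortings
# can pick up the parent recording's path and object.
le_recordings_by_id = {r['recordingId']: r for r in le_recordings}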
Code example #7
        sortings.append_message(dict(
            action=action
        ))
    x = f.create_snapshot([
        dict(documentId='default', key='recordings'),
        dict(documentId='default', key='sortings')
    ])
    print(x.get_uri())
finally:
    f.delete()

known_recordings_dict = dict(
    recordings=le_recordings,
    sortings=le_sortings
)
known_recordings_uri = kp.store_object(known_recordings_dict, basename='known_recordings.json')

print('Uploading files to compute resource')
with hi.RemoteJobHandler(compute_resource_uri=compute_resource_uri) as jh:
    with hi.Config(job_handler=jh, container=True):
        upload_files_to_compute_resource([
            known_recordings_uri,
            x.get_uri(),
            [
                dict(
                    sortingId=x['sortingId'],
                    sortingPath=x['sortingPath'],
                    sortingObject=x['sortingObject']
                )
                for x in le_sortings
            ]
        ])
Code example #8
        print(f"Updated study file {fname} to the following:\n{hydrated_file}")
    return hydrated_file


import json
from os import chdir, getcwd, listdir
from os.path import isdir, isfile, join, split

import kachery_p2p as kp


def main():
    cwd = getcwd()
    dirs = [join(cwd, d) for d in listdir(cwd) if isdir(join(cwd, d))]
    files = [f for f in listdir(cwd) if isfile(join(cwd, f))]
    directory_tail = split(cwd)[1]

    for f in files:
        if not check_file_relevance(f, directory_tail): continue
        print(f'Handling file: {f}')
        result = process_file(f, cwd)
        if result is not None:
            superset['StudySets'].append(result)

    for d in dirs:
        # Traverse subdirectories depth-first; cycles and stack overflow are not expected in the intended use case.
        print(f"\t\tcd to {d}")
        chdir(d)
        main()


if __name__ == "__main__":
    main()
    if VERY_VERBOSE:
        print(json.dumps(superset, indent=4))
    if len(superset['StudySets']) > 0 and not DRY_RUN:
        print(kp.store_object(superset, basename='studysets.json'))
Code example #9
import spikeextractors as se
import numpy as np
import labbox_ephys as le
import kachery_p2p as kp

# Adjust these values
recording_label = 'despy_tet3'
sorting_label = 'sorting'
recording_nwb_path = '<path or URI of nwb recording>'
sorting_nwb_path = '<path or URI of nwb sorting>'
workspace_uri = '{workspaceUri}'

recording_uri = kp.store_object({
    'recording_format': 'nwb',
    'data': {
        'path': recording_nwb_path
    }
})
sorting_uri = kp.store_object({
    'sorting_format': 'nwb',
    'data': {
        'path': sorting_nwb_path
    }
})

sorting = le.LabboxEphysSortingExtractor(sorting_uri, samplerate=30000)
recording = le.LabboxEphysRecordingExtractor(recording_uri, download=True)

workspace = le.load_workspace(workspace_uri)
print(f'Workspace URI: {workspace.uri}')
R_id = workspace.add_recording(recording=recording, label=recording_label)
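
Going by the add_sorting shown in code example #1 (keyword-only recording_id, label, and sorting arguments), the script would plausibly finish like this:

# Attach the sorting to the recording that was just added; per code
# example #1, add_sorting returns the new sorting record.
sorting_record = workspace.add_sorting(
    recording_id=R_id,
    label=sorting_label,
    sorting=sorting
)
print(f"Sorting ID: {sorting_record['sortingId']}")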