Example #1
def import_sorting(*, feed: kp.Feed, workspace_name: str,
                   recording: se.RecordingExtractor,
                   sorting: se.SortingExtractor, recording_id: str,
                   sorting_label: str):
    sorting_id = 'S-' + random_id()
    x = {
        'sortingId': sorting_id,
        'sortingLabel': sorting_label,
        'sortingPath': ka.store_object(sorting.object(),
                                       basename=f'{sorting_label}.json'),
        'sortingObject': sorting.object(),
        'recordingId': recording_id,
        'recordingPath': ka.store_object(recording.object(),
                                         basename=f'{recording_id}.json'),
        'recordingObject': recording.object(),
        'description': f'Imported from Python: {sorting_label}'
    }
    sortings_subfeed = feed.get_subfeed(
        dict(workspaceName=workspace_name, key='sortings'))
    _import_le_sorting(sortings_subfeed, x)
    return x
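Note: `random_id` is a helper not shown in this excerpt. A minimal sketch of a plausible implementation (the length and alphabet are assumptions):

import random
import string

def random_id(num_chars: int = 10) -> str:
    # Hypothetical helper: short alphanumeric id, e.g. 'a1b2c3d4e5', used to build the 'S-...' sorting id
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(num_chars))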
Example #2
def prepare_cortexlab_drift_datasets():
    recording_obj_dataset1, manip_timestamps1, manip_positions1 = prepare_recording(
        bin_uri=
        'sha1://294a665f4e4de1c7377a47182941d22da45d6ff7/steinmetz_dataset1.p2_g0_t0.imec0.ap.bin?manifest=dc01ff169e44b538e3c009b10783b43f57c068e6',
        bin_file_size=45205648180,
        raw_num_channels=
        385,  # guessing this so that bin_file_size is divisible by raw_num_channels*2
        chanmap_mat_uri=
        'sha1://4693f77e3883861f28dc2a634f0e1e5776bc7167/dataset1/NP2_kilosortChanMap.mat',
        manip_timestamps_uri=
        'sha1://1117aac1f15e441fc82854a736e52e4b87e6d90c/dataset1/manip.timestamps_p2.npy',
        manip_positions_uri=
        'sha1://9d4e8e9265573707cd1890eefa50fda6a8bd8ae5/manip.positions.npy',
        meta_uri=
        'sha1://6cd209edd2221d8814f12ad883220482a5bde3ff/dataset1/p2_g0_t0.imec0.ap.meta'  # perhaps will use in future
    )
    recording_obj_dataset2, manip_timestamps2, manip_positions2 = prepare_recording(
        bin_uri=
        'sha1://840a6e81e9c7e6e0f9aedc8a17ce32fb22fe3eb3/steinmetz_dataset2.p2_g1_t0.imec0.ap.bin?manifest=e73b452d6e09b6495024b85835b21a7a72dd6a5a',
        bin_file_size=62099840880,
        raw_num_channels=
        385,  # guessing this so that bin_file_size is divisible by raw_num_channels*2
        chanmap_mat_uri=
        'sha1://4693f77e3883861f28dc2a634f0e1e5776bc7167/dataset1/NP2_kilosortChanMap.mat',  # assuming same as dataset1
        manip_timestamps_uri=
        'sha1://b03ea67a69cbbcba214582cf6de1154bcf6b1f92/manip.timestamps.npy',
        manip_positions_uri=
        'sha1://9d4e8e9265573707cd1890eefa50fda6a8bd8ae5/manip.positions.npy',
        meta_uri=
        'sha1://b7d175b3ddbe73d802244b209be58230a965f394/p2_g1_t0.imec0.ap.meta'  # perhaps will use in future
    )

    le_recordings = []
    le_recordings.append(
        dict(recordingId='cortexlab-drift-dataset1',
             recordingLabel='cortexlab-drift-dataset1',
             recordingPath=ka.store_object(
                 recording_obj_dataset1,
                 basename='cortexlab-drift-dataset1.json'),
             recordingObject=recording_obj_dataset1,
             description='''
        Neuropixels 2 recording with imposed drift (dataset1).
        '''.strip()))
    le_recordings.append(
        dict(recordingId='cortexlab-drift-dataset2',
             recordingLabel='cortexlab-drift-dataset2',
             recordingPath=ka.store_object(
                 recording_obj_dataset2,
                 basename='cortexlab-drift-dataset2.json'),
             recordingObject=recording_obj_dataset2,
             description='''
        Neuropixels 2 recording with imposed drift (dataset2).
        '''.strip()))

    return le_recordings
Example #3
def _internal_serialize_result(result):
    import kachery as ka
    ret: Dict[str, Any] = dict(
        output_files=dict()
    )
    ret['name'] = 'hither_result'

    ret['runtime_info'] = deepcopy(result.runtime_info)
    ret['runtime_info']['console_out'] = ka.store_object(ret['runtime_info'].get('console_out', ''))

    for oname in result._output_names:
        path = getattr(result.outputs, oname)._path
        if path is not None:
            ret['output_files'][oname] = ka.store_file(path)
        else:
            ret['output_files'][oname] = None

    ret['retval'] = result.retval
    ret['success'] = result.success
    ret['version'] = result.version
    ret['container'] = result.container
    ret['hash_object'] = result.hash_object
    ret['hash'] = ka.get_object_hash(result.hash_object)
    ret['status'] = result.status
    return ret
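The serialized result references the console output and output files by kachery URI. A hedged sketch of reading them back, using only the load functions that appear elsewhere in these examples (the function name is hypothetical):

import kachery as ka

def _internal_load_serialized_result(ret):
    # Assumed inverse of the serializer above: fetch console output and local paths of output files
    console_out = ka.load_object(ret['runtime_info']['console_out'])
    output_paths = {
        oname: (ka.load_file(uri) if uri is not None else None)
        for oname, uri in ret['output_files'].items()
    }
    return console_out, output_paths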
Example #4
def run_test(test_nodes, tmpdir):
    api_port = 30001
    try:
        # Start the daemons
        for tn in test_nodes:
            d = TestDaemon(label='d',
                           channels=tn['channels'],
                           api_port=api_port,
                           storage_dir=tmpdir +
                           f'/test_storage_{api_port}_{_randstr(5)}',
                           port=tn['port'],
                           websocket_port=tn['websocket_port'],
                           bootstraps=tn['bootstraps'],
                           isbootstrap=tn['isbootstrap'],
                           nomulticast=True)
            tn['daemon'] = d
            tn['api_port'] = api_port
            print(f'starting daemon: {tn["name"]}')
            d.start()
            api_port = api_port + 1

        # pause
        time.sleep(0.5)

        # Store some objects
        for tn in test_nodes:
            d = tn['daemon']
            tn['uris'] = []
            with d.testEnv():
                import kachery as ka
                for obj in tn['objects_to_store']:
                    uri = ka.store_object(obj)
                    tn['uris'].append(uri)

        # Pause
        time.sleep(10)

        # Load the objects
        for tn in test_nodes:
            d = tn['daemon']
            with d.testEnv():
                import kachery as ka
                import kachery_p2p as kp
                for tn2 in test_nodes:
                    if tn['name'] != tn2['name']:
                        for uri in tn2['uris']:
                            print(
                                f'Node {tn["name"]} is loading {uri} from node {tn2["name"]}'
                            )
                            obj = kp.load_object(uri)
                            assert (obj is not None)
    finally:
        with PreventKeyboardInterrupt():
            for tn in test_nodes:
                d = tn['daemon']
                print(f'stopping daemon: {tn["name"]}')
                try:
                    d.stop()
                except:
                    print('WARNING: Failed to stop daemon.')
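For reference, `run_test` expects each entry of `test_nodes` to provide the keys accessed above. A hypothetical two-node configuration (the port numbers and the bootstraps format are assumptions):

test_nodes = [
    dict(name='node1', channels=['testchannel'], port=14501, websocket_port=14502,
         bootstraps=None, isbootstrap=True, objects_to_store=[{'a': 1}]),
    dict(name='node2', channels=['testchannel'], port=14511, websocket_port=14512,
         bootstraps=['localhost:14501'], isbootstrap=False, objects_to_store=[{'b': 2}])
]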
Example #5
def main():
    snippets_h5_uri = 'sha1://55c0cb6a63231236b6948b0dd422e6fedc75c5b5/real_snippets.h5?manifest=b124474caccccdba135d9550ec544a88caf531aa'
    recording_obj = {
        'recording_format': 'snippets1',
        'data': {
            'snippets_h5_uri': snippets_h5_uri
        }
    }
    sorting_obj = {
        'sorting_format': 'snippets1',
        'data': {
            'snippets_h5_uri': snippets_h5_uri
        }
    }
    recording = le.LabboxEphysRecordingExtractor(recording_obj)
    sorting = le.LabboxEphysSortingExtractor(sorting_obj)
    print(recording.get_sampling_frequency())
    print(recording.get_channel_ids())

    le_recordings = []
    le_sortings = []

    le_recordings.append(
        dict(recordingId='loren_example1',
             recordingLabel='loren_example1',
             recordingPath=ka.store_object(recording_obj,
                                           basename='loren_example1.json'),
             recordingObject=recording_obj,
             description='''
        Example from Loren Frank
        '''.strip()))
    le_sortings.append(
        dict(sortingId='loren_example1:mountainsort4',
             sortingLabel='loren_example1:mountainsort4',
             sortingPath=ka.store_object(
                 sorting_obj, basename='loren_example-mountainsort4.json'),
             sortingObject=sorting_obj,
             recordingId='loren_example1',
             recordingPath=ka.store_object(recording_obj,
                                           basename='loren_example1.json'),
             recordingObject=recording_obj,
             description='''
        Example from Loren Frank (MountainSort4)
        '''.strip()))

    feed_uri = create_labbox_ephys_feed(le_recordings, le_sortings)
    print(feed_uri)
Example #6
def main():
    snippets_h5_uri = 'sha1://5fc6996dfed9e7fd577bc85194d982a1ba52085e/real_snippets_1.h5?manifest=741f23273c3121aada6d9bdb67009c8c2ae1ed77'
    recording_obj = {
        'recording_format': 'snippets1',
        'data': {
            'snippets_h5_uri': snippets_h5_uri
        }
    }
    sorting_obj = {
        'sorting_format': 'snippets1',
        'data': {
            'snippets_h5_uri': snippets_h5_uri
        }
    }
    recording = le.LabboxEphysRecordingExtractor(recording_obj)
    sorting = le.LabboxEphysSortingExtractor(sorting_obj)
    print(recording.get_sampling_frequency())
    print(recording.get_channel_ids())

    le_recordings = []
    le_sortings = []

    le_recordings.append(
        dict(recordingId='loren_example1',
             recordingLabel='loren_example1',
             recordingPath=ka.store_object(recording_obj,
                                           basename='loren_example1.json'),
             recordingObject=recording_obj,
             description='''
        Example from Loren Frank
        '''.strip()))
    le_sortings.append(
        dict(sortingId='loren_example1:mountainsort4',
             sortingLabel='loren_example1:mountainsort4',
             sortingPath=ka.store_object(
                 sorting_obj, basename='loren_example-mountainsort4.json'),
             sortingObject=sorting_obj,
             recordingId='loren_example1',
             recordingPath=ka.store_object(recording_obj,
                                           basename='loren_example1.json'),
             recordingObject=recording_obj,
             description='''
        Example from Loren Frank (MountainSort4)
        '''.strip()))

    feed_uri = create_labbox_ephys_feed(le_recordings, le_sortings)
    print(feed_uri)
Example #7
def main():
    SF_STUDY_SETS = kp.load_object(
        'sha1://54d9ed77a2aa788b9ab67977476c2b51adb8a2c5/studysets.json'
    )['StudySets']
    STUDY_SETS = []
    for SF_STUDY_SET in SF_STUDY_SETS:
        if SF_STUDY_SET['name'] in study_set_names:
            STUDY_SET = {
                'name': SF_STUDY_SET['name'],
                'info': SF_STUDY_SET['info'],
                'description': SF_STUDY_SET['description'],
                'studies': []
            }
            for SF_STUDY in SF_STUDY_SET['studies']:
                STUDY = {
                    'name': SF_STUDY['name'],
                    'studySetName': SF_STUDY['studySetName'],
                    'recordings': []
                }
                for SF_RECORDING in SF_STUDY[
                        'recordings'][:
                                      3]:  # for now only load up to 3 recordings per study
                    recording_object = create_recording_object_from_spikeforest_recdir(
                        SF_RECORDING['directory'], label=SF_RECORDING['name'])
                    sorting_object = create_sorting_object_from_spikeforest_recdir(
                        SF_RECORDING['directory'], label=SF_RECORDING['name'])
                    print(
                        '********************************************************************************************'
                    )
                    print(
                        f"{SF_RECORDING['studySetName']} {SF_RECORDING['studyName']} {SF_RECORDING['name']}"
                    )
                    print(
                        '********************************************************************************************'
                    )
                    RECORDING = {
                        "name": SF_RECORDING["name"],
                        "studyName": SF_RECORDING["studyName"],
                        "studySetName": SF_RECORDING["studySetName"],
                        "recordingObject": recording_object,
                        "sortingObject": sorting_object,
                        "sampleRateHz": SF_RECORDING["sampleRateHz"],
                        "numChannels": SF_RECORDING["numChannels"],
                        "durationSec": SF_RECORDING["durationSec"],
                        "numTrueUnits": SF_RECORDING["numTrueUnits"],
                        "old": {
                            "directory": SF_RECORDING["directory"],
                            "firingsTrue": SF_RECORDING["firingsTrue"],
                            "spikeSign": SF_RECORDING["spikeSign"]
                        }
                    }
                    STUDY['recordings'].append(RECORDING)
                STUDY_SET['studies'].append(STUDY)
            STUDY_SETS.append(STUDY_SET)
    spikeforest_study_sets = {'studysets': STUDY_SETS}
    # spikeforest_obj['self_reference'] = ka.store_object(spikeforest_obj)
    spikeforest_study_sets_path = ka.store_object(
        spikeforest_study_sets, basename='spikeforest_study_sets.json')
    print(spikeforest_study_sets_path)
Example #8
def register_groundtruth(*, recdir, output_fname, label, to):
    with ka.config(to=to):
        firings_true_path = ka.store_file(recdir + '/firings_true.mda')
        obj = dict(firings=firings_true_path)
        obj['self_reference'] = ka.store_object(
            obj, basename='{}.json'.format(label))
        with open(output_fname, 'w') as f:
            json.dump(obj, f, indent=4)
Example #9
def patch_recording_geom(recording, geom_fname):
    print(f'PATCHING geom for recording: {recording["name"]}')
    geom_info = ka.get_file_info(geom_fname)
    x = recording['directory']
    y = ka.store_dir(x).replace('sha1dir://', 'sha1://')
    obj = ka.load_object(y)
    obj['files']['geom.csv'] = dict(size=geom_info['size'],
                                    sha1=geom_info['sha1'])
    x2 = ka.store_object(obj)
    recording['directory'] = 'sha1dir://' + ka.get_file_hash(x2) + '.patched'
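A hedged sketch of how this patch helper might be invoked; the recording record and hash below are placeholders, and the record only needs 'name' and 'directory' keys:

recording = dict(name='rec_0', directory='sha1dir://<hash>.rec_0')
patch_recording_geom(recording, 'mearec_neuronexus_geom.csv')
print(recording['directory'])  # now a 'sha1dir://...patched' URI pointing at the modified directory object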
Example #10
 def create_snapshot(self, subfeed_names: list):
     subfeeds = dict()
     for subfeed_name in subfeed_names:
         subfeed = self.get_subfeed(subfeed_name)
         messages = subfeed.get_next_messages(wait_msec=0)
         subfeeds[subfeed.get_subfeed_hash()] = dict(
             subfeedHash=subfeed.get_subfeed_hash(), messages=messages)
     snapshot_uri = ka.store_object(dict(subfeeds=subfeeds),
                                    basename='feed.json')
     return Feed(snapshot_uri)
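A hedged usage sketch, assuming `feed` is an instance of the class defining this method and `workspace_name` is defined; the subfeed names follow the dict form used in Example #1:

snapshot_feed = feed.create_snapshot(subfeed_names=[
    dict(workspaceName=workspace_name, key='recordings'),
    dict(workspaceName=workspace_name, key='sortings'),
])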
Example #11
def register_recording(*, recdir, output_fname, label, to):
    with ka.config(to=to):
        raw_path = ka.store_file(recdir + '/raw.mda')
        obj = dict(raw=raw_path,
                   params=ka.load_object(recdir + '/params.json'),
                   geom=np.genfromtxt(ka.load_file(recdir + '/geom.csv'),
                                      delimiter=',').tolist())
        obj['self_reference'] = ka.store_object(
            obj, basename='{}.json'.format(label))
        with open(output_fname, 'w') as f:
            json.dump(obj, f, indent=4)
Example #12
def register_study(*,
                   path_from,
                   path_to,
                   studySetName,
                   studyName,
                   to='default_readwrite'):
    list_rec = [
        str(f) for f in os.listdir(path_from)
        if os.path.isdir(os.path.join(path_from, f))
    ]
    print('# files: {}'.format(len(list_rec)))
    study_obj = dict(name=studyName, studySetName=studySetName, recordings=[])
    mkdir_(path_to)
    for rec1 in list_rec:
        print(f'Uploading {rec1}')
        path_rec1 = os.path.join(path_from, rec1)
        register_groundtruth(recdir=path_rec1,
                             output_fname=os.path.join(
                                 path_to, rec1 + '.firings_true.json'),
                             label=rec1,
                             to=to)
        rec = MdaRecordingExtractor(recording_directory=path_rec1)
        sorting = MdaSortingExtractor(firings_file=path_rec1 +
                                      '/firings_true.mda',
                                      samplerate=rec.get_sampling_frequency())
        recording_obj = dict(
            name=rec1,
            studyName=studyName,
            studySetName=studySetName,
            directory=ka.store_dir(path_rec1),
            firingsTrue=ka.store_file(os.path.join(path_to, rec1 +
                                                   '.firings_true.json'),
                                      basename='firings_true.json'),
            sampleRateHz=rec.get_sampling_frequency(),
            numChannels=len(rec.get_channel_ids()),
            durationSec=rec.get_num_frames() / rec.get_sampling_frequency(),
            numTrueUnits=len(sorting.get_unit_ids()),
            spikeSign=-1  # TODO: get this from params.json
        )
        study_obj['recordings'].append(recording_obj)
        # update .json files
        register_recording(recdir=path_rec1,
                           output_fname=os.path.join(path_to, rec1 + '.json'),
                           label=rec1,
                           to=to)
    study_obj['self_reference'] = ka.store_object(study_obj)
    with open(os.path.join(path_to, studyName + '.json'), 'w') as f:
        json.dump(study_obj, f, indent=4)
    return study_obj
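A hedged invocation of register_study; all paths and names below are placeholders:

study_obj = register_study(path_from='/data/my_study_recordings',
                           path_to='recordings/MY_STUDYSET/my_study',
                           studySetName='MY_STUDYSET',
                           studyName='my_study')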
Example #13
def main():
    thisdir = os.path.dirname(os.path.realpath(__file__))
    studysets_obj_path = ka.load_text(thisdir + '/../../recordings/studysets')
    with ka.config(fr='default_readonly'):
        studysets_obj = ka.load_object(path=studysets_obj_path)
    # studysets_obj['StudySets']
    new_study_sets = []
    for ss in studysets_obj['StudySets']:
        if ss['name'] != 'PAIRED_ENGLISH':
            new_study_sets.append(ss)
    studyset_obj_path = thisdir + '/../../recordings/PAIRED_ENGLISH/PAIRED_ENGLISH.json'
    studyset_obj = ka.load_object(studyset_obj_path)
    assert studyset_obj is not None, f'Missing file: {studyset_obj_path}'
    new_study_sets.append(studyset_obj)
    studysets_obj['StudySets'] = new_study_sets
    with ka.config(fr='default_readwrite'):
        studysets_obj_path = ka.store_object(studysets_obj,
                                             basename='studysets.json')
    with open(thisdir + '/../../recordings/studysets', 'w') as f:
        f.write(studysets_obj_path)
Example #14
import kachery_p2p as kp
import kachery as ka
import labbox_ephys as le  # `le` is used below for the extractor and workspace helpers


# Adjust these values
recording_label = 'despy_tet3'
sorting_label = 'sorting'
recording_nwb_path = '<path or URI of nwb recording>'
sorting_nwb_path = '<path or URI of nwb sorting>'
feed_uri = '{feedUri}'
workspace_name = '{workspaceName}'


recording_uri = ka.store_object({
    'recording_format': 'nwb',
    'data': {
        'path': recording_nwb_path
    }
})
sorting_uri = ka.store_object({
    'sorting_format': 'nwb',
    'data': {
        'path': sorting_nwb_path
    }
})

sorting = le.LabboxEphysSortingExtractor(sorting_uri, samplerate=30000)
recording = le.LabboxEphysRecordingExtractor(recording_uri, download=True)

feed = kp.load_feed(feed_uri)
workspace = le.load_workspace(workspace_name=workspace_name, feed=feed)
print(f'Feed URI: {feed.get_uri()}')
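A plausible next step, mirroring the helper in Example #1 (import_sorting is not imported in this script, and the recording_id value is a placeholder):

import_sorting(feed=feed,
               workspace_name=workspace_name,
               recording=recording,
               sorting=sorting,
               recording_id='R-' + recording_label,
               sorting_label=sorting_label)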
Example #15
    info=dict(label=studySetName,
              electrode_type='silicon-probe',
              doi='',
              ground_truth='eMouse simulator from Kilosort2, linear drift',
              organism='',
              source='Jennifer Colonell and Marius Pachitariu',
              labels=["Simulated recording"]),
    description='''\
    This synthetic ground truth is generated by modifying the Kilosort2 eMouse simulator
    developed by J. Colonell and M. Pachitariu. The simulator uses averaged
    unit waveforms from recordings taken in the Kampff laboratory using a densely
    spaced electrode array (15 x 17 layout spanning 100 x 102 micrometers). Linear probe
    motion is generated by uniformly translating a 64-channel probe (Neuropixels layout) by
    20 micrometers over 80 min. To study the effect of time duration and channel count
    on sorting accuracy, we extracted 8 or 16 channels from the original output
    (64 chans, 80 min) by taking contiguous neighboring channels at various
    time durations (5, 10, 20, 40, 80 min) starting at t=0. Ten recordings were sampled
    for each channel count and time duration by uniformly varying the channel offsets.

    The simulation inserted waveform templates at random channels and time points after
    multiplying them by a random scaling factor drawn from a Gamma distribution.
    The baseline noise was randomly generated to match the power spectrum observed from
    a Neuropixels recording, and spatiotemporal smoothing was applied to induce
    correlation between nearby channels and time samples.
    ''',
    studies=list_study_obj)

studyset_obj['self_reference'] = ka.store_object(studyset_obj)
with open(os.path.join(path_to, studySetName + '.json'), 'w') as f:
    json.dump(studyset_obj, f, indent=4)
Example #16
def main():
    from spikeforest2 import sorters
    from spikeforest2 import processing

    parser = argparse.ArgumentParser(
        description='Run the SpikeForest2 main analysis')
    # parser.add_argument('analysis_file', help='Path to the analysis specification file (.json format).')
    # parser.add_argument('--config', help='Configuration file', required=True)
    # parser.add_argument('--output', help='Analysis output file (.json format)', required=True)
    # parser.add_argument('--slurm', help='Optional SLURM configuration file (.json format)', required=False, default=None)
    # parser.add_argument('--verbose', help='Provide some additional verbose output.', action='store_true')
    parser.add_argument(
        'spec',
        help='Path to the .json file containing the analysis specification')
    parser.add_argument('--output',
                        '-o',
                        help='The output .json file',
                        required=True)
    parser.add_argument('--force-run',
                        help='Force rerunning of all spike sorting',
                        action='store_true')
    parser.add_argument(
        '--force-run-all',
        help='Force rerunning of all spike sorting and other processing',
        action='store_true')
    parser.add_argument('--parallel',
                        help='Optional number of parallel jobs',
                        required=False,
                        default='0')
    parser.add_argument('--slurm',
                        help='Path to slurm config file',
                        required=False,
                        default=None)
    parser.add_argument('--cache',
                        help='The cache database to use',
                        required=False,
                        default=None)
    parser.add_argument('--rerun-failing',
                        help='Rerun sorting jobs that previously failed',
                        action='store_true')
    parser.add_argument('--test', help='Only run a few.', action='store_true')
    parser.add_argument('--job-timeout',
                        help='Timeout for sorting jobs',
                        required=False,
                        default=600)
    parser.add_argument('--log-file',
                        help='Log file for analysis progress',
                        required=False,
                        default=None)

    args = parser.parse_args()
    force_run_all = args.force_run_all

    # the following apply to sorting jobs only
    force_run = args.force_run or args.force_run_all
    job_timeout = float(args.job_timeout)
    cache_failing = True
    rerun_failing = args.rerun_failing

    with open(args.spec, 'r') as f:
        spec = json.load(f)

    # clear the log file
    if args.log_file is not None:
        with open(args.log_file, 'w'):
            pass

    studysets_path = spec['studysets']
    studyset_names = spec['studyset_names']
    spike_sorters = spec['spike_sorters']

    ka.set_config(fr='default_readonly')

    print(f'Loading study sets object from: {studysets_path}')
    studysets_obj = ka.load_object(studysets_path)
    if not studysets_obj:
        raise Exception(f'Unable to load: {studysets_path}')

    all_study_sets = studysets_obj['StudySets']
    study_sets = []
    for studyset in all_study_sets:
        if studyset['name'] in studyset_names:
            study_sets.append(studyset)

    if int(args.parallel) > 0:
        job_handler = hither.ParallelJobHandler(int(args.parallel))
        job_handler_gpu = job_handler
        job_handler_ks = job_handler
    elif args.slurm:
        with open(args.slurm, 'r') as f:
            slurm_config = json.load(f)
        job_handler = hither.SlurmJobHandler(working_dir='tmp_slurm',
                                             **slurm_config['cpu'])
        job_handler_gpu = hither.SlurmJobHandler(working_dir='tmp_slurm',
                                                 **slurm_config['gpu'])
        job_handler_ks = hither.SlurmJobHandler(working_dir='tmp_slurm',
                                                **slurm_config['ks'])
    else:
        job_handler = None
        job_handler_gpu = None
        job_handler_ks = None

    with hither.config(container='default',
                       cache=args.cache,
                       force_run=force_run_all,
                       job_handler=job_handler,
                       log_path=args.log_file), hither.job_queue():
        studies = []
        recordings = []
        for studyset in study_sets:
            studyset_name = studyset['name']
            print(f'================ STUDY SET: {studyset_name}')
            studies0 = studyset['studies']
            if args.test:
                studies0 = studies0[:1]
                studyset['studies'] = studies0
            for study in studies0:
                study['study_set'] = studyset_name
                study_name = study['name']
                print(f'======== STUDY: {study_name}')
                recordings0 = study['recordings']
                if args.test:
                    recordings0 = recordings0[:2]
                    study['recordings'] = recordings0
                for recording in recordings0:
                    recording['study'] = study_name
                    recording['study_set'] = studyset_name
                    recording['firings_true'] = recording['firingsTrue']
                    recordings.append(recording)
                studies.append(study)

        # Download recordings
        for recording in recordings:
            ka.load_file(recording['directory'] + '/raw.mda')
            ka.load_file(recording['directory'] + '/firings_true.mda')

        # Attach results objects
        for recording in recordings:
            recording['results'] = dict()

        # Summarize recordings
        for recording in recordings:
            recording_path = recording['directory']
            sorting_true_path = recording['firingsTrue']
            recording['results'][
                'computed-info'] = processing.compute_recording_info.run(
                    _label=
                    f'compute-recording-info:{recording["study"]}/{recording["name"]}',
                    recording_path=recording_path,
                    json_out=hither.File())
            recording['results'][
                'true-units-info'] = processing.compute_units_info.run(
                    _label=
                    f'compute-units-info:{recording["study"]}/{recording["name"]}',
                    recording_path=recording_path,
                    sorting_path=sorting_true_path,
                    json_out=hither.File())

        # Spike sorting
        for sorter in spike_sorters:
            for recording in recordings:
                if recording['study_set'] in sorter['studysets']:
                    recording_path = recording['directory']
                    sorting_true_path = recording['firingsTrue']

                    algorithm = sorter['processor_name']
                    if not hasattr(sorters, algorithm):
                        raise Exception(
                            f'No such sorting algorithm: {algorithm}')
                    Sorter = getattr(sorters, algorithm)

                    if algorithm in ['ironclust']:
                        gpu = True
                        jh = job_handler_gpu
                    elif algorithm in ['kilosort', 'kilosort2']:
                        gpu = True
                        jh = job_handler_ks
                    else:
                        gpu = False
                        jh = job_handler
                    with hither.config(gpu=gpu,
                                       force_run=force_run,
                                       exception_on_fail=False,
                                       cache_failing=cache_failing,
                                       rerun_failing=rerun_failing,
                                       job_handler=jh,
                                       job_timeout=job_timeout):
                        sorting_result = Sorter.run(
                            _label=
                            f'{algorithm}:{recording["study"]}/{recording["name"]}',
                            recording_path=recording['directory'],
                            sorting_out=hither.File())
                        recording['results']['sorting-' +
                                             sorter['name']] = sorting_result
                    recording['results'][
                        'comparison-with-truth-' +
                        sorter['name']] = processing.compare_with_truth.run(
                            _label=
                            f'comparison-with-truth:{algorithm}:{recording["study"]}/{recording["name"]}',
                            sorting_path=sorting_result.outputs.sorting_out,
                            sorting_true_path=sorting_true_path,
                            json_out=hither.File())
                    recording['results'][
                        'units-info-' +
                        sorter['name']] = processing.compute_units_info.run(
                            _label=
                            f'units-info:{algorithm}:{recording["study"]}/{recording["name"]}',
                            recording_path=recording_path,
                            sorting_path=sorting_result.outputs.sorting_out,
                            json_out=hither.File())

    # Assemble all of the results
    print('')
    print('=======================================================')
    print('Assembling results...')
    for recording in recordings:
        print(
            f'Assembling recording: {recording["study"]}/{recording["name"]}')
        recording['summary'] = dict(
            plots=dict(),
            computed_info=ka.load_object(
                recording['results']['computed-info'].outputs.json_out._path),
            true_units_info=ka.store_file(
                recording['results']
                ['true-units-info'].outputs.json_out._path))
    sorting_results = []
    for sorter in spike_sorters:
        for recording in recordings:
            if recording['study_set'] in sorter['studysets']:
                print(
                    f'Assembling sorting: {sorter["processor_name"]} {recording["study"]}/{recording["name"]}'
                )
                sorting_result = recording['results']['sorting-' +
                                                      sorter['name']]
                comparison_result = recording['results'][
                    'comparison-with-truth-' + sorter['name']]
                units_info_result = recording['results']['units-info-' +
                                                         sorter['name']]
                console_out_str = _console_out_to_str(
                    sorting_result.runtime_info['console_out'])
                console_out_path = ka.store_text(console_out_str)
                sr = dict(
                    recording=recording,
                    sorter=sorter,
                    firings_true=recording['directory'] + '/firings_true.mda',
                    processor_name=sorter['processor_name'],
                    processor_version=sorting_result.version,
                    execution_stats=dict(
                        start_time=sorting_result.runtime_info['start_time'],
                        end_time=sorting_result.runtime_info['end_time'],
                        elapsed_sec=sorting_result.runtime_info['end_time'] -
                        sorting_result.runtime_info['start_time'],
                        retcode=0 if sorting_result.success else -1,
                        timed_out=sorting_result.runtime_info.get(
                            'timed_out', False)),
                    container=sorting_result.container,
                    console_out=console_out_path)
                if sorting_result.success:
                    sr['firings'] = ka.store_file(
                        sorting_result.outputs.sorting_out._path)
                    sr['comparison_with_truth'] = dict(json=ka.store_file(
                        comparison_result.outputs.json_out._path))
                    sr['sorted_units_info'] = ka.store_file(
                        units_info_result.outputs.json_out._path)
                else:
                    sr['firings'] = None
                    sr['comparison_with_truth'] = None
                    sr['sorted_units_info'] = None
                sorting_results.append(sr)

    # Delete results from recordings
    for recording in recordings:
        del recording['results']

    # Aggregate sorting results
    print('')
    print('=======================================================')
    print('Aggregating sorting results...')
    aggregated_sorting_results = aggregate_sorting_results(
        studies, recordings, sorting_results)

    # Show output summary
    for sr in aggregated_sorting_results['study_sorting_results']:
        study_name = sr['study']
        sorter_name = sr['sorter']
        n1 = np.array(sr['num_matches'])
        n2 = np.array(sr['num_false_positives'])
        n3 = np.array(sr['num_false_negatives'])
        accuracies = n1 / (n1 + n2 + n3)
        avg_accuracy = np.mean(accuracies)
        txt = 'STUDY: {}, SORTER: {}, AVG ACCURACY: {}'.format(
            study_name, sorter_name, avg_accuracy)
        print(txt)

    output_object = dict(studies=studies,
                         recordings=recordings,
                         study_sets=study_sets,
                         sorting_results=sorting_results,
                         aggregated_sorting_results=ka.store_object(
                             aggregated_sorting_results,
                             basename='aggregated_sorting_results.json'))

    print(f'Writing output to {args.output}...')
    with open(args.output, 'w') as f:
        json.dump(output_object, f, indent=4)
    print('Done.')
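The output file written above is plain JSON, and its aggregated_sorting_results field holds a kachery URI. A hedged snippet for reading it back (the file path is a placeholder for args.output):

import json
import kachery as ka

with open('output.json', 'r') as f:
    output_object = json.load(f)
aggregated = ka.load_object(output_object['aggregated_sorting_results'])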
Example #17
def prepare_cortexlab_datasets():
    R1 = cortexlab_create_recording_object.run(
        bin_uri=
        'sha1://1b8592f0240603ae1019379cb47bad6475503aaf/tmp.dat?manifest=d05ca5b6e60e0fa8f2ff6f2f2ed822ff37da49c9',
        bin_size=
        87170751360,  # Later kachery-p2p will allow us to get this information from bin_uri
        channel_map_npy_uri=
        'sha1://b4de65964a758201db09f1e00d70ce40bca3a87e/channel_map.npy',
        channel_positions_npy_uri=
        'sha1://434c1bca7fd857bc5a1c9215bd890025d082fe8d/channel_positions.py',
        raw_num_channels=385,
        samplerate=30000)

    times_npy_uri = 'sha1://2d8264241321fda3b6c987412b353232068c3e93/spike_times.npy?manifest=b7f91b25b95252cdeb299b8249a622d49eddabcc'
    labels_npy_uri = 'sha1://cd893db02d086b332ee46d56b2373dd0350bf471/spike_clusters.npy?manifest=6efc0362d708fa3a9ae5ce9280898a54e6e5d189'
    cluster_groups_csv_uri = 'sha1://d7d12256973a2d7f48edefdb4d8bb03f68e59aa5/cluster_groups.csv'
    S1 = dict(sorting_format='npy1',
              data=dict(times_npy_uri=times_npy_uri,
                        labels_npy_uri=labels_npy_uri,
                        samplerate=30000))
    R2 = create_subrecording_object.run(recording_object=R1,
                                        channels=None,
                                        start_frame=0,
                                        end_frame=30000 * 10)
    R3 = create_subrecording_object.run(recording_object=R1,
                                        channels=[0, 1, 2, 3, 4, 5, 6, 7],
                                        start_frame=0,
                                        end_frame=30000 * 10)
    hi.wait()
    R1 = R1.get_result()
    R2 = R2.get_result()
    R3 = R3.get_result()

    S1_good = _keep_good_units(S1, cluster_groups_csv_uri)

    le_recordings = []
    le_sortings = []
    le_recordings.append(
        dict(recordingId='cortexlab-single-phase-3',
             recordingLabel='cortexlab-single-phase-3 (full)',
             recordingPath=ka.store_object(
                 R1, basename='cortexlab-single-phase-3.json'),
             recordingObject=R1,
             description='''
        A "Phase3" Neuropixels electrode array was inserted into the brain of an awake, head-fixed mouse for about an hour.
        '''.strip()))
    le_sortings.append(
        dict(sortingId='cortexlab-single-phase-3:curated',
             sortingLabel='cortexlab-single-phase-3 Curated',
             sortingPath=ka.store_object(
                 S1, basename='cortexlab-single-phase-3-curated.json'),
             sortingObject=S1,
             recordingId='cortexlab-single-phase-3',
             recordingPath=ka.store_object(
                 R1, basename='cortexlab-single-phase-3.json'),
             recordingObject=R1,
             description='''
        Curated spike sorting for cortexlab-single-phase-3
        '''.strip()))
    le_sortings.append(
        dict(sortingId='cortexlab-single-phase-3:curated_good',
             sortingLabel='cortexlab-single-phase-3 Curated (good units)',
             sortingPath=ka.store_object(
                 S1_good, basename='cortexlab-single-phase-3-curated.json'),
             sortingObject=S1_good,
             recordingId='cortexlab-single-phase-3',
             recordingPath=ka.store_object(
                 R1, basename='cortexlab-single-phase-3.json'),
             recordingObject=R1,
             description='''
        Curated spike sorting for cortexlab-single-phase-3 (good units only)
        '''.strip()))
    le_recordings.append(
        dict(recordingId='cortexlab-single-phase-3.10sec',
             recordingLabel='cortexlab-single-phase-3 (10 sec)',
             recordingPath=ka.store_object(
                 R2, basename='cortexlab-single-phase-3-10sec.json'),
             recordingObject=R2,
             description=
             'Extracted 10 seconds of data from the beginning of the recording'
             ))
    le_recordings.append(
        dict(
            recordingId='cortexlab-single-phase-3-ch0-7.10sec',
            recordingLabel='cortexlab-single-phase-3 (ch 0-7, 10 sec)',
            recordingPath=ka.store_object(
                R3, basename='cortexlab-single-phase-3-ch0-7-10sec.json'),
            recordingObject=R3,
            description=
            'Extracted a subset of channels and 10 seconds of data from the beginning of the recording'
        ))
    return le_recordings, le_sortings
Example #18
def main():
    parser = argparse.ArgumentParser(
        description="Prepare SpikeForest recordings (i.e., populate this repository)")
    parser.add_argument('output_dir', help='The output directory (e.g., recordings)')
    parser.add_argument('--upload', action='store_true', help='Whether to upload the recording objects to kachery (password required)')
    # parser.add_argument('--verbose', action='store_true', help='Turn on verbose output')

    args = parser.parse_args()
    output_dir = args.output_dir

    if args.upload:
        ka.set_config(
            fr='default_readwrite',
            to='default_readwrite'
        )
    else:
        ka.set_config(
            fr='default_readonly',
        )
    
    # geom_mearec_neuronexus = np.genfromtxt('mearec_neuronexus_geom.csv', delimiter=',').tolist()
    mearec_neuronexus_geom_fname = 'mearec_neuronexus_geom.csv'

    # Load a spikeforest analysis object
    X = ka.load_object('sha1://b678d798d67b6faa3c6240aca52f3857c9e4b877/analysis.json')

    # the output directory on the local machine
    basedir = output_dir
    if os.path.exists(basedir):
        raise Exception('Directory already exists: {}'.format(basedir))

    if not os.path.exists(basedir):
        os.mkdir(basedir)

    studysets_to_include = ['PAIRED_BOYDEN', 'PAIRED_CRCNS_HC1', 'PAIRED_MEA64C_YGER', 'PAIRED_KAMPFF', 'PAIRED_MONOTRODE', 'SYNTH_BIONET', 'SYNTH_MONOTRODE', 'SYNTH_MAGLAND', 'SYNTH_MEAREC_NEURONEXUS', 'SYNTH_MEAREC_TETRODE', 'SYNTH_MONOTRODE', 'SYNTH_VISAPY', 'HYBRID_JANELIA', 'MANUAL_FRANKLAB']
    # studysets_to_include = ['PAIRED_CRCNS_HC1', 'PAIRED_MEA64C_YGER', 'PAIRED_KAMPFF', 'PAIRED_MONOTRODE', 'SYNTH_MONOTRODE', 'SYNTH_MAGLAND', 'SYNTH_MEAREC_NEURONEXUS', 'SYNTH_MEAREC_TETRODE', 'SYNTH_MONOTRODE', 'SYNTH_VISAPY', 'HYBRID_JANELIA', 'MANUAL_FRANKLAB']

    # These are the files to download within each recording
    fnames = ['geom.csv', 'params.json', 'raw.mda', 'firings_true.mda']
    # fnames = ['geom.csv', 'params.json']
    for studyset in X['StudySets']:
        studyset_name = studyset['name']
        if studyset_name in studysets_to_include:
            print('STUDYSET: {}'.format(studyset['name']))
            studysetdir_local = os.path.join(basedir, studyset_name)
            if not os.path.exists(studysetdir_local):
                os.mkdir(studysetdir_local)
            for study in studyset['studies']:
                study_name = study['name']
                print('STUDY: {}/{}'.format(studyset_name, study_name))
                studydir_local = os.path.join(studysetdir_local, study_name)
                if not os.path.exists(studydir_local):
                    os.mkdir(studydir_local)
                for recording in study['recordings']:
                    if studyset_name == 'SYNTH_MEAREC_NEURONEXUS':
                        patch_recording_geom(recording, mearec_neuronexus_geom_fname)
                    recname = recording['name']
                    print('RECORDING: {}/{}/{}'.format(studyset_name, study_name, recname))
                    recdir = recording['directory']
                    recfile = os.path.join(studydir_local, recname + '.json')
                    obj = dict(
                        raw=recdir + '/raw.mda',
                        params=ka.load_object(recdir + '/params.json'),
                        geom=np.genfromtxt(ka.load_file(recdir + '/geom.csv'), delimiter=',').T
                    )
                    obj = _json_serialize(obj)
                    obj['self_reference'] = ka.store_object(obj, basename='{}/{}/{}.json'.format(studyset_name, study_name, recname))
                    with open(recfile, 'w') as f:
                        json.dump(obj, f, indent=4)
                    firings_true_file = os.path.join(studydir_local, recname + '.firings_true.json')
                    obj2 = dict(
                        firings=recdir + '/firings_true.mda'
                    )
                    obj2['self_reference'] = ka.store_object(obj2, basename='{}/{}/{}.firings_true.json'.format(studyset_name, study_name, recname))
                    with open(firings_true_file, 'w') as f:
                        json.dump(obj2, f, indent=4)
                study['self_reference'] = ka.store_object(study, basename='{}.json'.format(study_name))
                with open(os.path.join(studydir_local, study_name + '.json'), 'w') as f:
                    json.dump(study, f, indent=4)
            studyset['self_reference'] = ka.store_object(studyset, basename='{}.json'.format(studyset_name))
            with open(os.path.join(studysetdir_local, studyset_name + '.json'), 'w') as f:
                json.dump(studyset, f, indent=4)
    studysets_obj = dict(
        StudySets=X['StudySets']
    )
    studysets_path = ka.store_object(studysets_obj, basename='studysets.json')
    with open(os.path.join(basedir, 'studysets'), 'w') as f:
        f.write(studysets_path)
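The 'studysets' file written at the end is a one-line pointer containing a kachery URI. A minimal sketch of reading it back (this assumes the os and ka imports and the basedir variable from main() above):

with open(os.path.join(basedir, 'studysets'), 'r') as f:
    studysets_uri = f.read()
studysets_obj = ka.load_object(studysets_uri)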
Example #19
 def handle_message(self, msg):
     type0 = msg.get('type')
     if type0 == 'reportClientInfo':
         print('reported client info:', msg)
         self._feed_uri = msg['clientInfo']['feedUri']
         self._workspace_name = msg['clientInfo']['workspaceName']
         self._readonly = msg['clientInfo']['readOnly']
         if not self._feed_uri:
             self._feed_uri = 'feed://' + self._default_feed_id
             # self._feed_uri = kp.create_feed(feed_name='labbox-ephys-default').get_uri()
         # assert self._feed_uri.startswith('sha1://'), 'For now, feedUri must start with sha1://'
         self._feed = kp.load_feed(self._feed_uri)
         for key in ['recordings', 'sortings']:
             self._subfeed_positions[key] = 0
             subfeed_name = dict(key=key,
                                 workspaceName=self._workspace_name)
             subfeed = self._feed.get_subfeed(subfeed_name)
             messages = subfeed.get_next_messages(wait_msec=10)
             for m in messages:
                 if 'action' in m:
                     self._send_message({
                         'type': 'action',
                         'action': m['action']
                     })
                 else:
                     print(f'WARNING: No action in message for {key}')
             self._subfeed_positions[
                 key] = self._subfeed_positions[key] + len(messages)
         self._send_message({'type': 'reportInitialLoadComplete'})
         if self._feed:
             qm = self._queued_document_action_messages
             self._queued_document_action_messages = []
             for m in qm:
                 self.handle_message(m)
     elif type0 == 'appendDocumentAction':
         if self._readonly:
             print(
                 'Cannot append document action. This is a readonly feed.')
             return
         if self._feed is None:
             self._queued_document_action_messages.append(msg)
         else:
             subfeed_name = dict(key=msg['key'],
                                 workspaceName=self._workspace_name)
             subfeed = self._feed.get_subfeed(subfeed_name)
             subfeed.append_message({'action': msg['action']})
     elif type0 == 'hitherCreateJob':
         functionName = msg['functionName']
         kwargs = msg['kwargs']
         client_job_id = msg['clientJobId']
         try:
             outer_job = hi.run(functionName,
                                **kwargs,
                                labbox=self._labbox_context)
         except Exception as err:
             self._send_message({
                 'type': 'hitherJobError',
                 'job_id': client_job_id,
                 'client_job_id': client_job_id,
                 'error_message': f'Error creating outer job: {str(err)}',
                 'runtime_info': None
             })
             return
         try:
             job_or_result = outer_job.wait()
         except Exception as err:
             self._send_message({
                 'type': 'hitherJobError',
                 'job_id': outer_job._job_id,
                 'client_job_id': client_job_id,
                 'error_message': str(err),
                 'runtime_info': outer_job.get_runtime_info()
             })
             return
         if hasattr(job_or_result, '_job_id'):
             job = job_or_result
             setattr(job, '_client_job_id', client_job_id)
             job_id = job._job_id
             self._jobs_by_id[job_id] = job
             print(
                 f'======== Created hither job (2): {job_id} {functionName}'
             )
             self._send_message({
                 'type': 'hitherJobCreated',
                 'job_id': job_id,
                 'client_job_id': client_job_id
             })
         else:
             result = job_or_result
             msg = {
                 'type': 'hitherJobFinished',
                 'client_job_id': client_job_id,
                 'job_id': client_job_id,
                 # 'result': _make_json_safe(result),
                 'result_sha1': ka.get_file_hash(
                     ka.store_object(_make_json_safe(result))),
                 'runtime_info': outer_job.get_runtime_info()
             }
             self._send_message(msg)
     elif type0 == 'hitherCancelJob':
         job_id = msg['job_id']
         assert job_id, 'Missing job_id'
         assert job_id in self._jobs_by_id, f'No job with id: {job_id}'
         job = self._jobs_by_id[job_id]
         job.cancel()
Example #20
def main():
    parser = argparse.ArgumentParser(
        description=
        "Prepare SpikeForest recordings (i.e., populate this repository)")
    parser.add_argument('output_dir',
                        help='The output directory (e.g., recordings)')
    parser.add_argument(
        '--upload',
        action='store_true',
        help=
        'Whether to upload the recording objects to kachery (password required)'
    )
    # parser.add_argument('--verbose', action='store_true', help='Turn on verbose output')

    args = parser.parse_args()
    output_dir = args.output_dir

    if args.upload:
        ka.set_config(fr='default_readwrite', to='default_readwrite')
    else:
        ka.set_config(fr='default_readonly', )

    # geom_mearec_neuronexus = np.genfromtxt('mearec_neuronexus_geom.csv', delimiter=',').tolist()
    mearec_neuronexus_geom_fname = 'mearec_neuronexus_geom.csv'

    # Load a spikeforest analysis object
    X = ka.load_object(
        'sha1://b678d798d67b6faa3c6240aca52f3857c9e4b877/analysis.json')

    # the output directory on the local machine
    basedir = output_dir
    if os.path.exists(basedir):
        raise Exception('Directory already exists: {}'.format(basedir))

    if not os.path.exists(basedir):
        os.mkdir(basedir)

    studysets_to_add = ['PAIRED_ENGLISH']
    studysets_to_include = [
        'PAIRED_BOYDEN', 'PAIRED_CRCNS_HC1', 'PAIRED_MEA64C_YGER',
        'PAIRED_KAMPFF', 'PAIRED_MONOTRODE', 'SYNTH_BIONET', 'SYNTH_MONOTRODE',
        'SYNTH_MAGLAND', 'SYNTH_MEAREC_NEURONEXUS', 'SYNTH_MEAREC_TETRODE',
        'SYNTH_MONOTRODE', 'SYNTH_VISAPY', 'HYBRID_JANELIA', 'MANUAL_FRANKLAB'
    ]
    # studysets_to_include = ['PAIRED_CRCNS_HC1', 'PAIRED_MEA64C_YGER', 'PAIRED_KAMPFF', 'PAIRED_MONOTRODE', 'SYNTH_MONOTRODE', 'SYNTH_MAGLAND', 'SYNTH_MEAREC_NEURONEXUS', 'SYNTH_MEAREC_TETRODE', 'SYNTH_MONOTRODE', 'SYNTH_VISAPY', 'HYBRID_JANELIA', 'MANUAL_FRANKLAB']

    listdir_ = lambda _path: [
        x for x in os.listdir(_path) if os.path.isdir(os.path.join(_path, x))
    ]
    listfile_ = lambda _path: [
        x for x in os.listdir(_path) if os.path.isfile(os.path.join(_path, x))
    ]

    # These are the files to download within each recording
    fnames = ['geom.csv', 'params.json', 'raw.mda', 'firings_true.mda']
    # fnames = ['geom.csv', 'params.json']
    for studyset_name in studysets_to_add:
        studyset = dict(name=studyset_name,
                        info=studyset_name,
                        description=studyset_name)
        print('STUDYSET: {}'.format(studyset_name))
        studysetdir_local = os.path.join(basedir, studyset_name)
        assert os.path.exists(studysetdir_local)
        list_study = []
        list_study_name = listdir_(studysetdir_local)
        for study_name in list_study_name:
            study = dict(name=study_name, studySetName=studyset_name)
            print('STUDY: {}/{}'.format(studyset_name, study_name))
            studydir_local = os.path.join(studysetdir_local, study_name)
            assert os.path.exists(studydir_local)
            list_recname = listfile_(studydir_local)
            list_recname = [
                x.replace('.json', '') for x in list_recname
                if (not 'firings_true.json' in x)
            ]
            list_recording = []
            for recname in list_recname:
                recording = dict(name=recname,
                                 studyName=study_name,
                                 studySetName=studyset_name)
                print('RECORDING: {}/{}/{}'.format(studyset_name, study_name,
                                                   recname))
                with open(os.path.join(studydir_local, recname + '.json'),
                          'r') as f:
                    recording = json.load(f)

                recording['directory'] = recdir  # note: recdir is not defined in this excerpt

                list_recording.append(recording)
            study['self_reference'] = ka.store_object(
                study, basename='{}.json'.format(study_name))
            list_study.append(study)
            with open(os.path.join(studydir_local, study_name + '.json'),
                      'w') as f:
                json.dump(study, f, indent=4)
        studyset['studies'] = list_study
        studyset['self_reference'] = ka.store_object(
            studyset, basename='{}.json'.format(studyset_name))
        with open(os.path.join(studysetdir_local, studyset_name + '.json'),
                  'w') as f:
            json.dump(studyset, f, indent=4)

    # add the newly prepared study sets
    # (note: `studyset` below is the object built in the loop above;
    # this works because studysets_to_add has a single entry)
    StudySets_add = []
    for studyset_name in studysets_to_add:
        StudySets_add.append(studyset)

    StudySets = X['StudySets'] + StudySets_add
    studysets_obj = dict(StudySets=StudySets)
    studysets_path = ka.store_object(studysets_obj, basename='studysets.json')
    with open(os.path.join(basedir, 'studysets'), 'w') as f:
        f.write(studysets_path)
Example #21
#!/usr/bin/env python

from mountaintools import client as mt
import kachery as ka

# Note: download token is required here
mt.configDownloadFrom('spikeforest.kbucket')
ka.set_config(
    fr='default_readwrite',
    to='default_readwrite'
)

X = mt.loadObject(path='sha1://b678d798d67b6faa3c6240aca52f3857c9e4b877/analysis.json')
ka.store_object(X, basename='analysis.json')

X = ka.load_object('sha1://b678d798d67b6faa3c6240aca52f3857c9e4b877/analysis.json')

def get_sha1_part_of_sha1dir(path):
    if path.startswith('sha1dir://'):
        list0 = path.split('/')
        list1 = list0[2].split('.')
        return list1[0]
    else:
        return None

# studysets_to_include = ['PAIRED_BOYDEN', 'PAIRED_CRCNS_HC1', 'PAIRED_MEA64C_YGER', 'PAIRED_KAMPFF', 'PAIRED_MONOTRODE', 'SYNTH_MONOTRODE', 'SYNTH_MAGLAND', 'SYNTH_MEAREC_NEURONEXUS', 'SYNTH_MEAREC_TETRODE', 'SYNTH_MONOTRODE', 'SYNTH_VISAPY', 'HYBRID_JANELIA', 'MANUAL_FRANKLAB']
studysets_to_include = ['SYNTH_BIONET']
fnames = ['geom.csv', 'params.json', 'raw.mda', 'firings_true.mda']
# fnames = ['geom.csv', 'params.json', 'firings_true.mda']
# fnames = ['geom.csv', 'params.json']
for studyset in X['StudySets']:
Example #22
def _test_store_object(val: dict):
    x = ka.store_object(val)
    assert x
    val2 = ka.load_object(x)
    assert val == val2
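A hedged example of exercising this round-trip check directly:

_test_store_object(dict(name='example', values=[1, 2, 3], nested=dict(ok=True)))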
Example #23
def prepare_svoboda_datasets():
    le_recordings = []
    le_sortings = []
    le_curation_actions = []

    # svoboda-SC026_080619_g0_tcat_imec0
    recording_obj, sorting_obj, unit_notes = prepare_recording(
        bin_uri=
        'sha1://f94ac8b42c423e551ad461f57c1cecf6cd5bc9d2/SC026_080619_g0_tcat.imec0.ap.bin?manifest=c3f82c2d10106b3739fca0ecb298c7330b6df72a',
        bin_file_size=76029609548,
        raw_num_channels=302,
        mat_uri=
        'sha1://bb21a7cc8b9e409cd61ed1fc521937f72797ddad/data.mat?manifest=a736aa493def3770401301b9d2a946fd6fe5aff3',
        meta_uri=
        'sha1://5f19fdf70696cf85b76208e41f08c0ac6b7e1e03/SC026_080619_g0_tcat.imec0.ap.meta',  # perhaps will use in future
        single_only=True)
    le_recordings.append(
        dict(recordingId='svoboda-SC026_080619_g0_tcat_imec0',
             recordingLabel='svoboda-SC026_080619_g0_tcat_imec0',
             recordingPath=ka.store_object(
                 recording_obj,
                 basename='svoboda-SC026_080619_g0_tcat_imec0.json'),
             recordingObject=recording_obj,
             description='''
        A Phase 3B Neuropixels probe was inserted 2.9 mm into secondary motor cortex of an awake, head-fixed mouse performing a trial-based behavioural task.
        '''.strip()))
    le_sortings.append(
        dict(sortingId='svoboda-SC026_080619_g0_tcat_imec0:curated',
             sortingLabel='svoboda-SC026_080619_g0_tcat_imec0:curated',
             sortingPath=ka.store_object(
                 sorting_obj,
                 basename='svoboda-SC026_080619_g0_tcat_imec0-curated.json'),
             sortingObject=sorting_obj,
             recordingId='svoboda-SC026_080619_g0_tcat_imec0',
             recordingPath=ka.store_object(
                 recording_obj,
                 basename='svoboda-SC026_080619_g0_tcat_imec0.json'),
             recordingObject=recording_obj,
             description='''
        Curated spike sorting for svoboda-SC026_080619_g0_tcat_imec0
        '''.strip()))
    for unit_id, notes in unit_notes.items():
        for note in notes:
            le_curation_actions.append(
                dict(type='ADD_UNIT_LABEL',
                     sortingId='svoboda-SC026_080619_g0_tcat_imec0:curated',
                     unitId=unit_id,
                     label=note))

    # svoboda-SC022_030319_g0_tcat_imec2
    recording_obj, sorting_obj, unit_notes = prepare_recording(
        bin_uri=
        'sha1://9e7e76e467a28454ad9b76d29cb99d5330fffd5b/SC022_030319_g0_tcat.imec2.ap.bin?manifest=fc0b2783b88b61a5b84ac7a3dbd7fd9984557805',
        bin_file_size=112205135350,
        raw_num_channels=385,
        mat_uri=
        'sha1://a7c467b959a66f072b5aa6ef7c13d9118b26942b/SC022_030319_g0_tcat.imec2.ap.mat?manifest=0970f173ad47c76212f4f16dd028d0850cda8745',
        meta_uri=
        'sha1://a2bc30784266288cd6bd0b8c861dd182e538ed3c/SC022_030319_g0_tcat.imec2.ap.meta',  # perhaps will use in future
        single_only=True)
    le_recordings.append(
        dict(recordingId='svoboda-SC022_030319_g0_tcat_imec2',
             recordingLabel='svoboda-SC022_030319_g0_tcat_imec2',
             recordingPath=ka.store_object(
                 recording_obj,
                 basename='svoboda-SC022_030319_g0_tcat_imec2.json'),
             recordingObject=recording_obj,
             description='''
        A Phase 3B Neuropixels probe was inserted 4.5 mm into left hemisphere striatum of an awake, head-fixed mouse performing a trial-based behavioural task.
        '''.strip()))
    le_sortings.append(
        dict(sortingId='svoboda-SC022_030319_g0_tcat_imec2:curated',
             sortingLabel='svoboda-SC022_030319_g0_tcat_imec2:curated',
             sortingPath=ka.store_object(
                 sorting_obj,
                 basename='svoboda-SC022_030319_g0_tcat_imec2-curated.json'),
             sortingObject=sorting_obj,
             recordingId='svoboda-SC022_030319_g0_tcat_imec2',
             recordingPath=ka.store_object(
                 recording_obj,
                 basename='svoboda-SC022_030319_g0_tcat_imec2.json'),
             recordingObject=recording_obj,
             description='''
        Curated spike sorting for svoboda-SC022_030319_g0_tcat_imec2
        '''.strip()))
    for unit_id, notes in unit_notes.items():
        for note in notes:
            le_curation_actions.append(
                dict(type='ADD_UNIT_LABEL',
                     sortingId='svoboda-SC022_030319_g0_tcat_imec2:curated',
                     unitId=unit_id,
                     label=note))

    # svoboda-SC026_080619_g0_tcat_imec2
    recording_obj, sorting_obj, unit_notes = prepare_recording(
        bin_uri=
        'sha1://712b436030d1ab068eaf69c58172fffb261670ae/SC026_080619_g0_tcat.imec2.ap.bin?manifest=68c5ccc714a430143a435aee277f5c4209161e83',
        bin_file_size=96925126760,
        raw_num_channels=385,
        mat_uri=
        'sha1://273707a53a5eb401441cd56dafdc3187bd6ae79f/SC026_080619_g0_tcat.imec2.ap.mat?manifest=dac1ac15f32067879408fa9f693c09891f6a51c1',
        meta_uri=
        'sha1://1d9affa941e8953d61b9b80f4f8175b009384fa5/SC026_080619_g0_tcat.imec2.ap.meta',  # perhaps will use in future
        single_only=True)
    le_recordings.append(
        dict(recordingId='svoboda-SC026_080619_g0_tcat_imec2',
             recordingLabel='svoboda-SC026_080619_g0_tcat_imec2',
             recordingPath=ka.store_object(
                 recording_obj,
                 basename='svoboda-SC026_080619_g0_tcat_imec2.json'),
             recordingObject=recording_obj,
             description='''
        A Phase 3B Neuropixels probe was inserted 4.7 mm into the left hemisphere hippocampus and thalamus of an awake, head-fixed mouse performing a trial-based behavioural task.
        '''.strip()))
    le_sortings.append(
        dict(sortingId='svoboda-SC026_080619_g0_tcat_imec2:curated',
             sortingLabel='svoboda-SC026_080619_g0_tcat_imec2:curated',
             sortingPath=ka.store_object(
                 sorting_obj,
                 basename='svoboda-SC026_080619_g0_tcat_imec2-curated.json'),
             sortingObject=sorting_obj,
             recordingId='svoboda-SC026_080619_g0_tcat_imec2',
             recordingPath=ka.store_object(
                 recording_obj,
                 basename='svoboda-SC026_080619_g0_tcat_imec2.json'),
             recordingObject=recording_obj,
             description='''
        Curated spike sorting for svoboda-SC026_080619_g0_tcat_imec2
        '''.strip()))
    for unit_id, notes in unit_notes.items():
        for note in notes:
            le_curation_actions.append(
                dict(type='ADD_UNIT_LABEL',
                     sortingId='svoboda-SC026_080619_g0_tcat_imec2:curated',
                     unitId=unit_id,
                     label=note))

    # svoboda-SC035_011020_g0_tcat_imec0
    recording_obj, sorting_obj, unit_notes = prepare_recording(
        bin_uri=
        'sha1://ec1543ce5b040e5e56901859bf208b6f0afa4bb0/SC035_011020_g0_tcat.imec0.ap.bin?manifest=5cfb7bc0670ae892b2d84b81c402e0bb543578d0',
        bin_file_size=84952919590,
        raw_num_channels=385,
        mat_uri=
        'sha1://dfb054655a665b5e9ad69c63a187b6da92a75c59/SC035_011020_g0_tcat.imec0.ap.2.mat?manifest=c25f9a710cdb810b833357d5aa3d64111d18ff2a',
        meta_uri=
        'sha1://d11e93ae6d760a3fb57da1b8d91a86d5caae7a73/SC035_011020_g0_tcat.imec0.ap.meta',  # perhaps will use in future
        single_only=True)
    le_recordings.append(
        dict(recordingId='svoboda-SC035_011020_g0_tcat_imec0',
             recordingLabel='svoboda-SC035_011020_g0_tcat_imec0',
             recordingPath=ka.store_object(
                 recording_obj,
                 basename='svoboda-SC035_011020_g0_tcat_imec0.json'),
             recordingObject=recording_obj,
             description='''
        A 2.0 4-shank Neuropixels probe was inserted 1 mm into the right hemisphere secondary motor cortex of an awake, head-fixed mouse performing a trial-based behavioural task.
        '''.strip()))
    le_sortings.append(
        dict(sortingId='svoboda-SC035_011020_g0_tcat_imec0:curated',
             sortingLabel='svoboda-SC035_011020_g0_tcat_imec0:curated',
             sortingPath=ka.store_object(
                 sorting_obj,
                 basename='svoboda-SC035_011020_g0_tcat_imec0-curated.json'),
             sortingObject=sorting_obj,
             recordingId='svoboda-SC035_011020_g0_tcat_imec0',
             recordingPath=ka.store_object(
                 recording_obj,
                 basename='svoboda-SC035_011020_g0_tcat_imec0.json'),
             recordingObject=recording_obj,
             description='''
        Curated spike sorting for svoboda-SC035_011020_g0_tcat_imec0
        '''.strip()))
    for unit_id, notes in unit_notes.items():
        for note in notes:
            le_curation_actions.append(
                dict(type='ADD_UNIT_LABEL',
                     sortingId='svoboda-SC035_011020_g0_tcat_imec0:curated',
                     unitId=unit_id,
                     label=note))

    # svoboda-SC035_010920_g0_tcat_imec1
    recording_obj, sorting_obj, unit_notes = prepare_recording(
        bin_uri=
        'sha1://cf2c025650d86d70381515fde10d45fcb2672771/SC035_010920_g0_tcat.imec1.ap.bin?manifest=a22aa64d9972d5901d3a66f0a8f12c406b836ba5',
        bin_file_size=87520507690,
        raw_num_channels=385,
        mat_uri=
        'sha1://b4e759aa392a0896314a635dc60ae2ac1ba8fd1c/SC035_010920_g0_tcat.imec1.ap.mat?manifest=49f19bf45aa6c26329e8fa50843fe41f80bf913b',
        meta_uri=
        'sha1://64300f49706a2d455b7fc0b6f17c2627623ad76f/SC035_010920_g0_tcat.imec1.ap.meta',  # perhaps will use in future
        single_only=True)
    le_recordings.append(
        dict(recordingId='svoboda-SC035_010920_g0_tcat_imec1',
             recordingLabel='svoboda-SC035_010920_g0_tcat_imec1',
             recordingPath=ka.store_object(
                 recording_obj,
                 basename='svoboda-SC035_010920_g0_tcat_imec1.json'),
             recordingObject=recording_obj,
             description='''
        A 2.0 4-shank Neuropixels probe was inserted 4.75 mm into the left hemisphere medulla of an awake, head-fixed mouse performing a trial-based behavioural task.
        '''.strip()))
    le_sortings.append(
        dict(sortingId='svoboda-SC035_010920_g0_tcat_imec1:curated',
             sortingLabel='svoboda-SC035_010920_g0_tcat_imec1:curated',
             sortingPath=ka.store_object(
                 sorting_obj,
                 basename='svoboda-SC035_010920_g0_tcat_imec1-curated.json'),
             sortingObject=sorting_obj,
             recordingId='svoboda-SC035_010920_g0_tcat_imec1',
             recordingPath=ka.store_object(
                 recording_obj,
                 basename='svoboda-SC035_010920_g0_tcat_imec1.json'),
             recordingObject=recording_obj,
             description='''
        Curated spike sorting for svoboda-SC035_010920_g0_tcat_imec1
        '''.strip()))
    for unit_id, notes in unit_notes.items():
        for note in notes:
            le_curation_actions.append(
                dict(type='ADD_UNIT_LABEL',
                     sortingId='svoboda-SC035_010920_g0_tcat_imec1:curated',
                     unitId=unit_id,
                     label=note))

    return le_recordings, le_sortings, le_curation_actions
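# Usage sketch (assumes the surrounding module defines prepare_recording and imports ka):
le_recordings, le_sortings, le_curation_actions = prepare_svoboda_datasets()
print(f'{len(le_recordings)} recordings, {len(le_sortings)} sortings, '
      f'{len(le_curation_actions)} curation actions')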
Example #24
0
from typing import Union
import kachery as ka

def store_object(object: dict, basename: Union[str, None] = None):
    return ka.store_object(object, basename=basename)
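# Usage sketch (the payload dict is illustrative):
uri = store_object({'samplerate': 30000}, basename='params.json')
print(uri)  # a sha1:// URI ending in /params.json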
Example #25
0
list_rec = [
    str(f) for f in os.listdir(path_from)
    if os.path.isdir(os.path.join(path_from, f))
]

print('# recordings: {}'.format(len(list_rec)))
for rec1 in list_rec:
    print(f'Uploading {rec1}')
    path_rec1 = os.path.join(path_from, rec1)
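    # The register_* blocks below are disabled (if False:) so this run only prints what
    # would be uploaded; switch them to True to actually register recordings / ground truth.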
    if False:
        register_recording(recdir=path_rec1,
                           output_fname=os.path.join(path_to, rec1 + '.json'),
                           label=rec1,
                           to='default_readwrite')
    if False:
        register_groundtruth(recdir=path_rec1,
                             output_fname=os.path.join(
                                 path_to, rec1 + '.firings_true.json'),
                             label=rec1 + '.firings_true',
                             to='default_readwrite')

# write to PAIRED_ENGLISH.json
print(f'STUDYSET: {studyset_name}')
print('STUDY: {}/{}'.format(studyset_name, study_name))
studydir_local = path_to

study['self_reference'] = ka.store_object(
    study, basename='{}.json'.format(study_name))
with open(os.path.join(path_to, studyset_name + '.json'), 'w') as f:
    json.dump(study, f, indent=4)
Example #26
0
import os
thisdir = os.path.dirname(os.path.realpath(__file__))
os.environ['KACHERY_STORAGE_DIR'] = thisdir + '/kachery-storage-other'
import kachery as ka

uri = ka.store_object(dict(test=[a for a in range(20000000)])) # sha1://0bcfc4795a0ffc3c69b2ec30605ff2dfe95fe51f/file.json
# uri = ka.store_object(dict(test=[a for a in range(1000000)])) # sha1://ddda24849b34d954a14a7dc9631943c691c6bbe7/file.json

print(uri)
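# Load-back sketch (same KACHERY_STORAGE_DIR as set above):
obj = ka.load_object(uri)
assert obj is not None and len(obj['test']) == 20000000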
Example #27
0
) as jh:
    with hi.Config(container=True,
                   job_cache=jc,
                   job_handler=jh,
                   required_files=recording_object):
        x = le.sorters.mountainsort4.run(
            recording_object=recording_object).wait()
        sorting_object = x['sorting_object']

le_recordings = []
le_sortings = []
le_curation_actions = []
le_recordings.append(
    dict(recordingId='test-recording-1',
         recordingLabel='test-recording-1',
         recordingPath=ka.store_object(recording_object,
                                       basename='test-recording-1.json'),
         recordingObject=recording_object,
         description='''
    A test recording
    '''.strip()))
le_sortings.append(
    dict(sortingId='test-recording-1:mountainsort4',
         sortingLabel='test-recording-1:mountainsort4',
         sortingPath=ka.store_object(
             sorting_object, basename='test-recording-1-mountainsort4.json'),
         sortingObject=sorting_object,
         recordingId='test-recording-1',
         recordingPath=ka.store_object(recording_object,
                                       basename='test-recording-1.json'),
         recordingObject=recording_object,
         description='''
Example #28
0
 def iterate(self):
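     # One polling pass: watch the workspace 'recordings'/'sortings' subfeeds plus any
     # additional subfeed watches, forward new messages to the client, and report
     # hither jobs that have finished or errored.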
     subfeed_watches = {}
     if (self._feed_uri
             is not None) and (self._feed_uri.startswith('feed://')):
         for key in ['recordings', 'sortings']:
             subfeed_name = dict(workspaceName=self._workspace_name,
                                 key=key)
             subfeed_watches[key] = dict(
                 feedId=self._feed._feed_id,  # fix this
                 subfeedName=subfeed_name,
                 position=self._subfeed_positions[key])
     for w in self._additional_subfeed_watches:
         subfeed_watches[w['watch_name']] = dict(
             feedId=w['feed_id'],
             subfeedHash=w['subfeed_hash'],
             position=self._subfeed_positions[w['watch_name']])
     if len(subfeed_watches.keys()) > 0:
         messages = kp.watch_for_new_messages(
             subfeed_watches=subfeed_watches, wait_msec=100)
         for key in messages.keys():
             if key in ['recordings', 'sortings']:
                 for m in messages[key]:
                     if 'action' in m:
                         self._send_message({
                             'type': 'action',
                             'action': m['action']
                         })
                     else:
                         print(
                             f'WARNING: no action in feed message for {key}'
                         )
             else:
                 for m in messages[key]:
                     self._send_message({
                         'type': 'subfeedMessage',
                         'watchName': key,
                         'message': m
                     })
             self._subfeed_positions[
                 key] = self._subfeed_positions[key] + len(messages[key])
     hi.wait(0)
     job_ids = list(self._jobs_by_id.keys())
     for job_id in job_ids:
         job = self._jobs_by_id[job_id]
         status0 = job.get_status()
         if status0 == hi.JobStatus.FINISHED:
             print(
                 f'======== Finished hither job: {job_id} {job.get_label()}'
             )
             result = job.get_result()
             runtime_info = job.get_runtime_info()
             del self._jobs_by_id[job_id]
             msg = {
                 'type':
                 'hitherJobFinished',
                 'client_job_id':
                 job._client_job_id,
                 'job_id':
                 job_id,
                 # 'result': _make_json_safe(result),
                 'result_sha1':
                 ka.get_file_hash(ka.store_object(_make_json_safe(result))),
                 'runtime_info':
                 runtime_info
             }
             self._send_message(msg)
         elif status0 == hi.JobStatus.ERROR:
             exc = job.get_exception()
             runtime_info = job.get_runtime_info()
             del self._jobs_by_id[job_id]
             msg = {
                 'type': 'hitherJobError',
                 'job_id': job_id,
                 'client_job_id': job._client_job_id,
                 'error_message': str(exc),
                 'runtime_info': runtime_info
             }
             self._send_message(msg)
Example #29
0
import numpy as np
import spikeextractors as se  # se: spikeextractors (assumed, per the SortingExtractor API used below)
import kachery as ka

def _create_sorting_object(sorting):
    # Pack the unit spike trains into the 'npy1' sorting format:
    # parallel times/labels arrays stored in kachery.
    unit_ids = sorting.get_unit_ids()
    times_list = []
    labels_list = []
    for i in range(len(unit_ids)):
        unit = unit_ids[i]
        times = sorting.get_unit_spike_train(unit_id=unit)
        times_list.append(times)
        labels_list.append(np.ones(times.shape) * unit)
    all_times = np.concatenate(times_list)
    all_labels = np.concatenate(labels_list)
    sort_inds = np.argsort(all_times)
    all_times = all_times[sort_inds]
    all_labels = all_labels[sort_inds]
    times_npy_uri = ka.store_npy(all_times)
    labels_npy_uri = ka.store_npy(all_labels)
    return dict(
        sorting_format='npy1',
        data=dict(
            times_npy_uri=times_npy_uri,
            labels_npy_uri=labels_npy_uri,
            samplerate=30000
        )
    )

# substitute sorting extractor here
recording, sorting = se.example_datasets.toy_example()

sorting_object = _create_sorting_object(sorting)
uri = ka.store_object(sorting_object)
print(f'Sorting URI: {uri}')
def prepare_allen_datasets():
    bin1_uri = 'sha1://39ae3fcccd3803170dd97fc9a8799e7169214419/allen_mouse419112_probeE.dat?manifest=f021b78c2fac87af872d6e6cf3f7505194395692'
    bin2_uri = 'sha1://c5acd91cfde60bc8ba619f5b03245fe6c034f682/allen_mouse415148_probeE.dat?manifest=8c99b1ffd502dc5281fc569e652d45b787df5ebc'
    channel_info_uri = 'sha1://349d7f018f4f09da5c230a9d46e07c2aeffbc1e2/channel_info.csv'
    channel_positions = _load_channel_positions_from_csv(channel_info_uri)
    X1 = dict(
        recording_format='bin1',
        data=dict(
            raw=bin1_uri,
            raw_num_channels=384,
            num_frames=105000000,  # infer from file size and guess of samplerate
            samplerate=30000,  # guess
            channel_ids=list(range(0, 384)),  # for now
            # The following are placeholders... we need the actual geom file.
            channel_map=dict(
                zip([str(c) for c in range(0, 384)],
                    [c for c in range(0, 384)])),
            channel_positions=channel_positions))
    XX2 = dict(
        recording_format='bin1',
        data=dict(
            raw=bin2_uri,
            raw_num_channels=384,
            num_frames=105000000,  # infer from file size and guess of samplerate
            samplerate=30000,  # guess
            channel_ids=list(range(0, 384)),  # for now
            # The following are placeholders... we need the actual geom file.
            channel_map=dict(
                zip([str(c) for c in range(0, 384)],
                    [c for c in range(0, 384)])),
            channel_positions=channel_positions))

    times1_npy_uri = 'sha1://57029ae68643881f5d4015397be87ba0d4815b52/curated_unit_times.npy?manifest=80b52bf7cd37ef7fb0d4ba5d1dfa543ffb207ce1'
    labels1_npy_uri = 'sha1://61762d8f0bdac57db64ceec1636e0009af0f02ef/curated_unit_IDs.npy?manifest=f716950fadb97a5a154d8762220194af6381e2c1'
    unit_channels1_npy_uri = 'sha1://8b3a98b9d45c1c62eb4402245800e278873bd8e5/curated_unit_channels.npy?manifest=91e899e3d4649f3ae457f6bf0926211dea8aa8fe'
    S1 = dict(sorting_format='npy1',
              data=dict(times_npy_uri=times1_npy_uri,
                        labels_npy_uri=labels1_npy_uri,
                        samplerate=30000))

    times2_npy_uri = 'sha1://4c717829e3ce6530349a38bd5f72fac216916276/curated_unit_times.npy?manifest=557d7cf852892b6f333b9355a3ea2293558b2a29'
    labels2_npy_uri = 'sha1://f55da958a7725edf8bde63eecf1d53edcb9de76d/curated_unit_IDs.npy?manifest=e028ca15c01ea5f53e2bd341ab001741e7842084'
    unit_channels2_npy_uri = 'sha1://7f2079292b1ef29264b9152073d09dfa3b4dcbe7/curated_unit_channels.npy?manifest=2b35e2b83c9af0431b8aa1ab69e1846e21f24668'
    S2 = dict(sorting_format='npy1',
              data=dict(times_npy_uri=times2_npy_uri,
                        labels_npy_uri=labels2_npy_uri,
                        samplerate=30000))

    X1a = create_subrecording_object.run(recording_object=X1,
                                         channels=[0, 1, 2, 3, 4, 5, 6, 7],
                                         start_frame=0,
                                         end_frame=30000 * 10)
    X1b = create_subrecording_object.run(recording_object=X1,
                                         channels=None,
                                         start_frame=0,
                                         end_frame=30000 * 10)
    hi.wait()
    X1a = X1a.get_result()
    X1b = X1b.get_result()
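    # X1a: first 10 seconds restricted to channels 0-7; X1b: first 10 seconds on all channels.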

    # labbox-ephys format for recordings
    le_recordings = []
    le_sortings = []
    le_recordings.append(
        dict(recordingId='allen_mouse419112_probeE',
             recordingLabel='allen_mouse419112_probeE (full)',
             recordingPath=ka.store_object(
                 X1, basename='allen_mouse419112_probeE.json'),
             recordingObject=X1,
             description='''
        A one hour neuropixels recording from Allen Institute
        '''.strip()))
    le_recordings.append(
        dict(recordingId='allen_mouse415148_probeE',
             recordingLabel='allen_mouse415148_probeE (full)',
             recordingPath=ka.store_object(
                 XX2, basename='allen_mouse415148_probeE.json'),
             recordingObject=XX2,
             description='''
        A one hour neuropixels recording from Allen Institute
        '''.strip()))
    le_sortings.append(
        dict(sortingId='allen_mouse419112_probeE:curated',
             sortingLabel='allen_mouse419112_probeE Curated',
             sortingPath=ka.store_object(
                 S1, basename='allen_mouse419112_probeE-curated.json'),
             sortingObject=S1,
             recordingId='allen_mouse419112_probeE',
             recordingPath=ka.store_object(
                 X1, basename='allen_mouse419112_probeE.json'),
             recordingObject=X1,
             description='''
        Curated spike sorting for allen_mouse419112_probeE
        '''.strip()))
    le_sortings.append(
        dict(sortingId='allen_mouse415148_probeE:curated',
             sortingLabel='allen_mouse415148_probeE Curated',
             sortingPath=ka.store_object(
                 S2, basename='allen_mouse415148_probeE-curated.json'),
             sortingObject=S2,
             recordingId='allen_mouse415148_probeE',
             recordingPath=ka.store_object(
                 XX2, basename='allen_mouse415148_probeE.json'),
             recordingObject=XX2,
             description='''
        Curated spike sorting for allen_mouse415148_probeE **Updated 9 Sep 2020**
        '''.strip()))
    le_recordings.append(
        dict(recordingId='allen_mouse419112_probeE-ch0-7.10sec',
             recordingLabel='allen_mouse419112_probeE (ch 0-7, 10 sec)',
             recordingPath=ka.store_object(
                 X1a, basename='allen_mouse419112_probeE-ch0-7-10sec.json'),
             recordingObject=X1a,
             description='''
        Subset of channels and first 10 seconds of allen_mouse419112_probeE
        '''.strip()))
    le_recordings.append(
        dict(recordingId='allen_mouse419112_probeE-10sec',
             recordingLabel='allen_mouse419112_probeE (10 sec)',
             recordingPath=ka.store_object(
                 X1b, basename='allen_mouse419112_probeE-10sec.json'),
             recordingObject=X1b,
             description='''
        First 10 seconds of allen_mouse419112_probeE
        '''.strip()))

    return le_recordings, le_sortings
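# Usage sketch (assumes hither and kachery are configured as elsewhere in this module):
le_recordings, le_sortings = prepare_allen_datasets()
print(f'{len(le_recordings)} recordings, {len(le_sortings)} sortings')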