Example #1
def get_timeseries_segment(recording_object, ds_factor, segment_num,
                           segment_size):
    import numpy as np
    import labbox_ephys as le
    recording0 = le.LabboxEphysRecordingExtractor(recording_object,
                                                  download=False)

    t1 = segment_num * segment_size * ds_factor
    t2 = ((segment_num + 1) * segment_size * ds_factor)
    if t2 > recording0.get_num_frames():
        t2 = int(recording0.get_num_frames() / ds_factor) * ds_factor
    traces = recording0.get_traces(start_frame=t1, end_frame=t2)
    M = traces.shape[0]
    N = traces.shape[1]
    if ds_factor > 1:
        N2 = int(N / ds_factor)
        traces_reshaped = traces.reshape((M, N2, ds_factor))
        traces_min = np.min(traces_reshaped, axis=2)
        traces_max = np.max(traces_reshaped, axis=2)
        traces = np.zeros((M, N2 * 2))
        traces[:, 0::2] = traces_min
        traces[:, 1::2] = traces_max

    data_b64 = _mda32_to_base64(traces)
    # elapsed = time.time() - timer
    return dict(data_b64=data_b64)
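
The min/max downsampling above keeps spike peaks visible after decimation: each ds_factor-wide bin contributes a pair of output columns holding its minimum and maximum. A minimal standalone sketch of just that step (pure NumPy; the helper name is hypothetical):

import numpy as np

def minmax_downsample(traces, ds_factor):
    # traces: (num_channels, num_frames); frames beyond a whole number of bins are dropped
    M, N = traces.shape
    N2 = N // ds_factor
    reshaped = traces[:, :N2 * ds_factor].reshape((M, N2, ds_factor))
    out = np.zeros((M, N2 * 2), dtype=traces.dtype)
    out[:, 0::2] = reshaped.min(axis=2)  # even columns: bin minima
    out[:, 1::2] = reshaped.max(axis=2)  # odd columns: bin maxima
    return out

# 4 channels, 1000 frames, downsampled by 10 -> shape (4, 200)
print(minmax_downsample(np.random.randn(4, 1000).astype(np.float32), 10).shape)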
Example #2
def old_fetch_average_waveform_plot_data(recording_object, sorting_object,
                                         unit_id):
    import numpy as np
    import labbox_ephys as le
    import spikeextractors as se
    import spiketoolkit as st
    R = le.LabboxEphysRecordingExtractor(recording_object)
    S = le.LabboxEphysSortingExtractor(sorting_object)

    start_frame = 0
    end_frame = int(R.get_sampling_frequency() * 30)  # first 30 seconds of the recording
    R0 = se.SubRecordingExtractor(parent_recording=R,
                                  start_frame=start_frame,
                                  end_frame=end_frame)
    S0 = se.SubSortingExtractor(parent_sorting=S,
                                start_frame=start_frame,
                                end_frame=end_frame)

    times0 = S0.get_unit_spike_train(unit_id=unit_id)
    if len(times0) == 0:
        # no waveforms found
        return dict(channel_id=None, average_waveform=None)
    try:
        average_waveform = st.postprocessing.get_unit_templates(
            recording=R0, sorting=S0, unit_ids=[unit_id])[0]
    except Exception as err:
        raise Exception(f'Error getting unit templates for unit {unit_id}: {err}')

    channel_maximums = np.max(np.abs(average_waveform), axis=1)
    maxchan_index = np.argmax(channel_maximums)
    maxchan_id = R0.get_channel_ids()[maxchan_index]

    return dict(channel_id=maxchan_id,
                average_waveform=average_waveform[maxchan_index, :].tolist())
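
The peak channel is chosen as the channel whose template has the largest absolute amplitude, and only that row of the template is returned. A minimal sketch with a synthetic template (values are made up):

import numpy as np

average_waveform = np.random.randn(4, 60).astype(np.float32)  # (num_channels, num_samples)
average_waveform[2, 30] = -50.0  # inject a large negative peak on channel index 2

channel_maximums = np.max(np.abs(average_waveform), axis=1)  # peak absolute amplitude per channel
maxchan_index = int(np.argmax(channel_maximums))             # -> 2
print(maxchan_index, average_waveform[maxchan_index, :].shape)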
def main():
    recording_object = {
        "data": {
            "geom": [[1, 0], [2, 0], [3, 0], [4, 0]],
            "params": {
                "samplerate": 30000,
                "spike_sign": -1
            },
            "raw":
            "sha1dir://fb52d510d2543634e247e0d2d1d4390be9ed9e20.synth_magland/datasets_noise10_K10_C4/001_synth/raw.mda"
        },
        "recording_format": "mda"
    }
    sorting_object = {
        "data": {
            "firings":
            "sha1dir://fb52d510d2543634e247e0d2d1d4390be9ed9e20.synth_magland/datasets_noise10_K10_C4/001_synth/firings_true.mda",
            "samplerate": 30000
        },
        "sorting_format": "mda"
    }
    recording = le.LabboxEphysRecordingExtractor(recording_object)
    sorting = le.LabboxEphysSortingExtractor(sorting_object)
    h5_path = le.prepare_snippets_h5.run(
        sorting_object=sorting_object,
        recording_object=recording_object).wait()
    print(h5_path)
Example #4
def get_sorting_info(sorting_object, recording_object):
    sorting = le.LabboxEphysSortingExtractor(sorting_object)
    recording = le.LabboxEphysRecordingExtractor(recording_object, download=False)
    return dict(
        unit_ids=_to_int_list(sorting.get_unit_ids()),
        samplerate=recording.get_sampling_frequency()
    )
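
_to_int_list is presumably a small module-level helper that turns numpy integer IDs into plain Python ints so the returned dict is JSON-serializable; a minimal sketch of what it might look like:

import numpy as np

def _to_int_list(x):
    # convert numpy integer types to plain Python ints (JSON-friendly)
    return [int(v) for v in x]

print(_to_int_list(np.array([1, 2, 3], dtype=np.int64)))  # [1, 2, 3]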
def calculate_timeseries_info(recording_object):
    recording0 = le.LabboxEphysRecordingExtractor(recording_object, download=False)

    traces0 = recording0.get_traces(
        channel_ids=recording0.get_channel_ids(),
        start_frame=0,
        end_frame=min(recording0.get_num_frames(), 25000))

    y_offsets = -np.mean(traces0, axis=1)
    for m in range(traces0.shape[0]):
        traces0[m, :] = traces0[m, :] + y_offsets[m]
    vv = np.percentile(np.abs(traces0), 90)
    y_scale_factor = 1 / (2 * vv) if vv > 0 else 1

    # segment_size_times_num_channels = 1000000
    segment_size_times_num_channels = 100000
    segment_size = int(np.ceil(segment_size_times_num_channels / recording0.get_num_channels()))

    return dict(
        samplerate=recording0.get_sampling_frequency(),
        num_channels=len(recording0.get_channel_ids()),
        channel_ids=recording0.get_channel_ids(),
        channel_locations=geom_from_recording(recording0).tolist(),
        num_timepoints=recording0.get_num_frames(),
        y_offsets=y_offsets.astype(float).tolist(),
        y_scale_factor=float(y_scale_factor),
        initial_y_scale_factor=1,
        segment_size=segment_size
    )
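
The y_offsets/y_scale_factor pair centers each channel at zero and rescales so that a typical (90th-percentile) absolute amplitude maps to roughly 0.5 display units. A standalone sketch of that normalization on synthetic traces:

import numpy as np

traces0 = np.random.randn(4, 25000).astype(np.float32) + np.array([[10.0], [-5.0], [0.0], [3.0]], dtype=np.float32)

y_offsets = -np.mean(traces0, axis=1)            # per-channel shift to zero mean
traces0 = traces0 + y_offsets[:, np.newaxis]     # vectorized form of the per-row loop above
vv = np.percentile(np.abs(traces0), 90)          # robust amplitude estimate
y_scale_factor = 1 / (2 * vv) if vv > 0 else 1
print(y_offsets.round(2), round(float(y_scale_factor), 3))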
def get_franklab_datajoint_importable_recordings(config):
    import os
    import traceback
    import labbox_ephys as le

    os.environ['DJ_SUPPORT_FILEPATH_MANAGEMENT'] = 'TRUE'
    import datajoint as dj
    dj.config['enable_python_native_blobs'] = True
    dj.config['database.port'] = config['port']
    dj.config['database.user'] = config['user']
    dj.config['database.password'] = config['password']

    # must config dj prior to importing
    import nwb_datajoint as nwbdj

    ret = []
    for session in nwbdj.Session():
        nwb_file_sha1 = session['nwb_file_sha1']
        nwb_file_name = session['nwb_file_name']
        nwb_path = f'sha1://{nwb_file_sha1}/file.nwb'
        try:
            recording = le.LabboxEphysRecordingExtractor(nwb_path)
            recording_object = recording.object()
        except Exception:
            traceback.print_exc()
            print(f'Warning: problem loading recording: {nwb_path}')
            recording_object = None
        if recording_object is not None:
            info = le.get_recording_info(recording_object)
            ret.append(dict(
                label=nwb_file_name,
                path=nwb_path,
                recording_object=recording_object,
                recording_info=info
            ))
    return ret
Example #7
def preload_download_recording_2(recording_object):
    import kachery_p2p as kp
    try:
        X = le.LabboxEphysRecordingExtractor(recording_object, download=True)
    except Exception:
        return {'success': False}
    return {'success': True}
Example #8
def get_recording_info(recording_object):
    recording = le.LabboxEphysRecordingExtractor(recording_object,
                                                 download=False)
    return dict(sampling_frequency=recording.get_sampling_frequency(),
                channel_ids=recording.get_channel_ids(),
                channel_groups=recording.get_channel_groups().tolist(),
                geom=geom_from_recording(recording).tolist(),
                num_frames=recording.get_num_frames(),
                is_local=recording.is_local())
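
geom_from_recording is not shown on this page; judging from the snippets example near the end, which reads the per-channel "location" property, it presumably does something like the following (a sketch, not the actual labbox_ephys implementation):

import numpy as np

def geom_from_recording(recording):
    # stack the 'location' property of each channel into an (num_channels, ndim) array
    return np.array([recording.get_channel_property(ch, 'location')
                     for ch in recording.get_channel_ids()])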
Example #9
    def javascript_state_changed(self, prev_state, state):
        self._set_status('running', 'Running TimeseriesView')
        self._create_efficient_access = state.get('create_efficient_access',
                                                  False)
        if not self._recording:
            self._set_status('running', 'Loading recording')
            recording0 = state.get('recording', None)
            if not recording0:
                self._set_error('Missing: recording')
                return
            try:
                self._recording = le.LabboxEphysRecordingExtractor(recording0)
            except Exception as err:
                traceback.print_exc()
                self._set_error('Problem initiating recording: {}'.format(err))
                return

            self._set_status('running', 'Loading recording data')
            traces0 = self._recording.get_traces(
                channel_ids=self._recording.get_channel_ids(),
                start_frame=0,
                end_frame=min(self._recording.get_num_frames(), 25000))
            y_offsets = -np.mean(traces0, axis=1)
            for m in range(traces0.shape[0]):
                traces0[m, :] = traces0[m, :] + y_offsets[m]
            vv = np.percentile(np.abs(traces0), 90)
            y_scale_factor = 1 / (2 * vv) if vv > 0 else 1
            self._segment_size = int(
                np.ceil(self._segment_size_times_num_channels /
                        self._recording.get_num_channels()))
            try:
                channel_locations = self._recording.get_channel_locations()
            except Exception:
                channel_locations = None
            self.set_state(
                dict(num_channels=self._recording.get_num_channels(),
                     channel_ids=self._recording.get_channel_ids(),
                     channel_locations=channel_locations,
                     num_timepoints=self._recording.get_num_frames(),
                     y_offsets=y_offsets,
                     y_scale_factor=y_scale_factor,
                     samplerate=self._recording.get_sampling_frequency(),
                     segment_size=self._segment_size,
                     status_message='Loaded recording.'))

        # SR = state.get('segmentsRequested', {})
        # for key in SR.keys():
        #     aa = SR[key]
        #     if not self.get_python_state(key, None):
        #         self.set_state(dict(status_message='Loading segment {}'.format(key)))
        #         data0 = self._load_data(aa['ds'], aa['ss'])
        #         data0_base64 = _mda32_to_base64(data0)
        #         state0 = {}
        #         state0[key] = dict(data=data0_base64, ds=aa['ds'], ss=aa['ss'])
        #         self.set_state(state0)
        #         self.set_state(dict(status_message='Loaded segment {}'.format(key)))
        self._set_status('finished', '')
Example #10
def mountainsort4b(recording_object: dict,
                   detect_sign=-1,
                   adjacency_radius=50,
                   clip_size=50,
                   detect_threshold=3,
                   detect_interval=10,
                   freq_min=300,
                   freq_max=6000,
                   whiten=True,
                   curation=False,
                   filter=True):
    # Unfortunately we need to duplicate wrapper code from spikeforest2 due to trickiness in running code in containers. Will need to think about this
    # import spiketoolkit as st
    import spikesorters as ss
    import labbox_ephys as le

    recording = le.LabboxEphysRecordingExtractor(recording_object)

    # for quick testing
    # import spikeextractors as se
    # recording = se.SubRecordingExtractor(parent_recording=recording_object, start_frame=0, end_frame=30000 * 1)

    # Preprocessing
    # print('Preprocessing...')
    # recording = st.preprocessing.bandpass_filter(recording_object, freq_min=300, freq_max=6000)
    # recording = st.preprocessing.whiten(recording_object)

    # Sorting
    print('Sorting...')
    with hi.TemporaryDirectory() as tmpdir:
        sorter = ss.Mountainsort4Sorter(recording=recording,
                                        output_folder=tmpdir,
                                        delete_output_folder=False)

        num_workers = os.environ.get('NUM_WORKERS', None)
        if num_workers:
            num_workers = int(num_workers)
        else:
            num_workers = 0

        sorter.set_params(detect_sign=detect_sign,
                          adjacency_radius=adjacency_radius,
                          clip_size=clip_size,
                          detect_threshold=detect_threshold,
                          detect_interval=detect_interval,
                          num_workers=num_workers,
                          curation=curation,
                          whiten=whiten,
                          filter=filter,
                          freq_min=freq_min,
                          freq_max=freq_max)
        timer = sorter.run()
        print('#SF-SORTER-RUNTIME#{:.3f}#'.format(timer))
        sorting = sorter.get_result()
        sorting_object = _create_sorting_object(sorting)
        return dict(sorting_object=sorting_object)
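
mountainsort4b reads the worker count from the NUM_WORKERS environment variable rather than taking it as a parameter, so parallelism is configured before the call. A hedged usage sketch (the recording_object would be an 'mda'-format object like the one in the main() example above):

import os

os.environ['NUM_WORKERS'] = '4'  # 0 (the default) lets the sorter choose

# hypothetical invocation; returns {'sorting_object': ...} as in the function above
# result = mountainsort4b(recording_object, detect_sign=-1, detect_threshold=3, filter=True)
# sorting_object = result['sorting_object']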
Example #11
def preload_check_recording_downloaded_2(recording_object):
    import kachery_p2p as kp
    try:
        kp._experimental_config(nop2p=True)
        X = le.LabboxEphysRecordingExtractor(recording_object, download=False)
    except Exception:
        return {'isLocal': False}
    finally:
        kp._experimental_config(nop2p=False)
    return {'isLocal': True}
Example #12
    def get_gt_recording(self, study_name, recording_name, download=False):
        # get study
        dataset = self._df.query(f"studyName == '{study_name}'")
        assert len(dataset) > 0, f"Study '{study_name}' not found"

        # get recording
        dataset = dataset.query(f"recordingName == '{recording_name}'")
        assert len(dataset) > 0, f"Recording '{recording_name}' not found"

        recording_uri = dataset.iloc[0]["recordingUri"]
        recording = le.LabboxEphysRecordingExtractor(recording_uri,
                                                     download=download)

        return recording
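
get_gt_recording assumes that self._df is a pandas DataFrame with studyName, recordingName and recordingUri columns; a minimal sketch of that lookup with hypothetical values:

import pandas as pd

df = pd.DataFrame([
    {'studyName': 'paired_kampff', 'recordingName': '2014_11_25_Pair_3_0',
     'recordingUri': 'sha1://.../recording.json'},  # placeholder URI
])
dataset = df.query("studyName == 'paired_kampff'")
dataset = dataset.query("recordingName == '2014_11_25_Pair_3_0'")
print(dataset.iloc[0]['recordingUri'])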
Example #13
def main():
    snippets_h5_uri = 'sha1://55c0cb6a63231236b6948b0dd422e6fedc75c5b5/real_snippets.h5?manifest=b124474caccccdba135d9550ec544a88caf531aa'
    recording_obj = {
        'recording_format': 'snippets1',
        'data': {
            'snippets_h5_uri': snippets_h5_uri
        }
    }
    sorting_obj = {
        'sorting_format': 'snippets1',
        'data': {
            'snippets_h5_uri': snippets_h5_uri
        }
    }
    recording = le.LabboxEphysRecordingExtractor(recording_obj)
    sorting = le.LabboxEphysSortingExtractor(sorting_obj)
    print(recording.get_sampling_frequency())
    print(recording.get_channel_ids())

    le_recordings = []
    le_sortings = []

    le_recordings.append(
        dict(recordingId='loren_example1',
             recordingLabel='loren_example1',
             recordingPath=ka.store_object(recording_obj,
                                           basename='loren_example1.json'),
             recordingObject=recording_obj,
             description='''
        Example from Loren Frank
        '''.strip()))
    le_sortings.append(
        dict(sortingId='loren_example1:mountainsort4',
             sortingLabel='loren_example1:mountainsort4',
             sortingPath=ka.store_object(
                 sorting_obj, basename='loren_example-mountainsort4.json'),
             sortingObject=sorting_obj,
             recordingId='loren_example1',
             recordingPath=ka.store_object(recording_obj,
                                           basename='loren_example1.json'),
             recordingObject=recording_obj,
             description='''
        Example from Loren Frank (MountainSort4)
        '''.strip()))

    feed_uri = create_labbox_ephys_feed(le_recordings, le_sortings)
    print(feed_uri)
Example #14
def main():
    snippets_h5_uri = 'sha1://5fc6996dfed9e7fd577bc85194d982a1ba52085e/real_snippets_1.h5?manifest=741f23273c3121aada6d9bdb67009c8c2ae1ed77'
    recording_obj = {
        'recording_format': 'snippets1',
        'data': {
            'snippets_h5_uri': snippets_h5_uri
        }
    }
    sorting_obj = {
        'sorting_format': 'snippets1',
        'data': {
            'snippets_h5_uri': snippets_h5_uri
        }
    }
    recording = le.LabboxEphysRecordingExtractor(recording_obj)
    sorting = le.LabboxEphysSortingExtractor(sorting_obj)
    print(recording.get_sampling_frequency())
    print(recording.get_channel_ids())

    le_recordings = []
    le_sortings = []

    le_recordings.append(
        dict(recordingId='loren_example1',
             recordingLabel='loren_example1',
             recordingPath=ka.store_object(recording_obj,
                                           basename='loren_example1.json'),
             recordingObject=recording_obj,
             description='''
        Example from Loren Frank
        '''.strip()))
    le_sortings.append(
        dict(sortingId='loren_example1:mountainsort4',
             sortingLabel='loren_example1:mountainsort4',
             sortingPath=ka.store_object(
                 sorting_obj, basename='loren_example-mountainsort4.json'),
             sortingObject=sorting_obj,
             recordingId='loren_example1',
             recordingPath=ka.store_object(recording_obj,
                                           basename='loren_example1.json'),
             recordingObject=recording_obj,
             description='''
        Example from Loren Frank (MountainSort4)
        '''.strip()))

    feed_uri = create_labbox_ephys_feed(le_recordings, le_sortings)
    print(feed_uri)
Example #15
def main():
    # Sorting: cortexlab-single-phase-3 Curated (good units) for cortexlab-single-phase-3 (full)
    recording_object = kp.load_object(
        'sha1://8b222e25bc4d9c792e4490ca322b5338e0795596/cortexlab-single-phase-3.json'
    )
    sorting_object = {
        "sorting_format": "h5_v1",
        "data": {
            "h5_path":
            "sha1://68029d0eded8ca7d8f95c16dea81318966ae9b55/sorting.h5?manifest=12b0d8e37c7050a6fe636d4c16ed143bbd5dab0c"
        }
    }
    recording = le.LabboxEphysRecordingExtractor(recording_object)
    sorting = le.LabboxEphysSortingExtractor(sorting_object)
    h5_path = le.prepare_snippets_h5.run(sorting_object=sorting_object,
                                         recording_object=recording_object,
                                         start_frame=0,
                                         end_frame=30000 * 240).wait()
    print(h5_path)
Example #16
def get_isi_violation_rates(sorting_object, recording_object, configuration={}):
    import labbox_ephys as le
    import spikemetrics as sm
    S = le.LabboxEphysSortingExtractor(sorting_object)
    R = le.LabboxEphysRecordingExtractor(recording_object)

    samplerate = R.get_sampling_frequency()
#    duration_sec = R.get_num_frames() / samplerate

    isi_threshold_msec = configuration.get('isi_threshold_msec', 2.5)
    unit_ids = configuration.get('unit_ids', S.get_unit_ids())

    ret = {}
    for unit_id in unit_ids:
        spike_train = S.get_unit_spike_train(unit_id=unit_id)
        ret[str(unit_id)], _ = sm.metrics.isi_violations(  # second return value is the total violation count
            spike_train=spike_train,
            duration=R.get_num_frames(),
            isi_threshold=isi_threshold_msec / 1000 * samplerate
        )
    return ret
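
For intuition, the quantity being checked is the number of inter-spike intervals shorter than the refractory threshold (converted above from milliseconds to frames). A simplified standalone illustration in NumPy, not the exact spikemetrics formula, which also normalizes by duration and firing rate:

import numpy as np

samplerate = 30000
isi_threshold_frames = 2.5 / 1000 * samplerate  # 2.5 ms -> 75 frames
spike_train = np.sort(np.random.randint(0, samplerate * 60, size=5000))  # one minute of spikes
isis = np.diff(spike_train)
num_violations = int(np.sum(isis < isi_threshold_frames))
print(num_violations, num_violations / max(len(isis), 1))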
Example #17
def get_timeseries_segment(recording_object, ds_factor, segment_num,
                           segment_size):
    import numpy as np
    import labbox_ephys as le
    recording0 = le.LabboxEphysRecordingExtractor(recording_object,
                                                  download=False)

    t1 = segment_num * segment_size * ds_factor
    t2 = ((segment_num + 1) * segment_size * ds_factor)
    if t2 > recording0.get_num_frames():
        t2 = int(recording0.get_num_frames() / ds_factor) * ds_factor
    traces = recording0.get_traces(start_frame=t1, end_frame=t2)
    M = traces.shape[0]
    N = traces.shape[1]
    if ds_factor > 1:
        N2 = int(N / ds_factor)
        traces_reshaped = traces.reshape((M, N2, ds_factor))
        traces_min = np.min(traces_reshaped, axis=2)
        traces_max = np.max(traces_reshaped, axis=2)
        traces = np.zeros((M, N2 * 2), dtype=np.float32)
        traces[:, 0::2] = traces_min
        traces[:, 1::2] = traces_max

    return {'traces': traces.astype(np.float32)}
Example #18
def prepare_snippets_h5(recording_object,
                        sorting_object,
                        start_frame=None,
                        end_frame=None,
                        max_events_per_unit=None,
                        max_neighborhood_size=15):
    if recording_object['recording_format'] == 'snippets1':
        return recording_object['data']['snippets_h5_uri']

    import labbox_ephys as le
    recording = le.LabboxEphysRecordingExtractor(recording_object)
    sorting = le.LabboxEphysSortingExtractor(sorting_object)

    with hi.TemporaryDirectory() as tmpdir:
        save_path = tmpdir + '/snippets.h5'
        prepare_snippets_h5_from_extractors(
            recording=recording,
            sorting=sorting,
            output_h5_path=save_path,
            start_frame=start_frame,
            end_frame=end_frame,
            max_events_per_unit=max_events_per_unit,
            max_neighborhood_size=max_neighborhood_size)
        return ka.store_file(save_path)
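
The return value is a kachery URI for the HDF5 snippets file, which can then be wrapped in 'snippets1' recording/sorting objects like the ones used in the snippets examples above. A hedged sketch, assuming the function is called directly (elsewhere on this page it is invoked as le.prepare_snippets_h5.run(...).wait()):

# recording_object / sorting_object as elsewhere on this page
snippets_h5_uri = prepare_snippets_h5(recording_object, sorting_object)
recording_obj = {'recording_format': 'snippets1', 'data': {'snippets_h5_uri': snippets_h5_uri}}
sorting_obj = {'sorting_format': 'snippets1', 'data': {'snippets_h5_uri': snippets_h5_uri}}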
Example #19
    "label":
    "SF/PAIRED_KAMPFF/paired_kampff/2014_11_25_Pair_3_0",
    "recording_uri":
    "sha1://a205f87cef8b7f86df7a09cddbc79a1fbe5df60f/SF/PAIRED_KAMPFF/paired_kampff/2014_11_25_Pair_3_0.json",
    "sorting_true_uri":
    "sha1://1cd517687aeca7ecdfaa9695680038d142a75031/firings_true.mda"
}
# To find more examples, see: https://github.com/flatironinstitute/spikeforest_recordings
# However: note that some processing needs to be done to the files in this repo (to add the manifests to the raw data). This is WIP

# Adjust these values ###########################
X = X1  # Select example from above
feed_name = 'labbox-ephys-default'
workspace_name = 'default'
#################################################

recording_label = X['label']
recording_uri = X['recording_uri']
sorting_true_uri = X['sorting_true_uri']
recording = le.LabboxEphysRecordingExtractor(recording_uri, download=True)
sorting_true = le.LabboxEphysSortingExtractor(sorting_true_uri,
                                              samplerate=30000)

sorting_label = 'true'
feed = kp.load_feed(feed_name, create=True)
workspace = le.load_workspace(workspace_name=workspace_name, feed=feed)
print(f'Feed URI: {feed.get_uri()}')
R_id = workspace.add_recording(recording=recording, label=recording_label)
S_id = workspace.add_sorting(sorting=sorting_true,
                             recording_id=R_id,
                             label=sorting_label)
    end_frame=None,
    max_events_per_unit=1000,
    max_neighborhood_size=2
)

# Example display some contents of the file
with h5py.File(output_h5_path, 'r') as f:
    unit_ids = np.array(f.get('unit_ids'))
    sampling_frequency = np.array(f.get('sampling_frequency'))[0].item()
    if np.isnan(sampling_frequency):
        print('WARNING: sampling frequency is nan. Using 30000 for now. Please correct the snippets file.')
        sampling_frequency = 30000
    print(f'Unit IDs: {unit_ids}')
    print(f'Sampling freq: {sampling_frequency}')
    for unit_id in unit_ids:
        unit_spike_train = np.array(f.get(f'unit_spike_trains/{unit_id}'))
        unit_waveforms = np.array(f.get(f'unit_waveforms/{unit_id}/waveforms'))
        unit_waveforms_channel_ids = np.array(f.get(f'unit_waveforms/{unit_id}/channel_ids'))
        print(f'Unit {unit_id} | Tot num events: {len(unit_spike_train)} | shape of subsampled snippets: {unit_waveforms.shape}')

recording = le.LabboxEphysRecordingExtractor({
    'recording_format': 'snippets1',
    'data': {
        'snippets_h5_uri': ka.store_file(output_h5_path)
    }
})
print(f'Channel IDs: {recording.get_channel_ids()}')
print(f'Num. frames: {recording.get_num_frames()}')
for channel_id in recording.get_channel_ids():
    print(f'Channel {channel_id}: {recording.get_channel_property(channel_id, "location")}')
Example #21
def get_structure(sorting_object, recording_object):
    import labbox_ephys as le
    S = le.LabboxEphysSortingExtractor(sorting_object)
    R = le.LabboxEphysRecordingExtractor(recording_object)
    return S, R
Example #22
def download_recording(recording_object):
    recording = le.LabboxEphysRecordingExtractor(recording_object,
                                                 download=False)
    recording.download()
def get_sorting_object(sorting_path, recording_object):
    recording = le.LabboxEphysRecordingExtractor(recording_object,
                                                 download=False)
    sorting = le.LabboxEphysSortingExtractor(
        sorting_path, samplerate=recording.get_sampling_frequency())
    return sorting.object()
Example #24
def get_recording_object(recording_path):
    recording = le.LabboxEphysRecordingExtractor(recording_path,
                                                 download=False)
    return recording.object()
def recording_is_downloaded(recording_object):
    recording = le.LabboxEphysRecordingExtractor(recording_object,
                                                 download=False)
    return recording.is_local()
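
Taken together, the small helpers above suggest a typical check-then-download flow; a minimal sketch (the URI is a placeholder):

import labbox_ephys as le

recording_path = 'sha1://<hash>/recording.json'  # placeholder URI

recording = le.LabboxEphysRecordingExtractor(recording_path, download=False)
if not recording.is_local():
    recording.download()       # fetch the raw data, as in download_recording above
print(recording.object())      # serializable object, as returned by get_recording_object above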