    def test_mearec_extractors(self):
        path1 = self.test_dir + '/raw.h5'
        se.MEArecRecordingExtractor.write_recording(self.RX, path1)
        RX_mearec = se.MEArecRecordingExtractor(path1)
        tr = RX_mearec.get_traces(channel_ids=[0, 1], end_frame=1000)
        self._check_recording_return_types(RX_mearec)
        self._check_recordings_equal(self.RX, RX_mearec)

        path2 = self.test_dir + '/firings_true.h5'
        se.MEArecSortingExtractor.write_sorting(self.SX, path2, self.RX.get_sampling_frequency())
        SX_mearec = se.MEArecSortingExtractor(path2)
        self._check_sorting_return_types(SX_mearec)
        self._check_sortings_equal(self.SX, SX_mearec)
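        # self.RX and self.SX are fixtures presumably created in the test's setUp;
        # a minimal stand-in (illustrative only, not the actual fixture) might be:
        #   import numpy as np
        #   self.RX = se.NumpyRecordingExtractor(timeseries=np.random.randn(4, 30000),
        #                                        sampling_frequency=30000)
        #   self.SX = se.NumpySortingExtractor()
        #   self.SX.set_sampling_frequency(30000)
        #   self.SX.add_unit(unit_id=1, times=np.sort(np.random.randint(0, 30000, 50)))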
Example #2
    def test_mearec_extractors(self):
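        # note: this variant uses the legacy camelCase spikeextractors API
        # (writeRecording / writeSorting / getSamplingFrequency); newer releases
        # renamed these to the snake_case methods used in Example #1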
        path1 = self.test_dir + '/raw'
        se.MEArecRecordingExtractor.writeRecording(self.RX, path1)
        RX_mearec = se.MEArecRecordingExtractor(path1)
        self._check_recording_return_types(RX_mearec)
        self._check_recordings_equal(self.RX, RX_mearec)

        path2 = self.test_dir + '/firings_true'
        se.MEArecSortingExtractor.writeSorting(self.SX, path2,
                                               self.RX.getSamplingFrequency())
        SX_mearec = se.MEArecSortingExtractor(path2)
        self._check_sorting_return_types(SX_mearec)
        self._check_sortings_equal(self.SX, SX_mearec)
Example #3
def test_import_from_spike_interface():
    import os
    import shutil

    import spikeextractors as se
    p = '/media/samuel/SamCNRS/DataSpikeSorting/mearec/'
    mearec_filename = p + 'recordings_50cells_SqMEA-10-15um_60.0_10.0uV_27-03-2019_13_31.h5'
    rec0 = se.MEArecRecordingExtractor(mearec_filename)

    for chan in rec0.get_channel_ids():  # drop the first coordinate so channel locations are 2D
        loc = rec0.get_channel_property(chan, 'location')
        rec0.set_channel_property(chan, 'location', loc[1:])

    gt_sorting0 = se.MEArecSortingExtractor(mearec_filename)

    tdc_dirname = p + 'working_folder/output_folders/rec0/tridesclous/'

    if os.path.exists(tdc_dirname):
        shutil.rmtree(tdc_dirname)

    import_from_spike_interface(rec0, gt_sorting0, tdc_dirname)
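    # With the data imported, the tridesclous working directory can presumably
    # be opened for sorting (a sketch, assuming tridesclous's DataIO API):
    #   import tridesclous as tdc
    #   dataio = tdc.DataIO(dirname=tdc_dirname)
    #   print(dataio)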
Example #4
import numpy as np
import spikeextractors as se


def get_recording_data(recording_file):
    recording = se.MEArecRecordingExtractor(recording_file)
    sorting = se.MEArecSortingExtractor(recording_file)
    geom = np.asarray(recording.get_channel_locations())

    spike_times = []
    for unit_id in sorting.get_unit_ids():
        spike_train = sorting.get_unit_spike_train(unit_id=unit_id)
        spike_times.extend(spike_train)
    spike_times = sorted(spike_times)

    spike_frame_channel_array = []
    progress_step = max(1, len(spike_times) // 100)  # guard against modulo-by-zero when there are <100 spikes
    for i, spike_time in enumerate(spike_times):
        if i % progress_step == 0:
            print(int(float(i) / len(spike_times) * 100), '%', end="\r")
        snippets = np.squeeze(recording.get_snippets(channel_ids=None, reference_frames=[spike_time], snippet_len=10),0)
        min_channel_id = np.argmin(np.min(snippets, 1))
        spike_frame_channel_array.append([spike_time, min_channel_id])
    print("100 %", end="\r")

    return geom, recording, spike_frame_channel_array
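# Hypothetical usage (the file path below is illustrative, not from the source):
#   geom, recording, spike_frame_channel_array = get_recording_data('./recordings/rec.h5')
#   print(len(spike_frame_channel_array), 'spikes on a', geom.shape[0], 'channel probe')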
Example #5
# We now load the augmented training data and also the labels (provided for ground truth)

# In[4]:

import h5py
import numpy as np
import MEArec as mr
import spikeextractors as se

recording_directory = './recordings/'
augmented_data_path = './recordings/'
recording_name = 'recordings_300_SqMEA-10-15um_minamp0_60s_10uV_far-neurons_bpf_25-03-2019.h5'
width = 40
amp_jitter = 0

recgen = mr.load_recordings(recording_directory + recording_name)
channel_positions = recgen.channel_positions
soma_positions = np.asarray(
    [st.annotations['soma_position'] for st in recgen.spiketrains])
SX_groundtruth = se.MEArecSortingExtractor(recording_directory +
                                           recording_name)

hf_AO = h5py.File(
    str(augmented_data_path) + 'model_data_gt_' + str(width) + 'um_VAE_' +
    str(amp_jitter) + '_amp_jitter_' + str(recording_name), 'r')
amp_array = np.asarray(hf_AO['amps_list'])
channel_loc_array = np.asarray(hf_AO['channel_locations_list'])
waveforms_array = np.asarray(hf_AO['waveforms_list'])
center_loc_array = np.asarray(hf_AO['center_location_list'])
central_channel_list = np.asarray(hf_AO['central_channel_list'])
spike_time_list = np.asarray(hf_AO['spike_time_list'])
spike_id_list = np.asarray(hf_AO['spike_id_list'])
hf_AO.close()

hf_label_AO = h5py.File(
    str(augmented_data_path) + 'label_data_gt_' + str(width) + 'um_VAE_' +
    str(amp_jitter) + '_amp_jitter_' + str(recording_name), 'r')
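# The snippet is cut off here on the source page. Presumably the label file is
# read and closed like hf_AO above; a sketch under that assumption (the
# 'labels_list' key is a guess from the naming pattern, not confirmed):
#   labels_array = np.asarray(hf_label_AO['labels_list'])
#   hf_label_AO.close()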
Example #6
# In[ ]:

# # Example MEArec dataset to be downloaded (4.2 GB)
# file_url = 'https://www.dropbox.com/s/1jolgsw5kgxmsd5/recordings_300_SqMEA-10-15um_minamp0_60s_10uV_far-neurons_bpf_25-03-2019.h5?dl=1'
# file_name = '/disk/scratch/cole/recordings/recordings_300_SqMEA-10-15um_minamp0_60s_10uV_far-neurons_bpf_25-03-2019.h5'

# urllib.request.urlretrieve(file_url, file_name)

# Here, the path to the MEArec recording and recording name are provided

# In[3]:

import numpy as np
import spikeextractors as se

recording = se.MEArecRecordingExtractor(args.recording_directory +
                                        args.recording_name,
                                        locs_2d=False)
sorting = se.MEArecSortingExtractor(args.recording_directory +
                                    args.recording_name)
channel_positions = np.asarray(recording.get_channel_locations())

# In[4]:

spike_times = []
for unit_id in sorting.get_unit_ids():
    spike_train = sorting.get_unit_spike_train(unit_id=unit_id)
    spike_times.extend(spike_train)
spike_times = sorted(spike_times)

if args.num_spikes == -1:
    args.num_spikes = len(spike_times)
if args.save_every is None:
    args.save_every = args.num_spikes
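# Note: 'args' is not defined in this excerpt. An argparse setup consistent
# with the attributes used above might look like this (argument names inferred
# from usage; defaults are assumptions):
#   import argparse
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--recording_directory', default='./recordings/')
#   parser.add_argument('--recording_name', required=True)
#   parser.add_argument('--num_spikes', type=int, default=-1)
#   parser.add_argument('--save_every', type=int, default=None)
#   args = parser.parse_args()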
Example #7

import os
import shutil
import multiprocessing

import mlprocessors as mlpr
import spikeextractors as se
from mountaintools import client as mt

# GenerateMearecRecording and _random_string are defined elsewhere in the
# original script; the imports above are inferred from usage.


def main():
    mt.configDownloadFrom('spikeforest.public')
    templates_path = 'sha1dir://95dba567b5168bacb480411ca334ffceb96b8c19.2019-06-11.templates'
    recordings_path = 'recordings_out'

    tempgen_tetrode = templates_path + '/templates_tetrode.h5'
    tempgen_neuronexus = templates_path + '/templates_neuronexus.h5'
    tempgen_neuropixels = templates_path + '/templates_neuropixels.h5'
    tempgen_neuronexus_drift = templates_path + '/templates_neuronexus_drift.h5'

    noise_level = [10, 20]
    duration = 600
    bursting = [False, True]
    nrec = 2  # change this to 10
    ei_ratio = 0.8
    rec_dict = {
        'tetrode': {
            'ncells': [10, 20],
            'tempgen': tempgen_tetrode,
            'drifting': False
        },
        'neuronexus': {
            'ncells': [10, 20, 40],
            'tempgen': tempgen_neuronexus,
            'drifting': False
        },
        'neuropixels': {
            'ncells': [20, 40, 60],
            'tempgen': tempgen_neuropixels,
            'drifting': False
        },
        'neuronexus_drift': {
            'ncells': [10, 20, 40],
            'tempgen': tempgen_neuronexus_drift,
            'drifting': True
        }
    }

    # optional: if drifting change drift velocity
    # recording_params['recordings']['drift_velocity'] = ...

    # Generate and save recordings
    if os.path.exists(recordings_path):
        shutil.rmtree(recordings_path)
    os.mkdir(recordings_path)

    # Set up slurm configuration
    slurm_working_dir = 'tmp_slurm_job_handler_' + _random_string(5)
    job_handler = mlpr.SlurmJobHandler(working_dir=slurm_working_dir)
    use_slurm = True
    job_timeout = 3600 * 4
    if use_slurm:
        job_handler.addBatchType(name='default',
                                 num_workers_per_batch=4,
                                 num_cores_per_job=6,
                                 time_limit_per_batch=job_timeout * 3,
                                 use_slurm=True,
                                 max_simultaneous_batches=20,
                                 additional_srun_opts=['-p ccm'])
    else:
        job_handler.addBatchType(
            name='default',
            num_workers_per_batch=multiprocessing.cpu_count(),
            num_cores_per_job=2,
            max_simultaneous_batches=1,
            use_slurm=False)
    with mlpr.JobQueue(job_handler=job_handler) as JQ:
        results_to_write = []
        for rec_type in rec_dict.keys():
            study_set_name = 'SYNTH_MEAREC_{}'.format(rec_type.upper())
            os.mkdir(recordings_path + '/' + study_set_name)
            params = dict()
            params['duration'] = duration
            params['drifting'] = rec_dict[rec_type]['drifting']
            # reduce minimum distance for dense recordings
            params['min_dist'] = 15
            for ncells in rec_dict[rec_type]['ncells']:
                # changing number of cells
                n_exc = int(ei_ratio * 10)  # intentionally replaced nrec by 10 here
                params['n_exc'] = n_exc
                params['n_inh'] = ncells - n_exc
                for n in noise_level:
                    # changing noise level
                    params['noise_level'] = n
                    for b in bursting:
                        bursting_str = ''
                        if b:
                            bursting_str = '_bursting'
                        study_name = 'synth_mearec_{}_noise{}_K{}{}'.format(
                            rec_type, n, ncells, bursting_str)
                        os.mkdir(recordings_path + '/' + study_set_name + '/' +
                                 study_name)
                        for i in range(nrec):
                            # set random seeds
                            params['seed'] = i  # intentionally doing it this way

                            # changing bursting and shape modulation
                            print('Generating', rec_type, 'recording with',
                                  ncells, 'noise level', n, 'bursting', b)
                            params['bursting'] = b
                            params['shape_mod'] = b
                            templates0 = mt.realizeFile(
                                path=rec_dict[rec_type]['tempgen'])
                            result0 = GenerateMearecRecording.execute(
                                **params,
                                templates_in=templates0,
                                recording_out=dict(ext='.h5'))
                            mda_output_folder = recordings_path + '/' + study_set_name + '/' + study_name + '/' + str(i)
                            results_to_write.append(
                                dict(result=result0,
                                     mda_output_folder=mda_output_folder))
        JQ.wait()

        for x in results_to_write:
            result0: mlpr.MountainJobResult = x['result']
            mda_output_folder = x['mda_output_folder']
            path = mt.realizeFile(path=result0.outputs['recording_out'])
            recording = se.MEArecRecordingExtractor(recording_path=path)
            sorting_true = se.MEArecSortingExtractor(recording_path=path)
            se.MdaRecordingExtractor.write_recording(
                recording=recording, save_path=mda_output_folder)
            se.MdaSortingExtractor.write_sorting(sorting=sorting_true,
                                                 save_path=mda_output_folder +
                                                 '/firings_true.mda')
            if result0.console_out:
                mt.realizeFile(path=result0.console_out,
                               dest_path=mda_output_folder +
                               '.console_out.txt')
            if result0.runtime_info:
                mt.saveObject(object=result0.runtime_info,
                              dest_path=mda_output_folder +
                              '.runtime_info.json')

    print('Creating and uploading snapshot...')
    sha1dir_path = mt.createSnapshot(path=recordings_path,
                                     upload_to='spikeforest.public',
                                     upload_recursive=False)
    # sha1dir_path = mt.createSnapshot(path=recordings_path, upload_to='spikeforest.kbucket', upload_recursive=True)
    print(sha1dir_path)
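# The printed sha1dir:// path can then be passed back to mt.realizeFile for
# individual files under it, just as templates_path is used at the top of main().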