Example No. 1
def test_raises_on_empty_h5_file():

    filename = os.path.join(TEST_DATA_PATH, "empty.nwb")

    with h5py.File(filename, 'w'):
        pass

    with pytest.raises(ValueError, match=r'unknown NWB major'):
        create_nwb_reader(filename)
Example No. 2
def test_valid_v1_but_unknown_sweep_naming():

    filename = os.path.join(TEST_DATA_PATH,
                            'invalid_sweep_naming_convention.nwb')

    with h5py.File(filename, 'w') as fh:
        dt = h5py.special_dtype(vlen=bytes)
        dset = fh.create_dataset("nwb_version", (1, ), dtype=dt)
        dset[:] = "NWB-1"

    with pytest.raises(ValueError, match=r'sweep naming convention'):
        create_nwb_reader(filename)
Example No. 3
    def __init__(self, input_file, output_file):
        """
        Convert NWB v1 to v2

        """

        self.nwb_data = nwb_reader.create_nwb_reader(input_file)
        self.notebook = lab_notebook_reader.create_lab_notebook_reader(
            input_file)

        nwb_file = self.create_nwb_file()

        device = nwb_file.create_device(name='electrode_0')

        electrode = nwb_file.create_ic_electrode(
            name="elec0", description=' some kind of electrode', device=device)

        for i in self.create_stimulus_series(electrode):
            nwb_file.add_stimulus(i)

        for i in self.create_acquisition_series(electrode):
            nwb_file.add_acquisition(i)

        with NWBHDF5IO(output_file, "w") as io:
            io.write(nwb_file, cache_spec=True)
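
A hypothetical usage sketch: the conversion runs entirely inside __init__, so constructing the object performs the whole v1-to-v2 conversion. The class name is not shown in this excerpt; NWBConverter below is an assumed placeholder.

# NWBConverter is a placeholder name; only its __init__ is shown above.
NWBConverter(input_file="session_v1.nwb", output_file="session_v2.nwb")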
Example No. 4
    def __init__(
            self, 
            sweep_info=None, 
            nwb_file=None, 
            h5_file=None,
            ontology=None, 
            api_sweeps=True, 
            validate_stim=True,
            deprecation_warning=True
    ):
        super(AibsDataSet, self).__init__(
            ontology, validate_stim, deprecation_warning=deprecation_warning
        )

        self._nwb_data = nwb_reader.create_nwb_reader(nwb_file)

        if sweep_info:
            sweep_info = sp.modify_sweep_info_keys(sweep_info) if api_sweeps else sweep_info

            # Remove sweeps not found in nwb_data sweep map
            sweep_numbers_in_map = self._nwb_data.sweep_map_table["sweep_number"].tolist()
            sweep_info = [si for si in sweep_info if si["sweep_number"] in sweep_numbers_in_map]
        else:
            self.notebook = lab_notebook_reader.create_lab_notebook_reader(nwb_file, h5_file)
            sweep_info = self.extract_sweep_stim_info()

        self.build_sweep_table(sweep_info, deprecation_warning=False)
Example No. 5
def test_valid_v2_skeleton_X_NWB():
    filename = os.path.join(TEST_DATA_PATH, 'valid_v2.nwb')

    with h5py.File(filename, 'w') as fh:
        fh.attrs["nwb_version"] = "2"

    reader = create_nwb_reader(filename)
    assert isinstance(reader, NwbXReader)
Example No. 6
def test_valid_v1_full_MIES_3(NWB_file):

    reader = create_nwb_reader(NWB_file)

    assert isinstance(reader, NwbMiesReader)

    sweep_names_ref = [u'data_00000_AD0']

    sweep_names = reader.get_sweep_names()
    assert sorted(sweep_names_ref) == sorted(sweep_names)

    assert reader.get_pipeline_version() == (0, 0)

    assert reader.get_sweep_number("data_00000_AD0") == 0

    assert reader.get_stim_code(0) == "EXTPSMOKET180424"

    # ignore very long comment
    sweep_attrs_ref = {
        u'ancestry': np.array(
            [u'TimeSeries', u'PatchClampSeries', u'VoltageClampSeries'],
            dtype=object),
        u'comment': None,
        u'description': u'PLACEHOLDER',
        u'missing_fields': np.array(
            [u'resistance_comp_bandwidth', u'resistance_comp_correction',
             u'resistance_comp_prediction', u'whole_cell_capacitance_comp',
             u'whole_cell_series_resistance_comp'], dtype=object),
        u'neurodata_type': u'TimeSeries',
        u'source': u'Device=ITC18USB_Dev_0;Sweep=0;AD=0;'
                   u'ElectrodeNumber=0;ElectrodeName=0'
    }

    sweep_attrs = reader.get_sweep_attrs(0)
    sweep_attrs['comment'] = None

    compare_dicts(sweep_attrs_ref, sweep_attrs)

    # assume the data itself is correct and replace it with None
    sweep_data_ref = {
        'response': None,
        'sampling_rate': 200000.0,
        'stimulus': None,
        'stimulus_unit': 'Volts'
    }

    sweep_data = reader.get_sweep_data(0)
    sweep_data['response'] = None
    sweep_data['stimulus'] = None

    assert sweep_data_ref == sweep_data
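
Several of these examples call a compare_dicts helper that the excerpts never define. A minimal sketch of what it might look like, assuming it only needs to handle plain values, NaNs, and NumPy arrays; this implementation is an assumption, not the project's actual helper.

import numpy as np

def compare_dicts(ref, actual):
    """Assert two dicts match, comparing NumPy arrays element-wise."""
    assert sorted(ref.keys()) == sorted(actual.keys())
    for key, ref_value in ref.items():
        value = actual[key]
        if isinstance(ref_value, np.ndarray):
            assert np.array_equal(ref_value, value), key
        elif isinstance(ref_value, float) and np.isnan(ref_value):
            # NaN != NaN, so NaN-valued entries need an explicit check.
            assert np.isnan(value), key
        else:
            assert ref_value == value, key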
Example No. 7
def test_sweep_map_sweep_numbers(NWB_file):

    sweep_numbers_ref = np.arange(0, 71)
    reader = create_nwb_reader(NWB_file)

    sweep_map_table = reader.sweep_map_table
    sweep_numbers = sweep_map_table["sweep_number"].values

    assert (sweep_numbers == sweep_numbers_ref).all()
Example No. 8
def test_valid_v1_with_no_sweeps():

    filename = os.path.join(TEST_DATA_PATH, 'no_sweeps.nwb')

    with h5py.File(filename, 'w') as fh:
        dt = h5py.special_dtype(vlen=bytes)
        dset = fh.create_dataset("nwb_version", (1, ), dtype=dt)
        dset[:] = "NWB-1"
        fh.create_group("acquisition/timeseries")

    reader = create_nwb_reader(filename)
    assert isinstance(reader, NwbMiesReader)
Example No. 9
def test_valid_v2_full_DAT(NWB_file):
    reader = create_nwb_reader(NWB_file)
    assert isinstance(reader, NwbXReader)

    sweep_names_ref = ['index_{:02d}'.format(x) for x in range(0, 78)]

    sweep_names = reader.get_sweep_names()
    assert sorted(sweep_names_ref) == sorted(sweep_names)

    assert reader.get_pipeline_version() == (0, 0)

    assert reader.get_sweep_number("index_00") == 10101

    assert reader.get_stim_code(10101) == "extpinbath"

    # ignore very long description
    sweep_attrs_ref = {
        u'capacitance_fast': 0.0,
        u'capacitance_slow': np.nan,
        u'comments': u'no comments',
        u'description': None,
        u'gain': 5000000.0,
        u'help': u'Current recorded from cell during voltage-clamp recording',
        u'namespace': u'core',
        u'neurodata_type': u'VoltageClampSeries',
        u'resistance_comp_bandwidth': np.nan,
        u'resistance_comp_correction': np.nan,
        u'resistance_comp_prediction': np.nan,
        u'starting_time': 3768.2174599999998,
        u'stimulus_description': u'extpinbath',
        u'sweep_number': 10101,
        u'whole_cell_capacitance_comp': np.nan,
        u'whole_cell_series_resistance_comp': np.nan
    }

    sweep_attrs = reader.get_sweep_attrs(10101)
    sweep_attrs['description'] = None

    compare_dicts(sweep_attrs_ref, sweep_attrs)

    # assume the data itself is correct and replace it with None
    sweep_data_ref = {
        'response': None,
        'sampling_rate': 200000.00000000003,
        'stimulus': None,
        'stimulus_unit': 'Volts'
    }

    sweep_data = reader.get_sweep_data(10101)
    sweep_data['response'] = None
    sweep_data['stimulus'] = None

    assert sweep_data_ref == sweep_data
Example No. 10
def test_sweep_map_sweep_0(NWB_file):

    reader = create_nwb_reader(NWB_file)
    sweep_map_ref = {
        'acquisition_group': u'data_00046_AD0',
        'stimulus_group': u'data_00046_DA0',
        'sweep_number': 0,
        'starting_time': 2740.1590003967285
    }

    sweep_map = reader.get_sweep_map(0)
    assert sweep_map == sweep_map_ref
Example No. 11
    def __init__(self,
                 sweep_info=None,
                 nwb_file=None,
                 ontology=None,
                 api_sweeps=True,
                 validate_stim=True):
        super(HBGDataSet, self).__init__(ontology, validate_stim)
        self._nwb_data = nwb_reader.create_nwb_reader(nwb_file)

        if sweep_info is None:
            sweep_info = self.extract_sweep_stim_info()

        self.build_sweep_table(sweep_info)
Example No. 12
def test_valid_v1_skeleton_Pipeline():
    filename = os.path.join(TEST_DATA_PATH, 'valid_v1_Pipeline.nwb')

    with h5py.File(filename, 'w') as fh:
        dt = h5py.special_dtype(vlen=bytes)
        dset = fh.create_dataset("nwb_version", (1, ), dtype=dt)
        dset[:] = "NWB-1"

        dset = fh.create_dataset("acquisition/timeseries/Sweep_0", (1, ),
                                 dtype="f")

    reader = create_nwb_reader(filename)
    assert isinstance(reader, NwbPipelineReader)
Example No. 13
def test_valid_v2_full_ABF(NWB_file):

    reader = create_nwb_reader(NWB_file)
    assert isinstance(reader, NwbXReader)

    sweep_names_ref = [u'index_0']

    sweep_names = reader.get_sweep_names()
    assert sorted(sweep_names_ref) == sorted(sweep_names)

    assert reader.get_pipeline_version() == (0, 0)

    assert reader.get_sweep_number("index_0") == 0

    assert reader.get_stim_code(0) == "RAMP1"

    # ignore very long description
    sweep_attrs_ref = {
        u'bias_current': np.nan,
        u'bridge_balance': np.nan,
        u'capacitance_compensation': np.nan,
        u'comments': u'no comments',
        u'description': None,
        u'gain': 1.0,
        u'help': u'Voltage recorded from cell during current-clamp recording',
        u'namespace': u'core',
        u'neurodata_type': u'CurrentClampSeries',
        u'starting_time': 0.0,
        u'stimulus_description': u'RAMP1',
        u'sweep_number': 0
    }

    sweep_attrs = reader.get_sweep_attrs(0)
    sweep_attrs['description'] = None

    compare_dicts(sweep_attrs_ref, sweep_attrs)

    # assume the data itself is correct and replace it with None
    sweep_data_ref = {
        'response': None,
        'sampling_rate': 50000.0,
        'stimulus': None,
        'stimulus_unit': 'Amps'
    }

    sweep_data = reader.get_sweep_data(0)
    sweep_data['response'] = None
    sweep_data['stimulus'] = None

    assert sweep_data_ref == sweep_data
Example No. 14
def test_embed_spike_times_into_nwb(make_skeleton_nwb_file, tmpdir_factory):

    sweep_spike_times = {
        3: [56.0, 44.6, 661.1],
        4: [156.0, 144.6, 61.1, 334.944]
    }

    tmp_dir = tmpdir_factory.mktemp("embed_spikes_into_nwb")
    input_nwb_file_name = str(tmp_dir.join("input.nwb"))
    output_nwb_file_name = str(tmp_dir.join("output.nwb"))

    make_skeleton_nwb_file(input_nwb_file_name)

    embed_spike_times(input_nwb_file_name, output_nwb_file_name,
                      sweep_spike_times)

    nwb_data = nwb_reader.create_nwb_reader(output_nwb_file_name)

    for sweep_num, spike_times in sweep_spike_times.items():
        assert np.allclose(nwb_data.get_spike_times(sweep_num), spike_times)
Example No. 15
def main(paths, sweeps, dendrite_type, bridge_avg, passive_fit_start_time,
         electrode_capacitance, junction_potential, random_seeds,
         output_json, **kwargs):
    """Main sequence of pre-processing and passive fitting"""

    # Extract Sweep objects (from IPFX package) from NWB file
    nwb_path = paths["nwb"] # nwb - neurodata without borders (ephys data)
    nwb_data = create_nwb_reader(nwb_path)
    core_1_lsq, c1_start, c1_end = sweeps_from_nwb(
        nwb_data, sweeps["core_1_long_squares"])
    core_2_lsq, c2_start, c2_end = sweeps_from_nwb(
        nwb_data, sweeps["core_2_long_squares"])

    # Choose sweeps to train the model
    sweep_set_to_fit, start, end = preprocess.select_core_1_or_core_2_sweeps(
        core_1_lsq, c1_start, c1_end,
        core_2_lsq, c2_start, c2_end)
    if len(sweep_set_to_fit.sweeps) == 0:
        ju.write(output_json, { 'error': "No usable sweeps found" })
        return


    # Calculate the target features from the training sweeps
    step_analysis = StepAnalysis(start, end)
    step_analysis.analyze(sweep_set_to_fit)
    target_info = preprocess.target_features(
        step_analysis.sweep_features(),
        step_analysis.spikes_data())

    stim_amp = step_analysis.sweep_features()["stim_amp"].values[0]
    stim_dur = end - start
    v_baseline = target_info.at["v_baseline", "mean"]

    # Determine maximum current used for depolarization block checks
    # during optimization

    # Load noise sweeps to check highest current used
    noise_1, _, _ = sweeps_from_nwb(
        nwb_data, sweeps["seed_1_noise"])
    noise_2, _, _ = sweeps_from_nwb(
        nwb_data, sweeps["seed_2_noise"])

    max_i = preprocess.max_i_for_depol_block_check(
        core_1_lsq, core_2_lsq, noise_1, noise_2)

    # Prepare inputs for passive fitting
    is_spiny = dendrite_type == "spiny"

    cap_checks, _, _ = sweeps_from_nwb(
        nwb_data, sweeps["cap_checks"])
    if len(cap_checks.sweeps) == 0:
        logging.info("No cap check traces found")
        should_run_passive_fit = False
        passive_info = {
            "should_run": False,
        }
    else:
        grand_up, grand_down, t = preprocess.cap_check_grand_averages(cap_checks)
        up_file, down_file = preprocess.save_grand_averages(
            grand_up, grand_down, t, paths["storage_directory"])
        escape_time = preprocess.passive_fit_window(grand_up, grand_down, t,
            start_time=passive_fit_start_time)
        passive_info = {
            "should_run": True,
            "bridge": bridge_avg,
            "fit_window_start": passive_fit_start_time,
            "fit_window_end": escape_time,
            "electrode_cap": electrode_capacitance,
            "is_spiny": is_spiny,
        }
        paths["up"] = up_file
        paths["down"] = down_file

    passive_info_path = os.path.join(
        paths["storage_directory"], "passive_info.json")
    ju.write(passive_info_path, passive_info)

    # Determine whether morphology has an apical dendrite
    has_apical = preprocess.swc_has_apical_compartments(paths["swc"])

    # Decide which fits to run based on morphology and AP width
    fit_types = preprocess.FitStyle.get_fit_types(
        has_apical=has_apical,
        is_spiny=is_spiny,
        width=target_info.at["width", "mean"])

    stage_1_tasks = [{"fit_type": fit_type, "seed": seed}
        for seed in random_seeds
        for fit_type in fit_types]

    stage_2_tasks = [{"fit_type": preprocess.FitStyle.map_stage_2(fit_type), "seed": seed}
        for seed in random_seeds
        for fit_type in fit_types]

    preprocess_results_path = os.path.join(
        paths["storage_directory"], "preprocess_results.json")
    ju.write(preprocess_results_path, {
        "is_spiny": is_spiny,
        "has_apical": has_apical,
        "junction_potential": junction_potential,
        "max_stim_test_na": max_i,
        "v_baseline": v_baseline,
        "stimulus": {
            "amplitude": 1e-3 * stim_amp, # to nA
            "delay": 1e3,
            "duration": 1e3 * stim_dur, # to ms
        },
        "target_features": target_info.to_dict(orient="index"),
        "sweeps": sweeps,
        "sweeps_to_fit": [s.sweep_number for s in sweep_set_to_fit.sweeps],
    })

    paths.update({
        "preprocess_results": preprocess_results_path,
        "passive_info": passive_info_path,
    })

    output = {
        "paths": paths,
        "stage_1_task_list": stage_1_tasks,
        "stage_2_task_list": stage_2_tasks,
    }

    ju.write(output_json, output)
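
main() leans on a sweeps_from_nwb helper that returns Sweep objects plus the stimulus window for a list of sweep numbers. A self-contained sketch of the idea, using namedtuples as stand-ins for the IPFX Sweep/SweepSet classes and a placeholder window detection; both are assumptions, since the real helper is not part of this excerpt.

import numpy as np
from collections import namedtuple

# Stand-ins for ipfx.sweep.Sweep and ipfx.sweep.SweepSet.
Sweep = namedtuple("Sweep", ["t", "v", "i", "sampling_rate", "sweep_number"])
SweepSet = namedtuple("SweepSet", ["sweeps"])

def sweeps_from_nwb(nwb_data, sweep_numbers):
    sweeps = []
    for num in sweep_numbers:
        data = nwb_data.get_sweep_data(num)  # same dict shape as in the tests
        rate = data["sampling_rate"]
        t = np.arange(len(data["response"])) / rate
        sweeps.append(Sweep(t, data["response"], data["stimulus"], rate, num))
    if not sweeps:
        return SweepSet([]), None, None
    # Placeholder: treat the span where the first sweep's stimulus is
    # nonzero as the stimulus window.
    onsets = np.flatnonzero(np.asarray(sweeps[0].i))
    return SweepSet(sweeps), sweeps[0].t[onsets[0]], sweeps[0].t[onsets[-1]]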
Example No. 16
def test_assumed_sweep_number_fallback(NWB_file):

    reader = create_nwb_reader(NWB_file)
    assert isinstance(reader, NwbPipelineReader)

    assert reader.get_sweep_number("Sweep_10") == 10
Example No. 17
def test_get_recording_date(NWB_file):
    reader = create_nwb_reader(NWB_file)

    assert "2018-03-20 20:59:48" == reader.get_recording_date()
Example No. 18
def test_valid_v1_full_Pipeline(fetch_pipeline_file):
    reader = create_nwb_reader(fetch_pipeline_file)
    assert isinstance(reader, NwbPipelineReader)

    sweep_names_ref = [
        u'Sweep_10', u'Sweep_12', u'Sweep_13', u'Sweep_14', u'Sweep_15',
        u'Sweep_16', u'Sweep_17', u'Sweep_19', u'Sweep_20', u'Sweep_25',
        u'Sweep_28', u'Sweep_29', u'Sweep_30', u'Sweep_32', u'Sweep_33',
        u'Sweep_34', u'Sweep_35', u'Sweep_36', u'Sweep_37', u'Sweep_38',
        u'Sweep_39', u'Sweep_40', u'Sweep_41', u'Sweep_42', u'Sweep_43',
        u'Sweep_44', u'Sweep_45', u'Sweep_46', u'Sweep_47', u'Sweep_5',
        u'Sweep_51', u'Sweep_52', u'Sweep_53', u'Sweep_54', u'Sweep_55',
        u'Sweep_57', u'Sweep_58', u'Sweep_59', u'Sweep_6', u'Sweep_61',
        u'Sweep_62', u'Sweep_63', u'Sweep_64', u'Sweep_65', u'Sweep_66',
        u'Sweep_67', u'Sweep_68', u'Sweep_69', u'Sweep_7', u'Sweep_70',
        u'Sweep_74', u'Sweep_8', u'Sweep_9'
    ]

    sweep_names = reader.get_sweep_names()
    assert sorted(sweep_names_ref) == sorted(sweep_names)

    assert reader.get_pipeline_version() == (1, 0)

    assert reader.get_sweep_number("Sweep_10") == 10

    assert reader.get_stim_code(10) == "Short Square"

    sweep_attrs_ref = {
        u'ancestry': np.array(
            ['TimeSeries', 'PatchClampSeries', 'CurrentClampSeries'],
            dtype='|S18'),
        u'comments': u'',
        u'description': u'',
        u'help': u'Voltage recorded from cell during current-clamp recording',
        u'missing_fields': np.array(['gain'], dtype='|S4'),
        u'neurodata_type': u'TimeSeries',
        u'source': u''
    }

    sweep_attrs = reader.get_sweep_attrs(10)
    compare_dicts(sweep_attrs_ref, sweep_attrs)

    # assume the data itself is correct and replace it with None
    sweep_data_ref = {
        'response': None,
        'sampling_rate': 50000.0,
        'stimulus': None,
        'stimulus_unit': 'Amps'
    }

    sweep_data = reader.get_sweep_data(10)
    sweep_data['response'] = None
    sweep_data['stimulus'] = None

    assert sweep_data_ref == sweep_data
Example No. 19
def test_raises_on_missing_file():
    with pytest.raises(IOError):
        create_nwb_reader('I_DONT_EXIST.nwb')
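
Many of these tests receive NWB_file through a pytest fixture that the excerpts omit. A minimal sketch of such a fixture, assuming it only needs to materialize a skeleton v1 file like the ones written inline above; the real fixtures are parametrized with actual recordings.

import h5py
import pytest

@pytest.fixture
def NWB_file(tmpdir_factory):
    # Skeleton NWB v1 file, mirroring the inline construction used above.
    filename = str(tmpdir_factory.mktemp("nwb").join("skeleton.nwb"))
    with h5py.File(filename, "w") as fh:
        dt = h5py.special_dtype(vlen=bytes)
        dset = fh.create_dataset("nwb_version", (1,), dtype=dt)
        dset[:] = "NWB-1"
        fh.create_group("acquisition/timeseries")
    return filename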