def test_count_spikes_filtered_false(good_path_and_map):
    _, good_map = good_path_and_map
    good_map = utils.abf_golay(good_map)
    spike_map = steps.count_spikes(good_map,
                                   threshold=0.25,
                                   use_filtered=False)
    assert not spike_map["peak_props"]["use_filtered?"]

def test_golay_bad_input(bad_path_and_map):
    _, bad_map = bad_path_and_map
    filt = utils.abf_golay(bad_map, 11, 3)
    assert len(filt["filtered"]) == 0
    assert (
        filt["error"][0] ==
        "filter error: If mode is 'interp', window_length must be less than or equal to the size of x."
    )

def batch_analyze_file(path, half_ms_window, degree, threshold=0.5):
    # Read sweep 0 once just to get the file's sweep list.
    first_read = utils.read_abf_IO(path, 0, 0)
    list_of_dicts = []
    for sweep in first_read["sweep_list"]:
        abf = utils.abf_golay(utils.read_abf_IO(path, sweep, 0),
                              half_ms_window, degree)
        temp = count_spikes(abf, threshold=threshold, use_filtered=True)
        final = filter_stim_indicies_cc01(temp)
        list_of_dicts.append(as_dict(final))
    return list_of_dicts
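
# A minimal usage sketch for batch_analyze_file; the path below is
# illustrative, and the 11-point window / degree 3 match the filter settings
# used elsewhere in this file:
#
#     per_sweep = batch_analyze_file("example_cc01.abf", 11, 3, threshold=0.5)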

def test_count_spikes_bad_map_filtered_false(bad_path_and_map):
    _, bad_map = bad_path_and_map
    bad_map = utils.abf_golay(bad_map)
    spike_map = steps.count_spikes(bad_map, threshold=0.25, use_filtered=False)
    assert not spike_map["peak_props"]["use_filtered?"]
    assert spike_map["peak_props"] == {
        "no_data": "no_data",
        "threshold": None,
        "use_filtered?": False,
    }

# Scratch script for current-ramp analysis.
import patch_clamp.utils as utils
import patch_clamp.ramp as ramp
import patch_clamp.database as db

con = db.persistent_connection_to_db(db.DATABASE_PATH)
paths = ramp.get_ramps_from_db()
sweep = 2

path_n = 20  # path 20 sweep 0 is empty
target = utils.abf_golay(utils.read_abf_IO(paths[path_n], sweep, 0))
target = ramp.find_ramp_peaks(target)
serialized = ramp.serialize_for_db(target)

ramp.add_to_db(con, serialized)

r = con.execute("SELECT * FROM ramp").fetchall()
print(r)

# Main script for ramp analysis.
import patch_clamp.archive as archive
import patch_clamp.database as db
import patch_clamp.utils as utils
import patch_clamp.ramp as ramp

# Create the ramp table from the schema, then close; the analysis below uses
# a fresh persistent connection.
SCHEMA = archive.get_schema("patch_clamp/ramp_schema.sqlite")
con, cur = archive.make_db(db.DATABASE_PATH, SCHEMA)
cur.close()
con.close()

paths = ramp.get_ramps_from_db()

con = db.persistent_connection_to_db(db.DATABASE_PATH)
for path in paths:
    current = utils.abf_golay(utils.read_abf_IO(path, 0, 0))
    current_peak = ramp.find_ramp_peaks(current)
    serialized = ramp.serialize_for_db(current_peak)
    ramp.add_to_db(con, serialized)
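
# Assuming ramp.add_to_db commits each insert, close the connection once all
# files have been processed.
con.close()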

def test_golay(good_path_and_map):
    _, good_map = good_path_and_map
    filt = utils.abf_golay(good_map, 11, 3)
    assert filt["savgol_details"] == {"polyorder": 3, "window": 11}
    assert "filtered" in filt.keys()

def read_and_filter(fpath, half_ms_window, degree):
    # Load sweep 0 of the ABF file and apply the Savitzky-Golay filter.
    abf_file = utils.read_abf_IO(fpath, 0, 0)
    filtered = utils.abf_golay(abf_file, half_ms_window, degree)
    return filtered
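
# Usage sketch (illustrative path; the 11-point half-window and degree 3
# match the settings used below):
#
#     filtered = read_and_filter("example_cc01.abf", 11, 3)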

# TODO:
# - Write the function that composes and builds the dict to serialize to disk.
# - Use the previously written function to find all the CC01 files to analyze.
# - Bin by 1/2 ms and mark spike / no spike in each bin; that is how the
#   raster is built (see the binning sketch below). But first, just write out
#   the spike times.
# - Record all spike times and build raster plots (see p. 31 of
#   Theoretical Neuroscience).
# - Then start binning the data and calculate the mean and variance of each bin.
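
# A minimal sketch of the 1/2 ms binning step described above; not part of
# the pipeline yet. The names spike_indices, n_samples, and sampling_rate_hz
# are illustrative, not existing fields of the abf map.
import numpy as np

def bin_spikes_half_ms(spike_indices, n_samples, sampling_rate_hz):
    # Convert sample indices to seconds, then mark each 0.5 ms bin as
    # spike / no spike; one boolean row of the raster per sweep.
    spike_times_s = np.asarray(spike_indices) / sampling_rate_hz
    duration_s = n_samples / sampling_rate_hz
    bin_edges = np.arange(0.0, duration_s + 0.0005, 0.0005)
    counts, _ = np.histogram(spike_times_s, bins=bin_edges)
    return counts > 0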

# Imports assumed for this scratch (module paths follow the other scripts in
# this file).
import matplotlib.pyplot as plt

import patch_clamp.utils as utils
import patch_clamp.steps as steps

half_ms_window = 11  # data points for the Savitzky-Golay window
degree = 3  # polynomial degree, based on Mae's paper

# cc01paths: list of CC01 file paths, assumed to come from the file-finding
# function mentioned in the TODO above.
target = cc01paths[5]  # path 5, sweep 10 is weird
##
abf_file = utils.abf_golay(utils.read_abf_IO(target, 22, 0), half_ms_window,
                           degree)
# abf = steps.filter_stim_indicies_cc01(
#     steps.count_spikes(abf, threshold=1, use_filtered=True)
# )
# Two detection variants: a simple threshold, and count_spikes with
# threshold=0.8.
abf = steps.filter_stim_indicies_cc01(
    steps.count_spikes_simple_threshold(abf_file, use_filtered=True))
abf_v = steps.filter_stim_indicies_cc01(
    steps.count_spikes(abf_file, threshold=0.8, use_filtered=True))
p = abf["peaks"]
p2 = abf["during_stim_peaks"]

plt.plot(abf["x"], abf["filtered"])
plt.plot(abf["x"][p], abf["filtered"][p], ".")
try:
    plt.plot(abf["x"][p2], abf["filtered"][p2], "*")
except IndexError:
    # Some sweeps have no during-stim peaks, so indexing can fail; skip the
    # overlay in that case.
    pass