def raster_from_pane(p):
    # Reconstructed header: this snippet starts mid-function. It collects the
    # spike times and template Y positions that fall inside pane p.
    start = p * time_points_buffer
    end = (p + 1) * time_points_buffer

    in_pane = np.logical_and(spike_info['times'] < end,
                             spike_info['times'] > start)
    x = spike_info['times'].values[in_pane]
    templates = spike_info['template_after_sorting'].values[in_pane]
    y = []
    for t in templates:
        y.append(template_info[template_info['template number'] == t]
                 ['position Y'].values[0])
    y = np.array(y) * const.POSITION_MULT
    return x, y


raster = None
tr.connect_repl_var(globals(), 'pane', 'raster', 'raster_from_pane')

osv.graph(globals(), 'y', 'x', True)

# Calculating firing rates using arbitrary time windows
'''
seconds_in_averaging_window = 0.5
averaging_window = int(seconds_in_averaging_window * const.SAMPLING_FREQUENCY)
num_of_windows = int(ap_data.shape[1] / averaging_window)
spike_rates = np.zeros((len(template_info), num_of_windows))


for t_index in np.arange(len(template_info)):
    template_index = template_info['template number'].iloc[t_index]
    spike_times_in_template = spike_info[spike_info['template_after_sorting'] == template_index]['times'].values
    # The rest of the loop was cut from this snippet; a plausible completion
    # is to count the spikes in each window and divide by the window length.
    spike_rates[t_index, :] = np.histogram(
        spike_times_in_template, bins=num_of_windows,
        range=(0, num_of_windows * averaging_window))[0] / seconds_in_averaging_window

camera_pulses, beam_breaks, sounds = \
    sync_funcs.get_time_points_of_events_in_sync_file(data_folder, clean=True,
                                                      cam_ttl_pulse_period=
                                                      const.CAMERA_TTL_PULSES_TIMEPOINT_PERIOD)
points_per_pulse = np.mean(np.diff(camera_pulses))

camera_frames_in_video = csv_funcs.get_true_frame_array(data_folder)
time_point_of_first_video_frame = camera_pulses[camera_frames_in_video][0]


def time_point_to_frame(x):
    return sync_funcs.time_point_to_frame(time_point_of_first_video_frame,
                                          camera_frames_in_video,
                                          points_per_pulse, x)


tr.connect_repl_var(globals(), 'pointer', 'video_frame', 'time_point_to_frame')

# -----------------------------------------------------------------------------
# DETECTING SUBTHRESHOLD SPIKES WITH INSTANTANEOUS PHASE ----------------------


from scipy.signal import butter, sosfiltfilt


def butter_bandpass(lowcut, highcut, fs, order=5):
    nyq = 0.5 * fs
    low = lowcut / nyq
    high = highcut / nyq
    sos = butter(order, [low, high], analog=False, btype='band', output='sos')
    return sos


def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    sos = butter_bandpass(lowcut, highcut, fs, order=order)
    # The body was cut here; completed with the standard zero-phase SOS filter call
    return sosfiltfilt(sos, data)
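

# A minimal sketch of the idea in the section heading (illustrative: `lfp_trace`
# and the band edges are assumptions, not defined in this snippet): band-pass
# the trace, then take the instantaneous phase from the analytic (Hilbert)
# signal.
from scipy.signal import hilbert

filtered = butter_bandpass_filter(lfp_trace, lowcut=500, highcut=3000,
                                  fs=const.SAMPLING_FREQUENCY)
instantaneous_phase = np.angle(hilbert(filtered))
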
sv.graph_range(globals(), 'sound_start', 'sound_step', 'rec2_sound')



def sync_start_to_sound_start(sync_start):
    sound_start = int(sync_start * sound_to_sync_sampling_ratio + offset)
    if sound_start < 0:
        sound_start = 0
    return sound_start


def sync_step_to_sound_step(sync_step):
    return int(sync_step * sound_to_sync_sampling_ratio)


tr.connect_repl_var(globals(), 'sync_start', 'sound_start', 'sync_start_to_sound_start')
tr.connect_repl_var(globals(), 'sync_step', 'sound_step', 'sync_step_to_sound_step')


# Calculate manually the different offsets
# Get the long stretches of no sound in the sync file

sync_signal_start = 2168624
sync_signal_end = 43394819
baseline = 5
smallest_sound = 6
times_between_pips = 2380
length_of_six_pip_sound = 12000

# test = sync_diff[sync_signal_start:sync_signal_end]
test = sync_diff[sync_signal_start:]
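
# A sketch of the "long stretches of no sound" search described above
# (illustrative, not the original code, which was cut from this snippet):
# find runs where the rectified sync diff stays below `baseline` for at
# least the length of a six pip sound.
quiet = np.abs(test) < baseline
edges = np.diff(quiet.astype(int))
run_starts = np.where(edges == 1)[0] + 1
run_ends = np.where(edges == -1)[0] + 1
long_stretches = [(s, e) for s, e in zip(run_starts, run_ends)
                  if e - s > length_of_six_pip_sound]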

# ----------------------------------------------------------------------------------------------------------------------
# Example #4
markers_positions_no_large_movs = pd.read_pickle(
    join(dlc_project_folder, 'post_processing',
         'markers_positions_no_large_movs.df'))

frame = 3
global marker_dots
marker_dots = np.zeros((640, 640, 4))
output = None
overlay_dots.marker_dots = marker_dots
marker_size = 3
markers_positions_no_large_movs_numpy = markers_positions_no_large_movs.values
args = [markers_positions_no_large_movs_numpy, marker_size]
update_markers_for_video = overlay_dots.update_markers_for_video

tr.connect_repl_var(globals(), 'frame', 'output', 'update_markers_for_video',
                    'args')
sv.image_sequence(globals(), 'frame', 'marker_dots', 'full_video_file')

# -------------------------------------------------
# GET THE BODY POSITIONS

# Get the body markers from the cleaned markers
# Then interpolate the nans of the TailBase
# Finally average the body markers to get the body position
updated_body_markers = dlc_pp.seperate_markers(markers_positions_no_large_movs,
                                               body_parts)
tail_base = updated_body_markers.loc[:,
                                     updated_body_markers.columns.
                                     get_level_values(1) == 'TailBase']
tail_base_cleaned = dlc_pp.clean_dlc_outpout(join(dlc_project_folder,
                                                  'post_processing',
                                                  ...))  # the remaining arguments were cut from this snippet
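
# A sketch of the remaining two steps described above (illustrative; the
# original code was cut from this snippet): interpolate the TailBase nans,
# then average the body markers per frame to get one body position.
updated_body_markers.loc[:, tail_base.columns] = tail_base.interpolate(
    method='linear', axis=0)
body_positions = updated_body_markers.groupby(level=2, axis=1).mean().values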

# ----------------------------------------------------------------------------------------------------------------------
# Example #5

def show_average_template(f):
    # Reconstructed header: the snippet starts mid-function. `result` is
    # assumed to be an io.StringIO capturing stdout; `f` just triggers
    # re-evaluation. The function parses the last 'Template number' printed
    # to stdout and plots that template's average waveform.
    global previous_template_number
    global result
    sys.stdout = result
    string = result.getvalue()
    new = string[-200:]

    try:
        template_number = int(
            new[new.find('Template number'):new.find('Template number') +
                22][18:22])
        if template_number != previous_template_number:
            template = template_info[template_info['template number'] ==
                                     template_number]
            figure.clear()
            ax = figure.add_subplot(111)
            try:
                ax.plot(np.squeeze(avg_templates[template.index.values]).T)
            except Exception:
                pass
            # Moved inside the if: `template` is only defined on this branch
            figure.suptitle('Template = {}, with {} number of spikes'.format(
                str(template_number),
                str(template['number of spikes'].values[0])))
        previous_template_number = template_number
    except Exception:
        template_number = None
    return template_number


tr.connect_repl_var(globals(), 'f', 'template_number', 'show_average_template')
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# Example #6

def phase_space(frame, ax1, ax2, seconds_to_track):
    # Reconstructed from a truncated snippet: only the tail of the plotting
    # loop survived; the definitions of `start` and `colors` were cut.
    for s in np.arange(len(colors)):
        ax2.plot(tsne_spike_rates_count_pcs_0p1[start + s:start + s + 2, 0],
                 tsne_spike_rates_count_pcs_0p1[start + s:start + s + 2, 1],
                 c=colors[s])
    return None


fig_scat = plt.figure(0)
ax1 = fig_scat.add_subplot(111)
ax1.imshow(tsne_pcs_count_cn_image,
           extent=tsne_pcs_count_cn_image_extent,
           aspect='auto')
ax2 = ax1.twinx()
seconds_to_track = 1
out = None
args = [ax1, ax2, seconds_to_track]
tr.connect_repl_var(globals(), 'frame', 'out', 'phase_space', 'args')

fig_plot = plt.figure(1)
fig_plot_zoom = plt.figure(2)

# </editor-fold>
# -------------------------------------------------

# -------------------------------------------------
# <editor-fold desc="MAKE VIDEO OF RAT VIDEO WITH THE TSNE">

opencv_rat_video = cv2.VideoCapture(video_file)
total_frames = int(opencv_rat_video.get(cv2.CAP_PROP_FRAME_COUNT))

frame_size_of_video_out = (2976, 1549)
dpi = 100
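
# A minimal sketch of the video-writing loop this section names (illustrative;
# the original loop and the output file name are assumptions): read each
# rat-video frame, resize it to the output size and write it out. Composing
# the t-SNE figure onto the frame would happen inside the loop.
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_out = cv2.VideoWriter(join(data_folder, 'rat_video_with_tsne.avi'),
                            fourcc, 30, frame_size_of_video_out)
for f in range(total_frames):
    ok, rat_frame = opencv_rat_video.read()
    if not ok:
        break
    video_out.write(cv2.resize(rat_frame, frame_size_of_video_out))
video_out.release()
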
osv.image(globals(), 'data2', 'im_l', 'cm', 'op', number_of_images=1)

osv.graph(globals(), 'data2')

import transform as tr

a = 1
b = 2
c = 3


def t(a, b):
    return a + b


tr.connect_repl_var(globals(), 'a', 'c', 't', 'b')
'''
import drop_down as dd
e = [1, 2, 3, 4]
dd.connect_repl_var(globals(), 'e', 't', 'c', 'b')
'''

import slider as s
s.connect_repl_var(globals(), 'a', 'c', 't', 'b', slider_limits=[0, 200])

import pandas as pd

events_file = r'Y:\swc\kampff\George\DataAndResults\Experiments\Awake\NeuroSeeker\AK_47.2\2019_06_29-11_36\Events_worked_on.csv'

events = pd.read_csv(events_file,
                     parse_dates=[0])  # the remaining read_csv arguments were cut from this snippet

# Look at the whole video with body trajectory superimposed
traj = np.zeros((640, 640, 4))
frame = 3
output = None


def update_trajectory_for_video(frame):
    traj[:, :, :] = 0
    bp = body_positions.astype(int)
    bp[:, 1] = 640 - bp[:, 1]  # flip y: the image origin is top left
    # Light up every position visited up to the current frame
    traj[bp[:frame, 1], bp[:frame, 0], :] = 255
    return None


tr.connect_repl_var(globals(), 'frame', 'output',
                    'update_trajectory_for_video')
sv.image_sequence(globals(), 'frame', 'full_video_file', 'traj')
# ------------------------------------------------------

space_occupancy = binning.bin_2d_array(body_positions, bins=[10, 10])

max_pix_per_frame = 20  # rats run at up to 13 km/h = 20 pixels/frame
bins = [
    np.logspace(np.log10(0.0001),
                np.log10(max_pix_per_frame * conversion_const),
                30,
                base=10), 20
]
velocity_categories = binning.bin_2d_array(body_velocities_polar, bins=bins)
speed_categories = pd.cut(body_velocities_polar[:, 0], bins=bins[0])
movement_categories = pd.cut(
    body_velocities_polar[:, 1], bins=bins[1])  # arguments reconstructed by analogy with speed_categories above

# ----------------------------------------------------------------------------------------------------------------------
# Example #9

# Reconstructed by analogy with the second-derivative block below (the first
# lines were cut): first derivative of the spike waveforms, zero-padded.
spike_data_fd = np.diff(spike_data, axis=2)
spike_data_fd = np.concatenate(
    (spike_data_fd, np.zeros((spike_data.shape[0], spike_data.shape[1], 1))),
    axis=2)
seq_v.graph_pane(globals(), 's', 'spike_data_fd')

spike_data_sd = np.diff(spike_data_fd, axis=2)
spike_data_sd = np.concatenate(
    (spike_data_sd, np.zeros((spike_data.shape[0], spike_data.shape[1], 1))),
    axis=2)

largest_channel_index = np.squeeze(
    np.argwhere(large_channels_full == large_channels[0]))


def get_phase_x_of_channel_for_spike(s):
    # x axis of the phase portrait: second derivative of spike s on the
    # largest-amplitude channel
    return spike_data_sd[s, largest_channel_index, :]


def get_phase_y_of_channel_for_spike(s):
    # y axis: first derivative of the same channel
    return spike_data_fd[s, largest_channel_index, :]


phase_x = None
phase_y = None

tr.connect_repl_var(globals(), 's', 'phase_x',
                    'get_phase_x_of_channel_for_spike')
tr.connect_repl_var(globals(), 's', 'phase_y',
                    'get_phase_y_of_channel_for_spike')

s = 1
one_s_v.graph(globals(), 'phase_y', 'phase_x')
def show_distances(frame, ax):
    # Reconstructed header: the snippet starts mid-function; the definitions
    # of `fr_smooth` and `step` (the window around the current frame) were
    # cut from this example. The body plots the regression target, its
    # prediction and the normalised test score around fr_smooth.
    ax.plot(Y[fr_smooth - step:fr_smooth + step])
    ax.plot(Y_pred[fr_smooth - step:fr_smooth + step])
    ax.plot(scores_test_periods_cont_norm[fr_smooth - step:fr_smooth + step])
    ax.vlines(x=step, ymin=-1.2, ymax=3)
    ax.hlines(y=r2_norm_threshold, xmin=0, xmax=2 * step)


fig = plt.figure(3)
ax = fig.add_subplot(111)
args = [ax]
out_tr = None

sv.image_sequence(globals(), 'frame', 'video_file')
dd.connect_repl_var(globals(), 'frame_starts_of_periods', 'frame')
tr.connect_repl_var(globals(), 'frame', 'is_period_tracked',
                    'is_frame_tracked')
tr.connect_repl_var(globals(), 'frame', 'out_tr', 'show_distances', 'args')
# </editor-fold>

# </editor-fold>
# -------------------------------------------------

# -------------------------------------------------
# <editor-fold desc="DO THE MIs BETWEEN THE SPIKE RATES AND THE QUALITY OF PREDICTION OF THE DISTANCE TO TRAVEL TO POKE (RUN ONCE)">

n = 0
mi_spike_rates_vs_quality_of_regression_to_travel_to_poke = []
for rate in spike_rates_away_from_poke_close_to_poke_smoothed:
    mi_spike_rates_vs_quality_of_regression_to_travel_to_poke.append(
        MI.mi_LNC([rate.tolist(), list(scores_test_cont)],
                  k=10))  # the remaining mi_LNC arguments were cut from this snippet

channels_heights = ns_funcs.get_channels_heights_for_spread_calulation(
    lfp_channels_on_probe)
bad_lfp_channels = [35, 36, 37]
lfp_channels_used = np.delete(
    np.arange(const.NUMBER_OF_LFP_CHANNELS_IN_BINARY_FILE), bad_lfp_channels)


def spread_lfp_pane(p):
    pane = lfp_data_panes[p, :, :]
    spread = ns_funcs.spread_data(pane, channels_heights, lfp_channels_used)
    spread = np.flipud(spread)
    return spread


pane_data = None
tr.connect_repl_var(globals(), 'pane', 'pane_data', 'spread_lfp_pane')

one_v.graph(globals(), 'pane_data')


camera_pulses, beam_breaks, sounds = \
    sync_funcs.get_time_points_of_events_in_sync_file(data_folder, clean=True,
                                                      cam_ttl_pulse_period=
                                                      const.CAMERA_TTL_PULSES_TIMEPOINT_PERIOD)
points_per_pulse = np.mean(np.diff(camera_pulses))

camera_frames_in_video = csv_funcs.get_true_frame_array(data_folder)
time_point_of_first_video_frame = camera_pulses[camera_frames_in_video][0]

video_frame = 0
video_file = join(data_folder, 'Video.avi')

# ----------------------------------------------------------------------------------------------------------------------
# Example #12

bad_lfp_channels = []
lfp_channels_used = np.delete(
    np.arange(const.NUMBER_OF_LFP_CHANNELS_IN_BINARY_FILE), bad_lfp_channels)

pane = 100


def spread_lfp_pane(p):
    pane = lfp_data_panes[p, :, :]
    spread = ns_funcs.spread_data(pane, channels_heights, lfp_channels_used)
    spread = np.flipud(spread)
    return spread


pane_data = None
tr.connect_repl_var(globals(), 'pane', 'pane_data', 'spread_lfp_pane')

one_v.graph(globals(), 'pane_data')


def do_nothing(p):
    # Identity transform: connecting it lets the slider drive `pane` directly
    return p


nothing = None
slider_limits = [0, lfp_data_panes.shape[0] - 1]
sl.connect_repl_var(globals(), 'pane', 'nothing', slider_limits=slider_limits)

# ----------------------------------------------------------------------------------------------------------------------
# SUBSAMPLE THE LFPS WITH DIFFERENT RATIOS AND SAVE THE FILES
# ----------------------------------------------------------------------------------------------------------------------
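
# A minimal sketch of the subsampling this heading names (illustrative; the
# original code was cut from this snippet, and an `lfp_data` array of shape
# channels x time points is assumed): keep every Nth time point and save.
for ratio in [2, 4, 8]:
    subsampled = lfp_data[:, ::ratio]
    subsampled.tofile(join(data_folder,
                           'lfp_subsampled_by_{}.bin'.format(ratio)))
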
# Show the sync trace gui
sync_range = 2000
sync_point = time_point_of_first_video_frame - int(sync_range / 2)
sv.graph_range(globals(), 'sync_point', 'sync_range', 'sync')

# ----------------------------------


# Connect the video gui to the sync trace gui
def time_point_to_frame(x):
    return sync_funcs.time_point_to_frame(time_point_of_first_video_frame,
                                          camera_frames_in_video,
                                          points_per_pulse, x)


tr.connect_repl_var(globals(), 'sync_point', 'video_frame',
                    'time_point_to_frame')

# ----------------------------------


# Connect the sync trace gui to the video gui
def frame_to_time_point(x):
    return sync_funcs.frame_to_time_point(time_point_of_first_video_frame,
                                          camera_frames_in_video,
                                          points_per_pulse, x)


tr.connect_repl_var(globals(), 'video_frame', 'sync_point',
                    'frame_to_time_point')
# ----------------------------------
# Example #14

global traj_x
traj_x = 0
global traj_y
traj_y = 0


def update_trajectory(f):
    global traj_x
    global traj_y
    traj_x = body_positions[:f, 0]
    traj_y = body_positions[:f, 1]
    return body_positions[:f, :]


traj = None
tr.connect_repl_var(globals(), 'frame', 'traj', 'update_trajectory')

osv.graph(globals(), 'traj_y', 'traj_x')
#  -------------------------------------------------
'''
#  -------------------------------------------------
# FITTING THE MARKERS TO GET BETTER ESTIMATES OF THE LOW LIKELIHOOD ONES
# Fitting 2d surface using multiple markers
# DID NOT WORK

body_markers_positions = markers.loc[:, markers.columns.get_level_values(1).isin(body_parts)]
body_markers_positions = body_markers_positions.loc[:, body_markers_positions.columns.get_level_values(2).isin(['x', 'y'])]


t = np.reshape(body_markers_positions.loc[:3605*120-1, :].values, (3605, 120, 6))
sec = 0
pane = 0
sequence_viewer.graph_pane(globals(), 'pane', 'data1')
# The pane viewer shows the last one or two dimensions of a 2d or 3d data set and iterates over the first one.
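
# A self-contained sketch of that behaviour (illustrative values): a random 3d
# array viewed pane by pane along its first dimension.
data1 = np.random.rand(3605, 120, 6)  # panes x time points x markers
pane = 0
sequence_viewer.graph_pane(globals(), 'pane', 'data1')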


# Connect two guis
def pos1_to_pos2(pos1):
    if pos1 >= 0 and pos1 <= 4000:
        return int(pos1 / 10)
    elif pos1 < 0:
        return 0
    elif pos1 > 4000:
        return 400


transform.connect_repl_var(globals(), 'position1', 'position2', 'pos1_to_pos2')
# Press the Transform button to stop the function from running and so break the connection
# If the function passed to the transform returns a bool then you get nice red/green leds for False and True
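
# For example (a sketch of the bool behaviour described above; `led` is an
# illustrative output variable):
def pos1_in_range(pos1):
    return 0 <= pos1 <= 4000


led = None
transform.connect_repl_var(globals(), 'position1', 'led', 'pos1_in_range')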


# SOME NOTES
# The variables in the command line are always connected to the guis. After you change them in the guis, look for their
# new values in the repl.
# Change the different values of the variables in the repl to see the guis update.


# The other guis available at the moment are
# 1) One more type of sequence_viewer for images or frames of video (you can pass the name of a video to the
# image_sequence function, or a 2d numpy array).
# 2) The two types of one_shot_viewers (graph and image) where the shown data gets updated by the repl and not by the gui
# 3) The video_viewer where a video can be played in real time (but it only gives out frame keys and not individual frames)