def get_metric_details(path, timeshift, pathname_metadata=""):
    """
    Load the pickled decoding results for one time shift and build lick-level
    detail objects, including a per-lick flag for whether the second-most
    frequently decoded well matches the lick's target.

    :param path: directory containing the pickled network output files
    :param timeshift: time shift the files were produced with (part of the file names)
    :param pathname_metadata: optional suffix appended to the searched file names
    :return: tuple (lick_id_details, lick_id_details_k) where the second element
        holds one Lick_id_details per entry of the cross-validated metrics_k
    """
    metrics = load_pickle(path + "metrics_timeshift=" + str(timeshift) + pathname_metadata + ".pkl")
    metrics_k = load_pickle(path + "metrics_k_timeshift=" + str(timeshift) + pathname_metadata + ".pkl")
    nd = load_pickle(path + "nd_timeshift=" + str(timeshift) + pathname_metadata + ".pkl")
    licks = load_pickle(path + "licks_timeshift=" + str(timeshift) + pathname_metadata + ".pkl")
    lick_id_details = Lick_id_details()
    lick_id_details.from_metrics(nd=nd, metrics=metrics, timeshift=timeshift, licks=licks)
    # one detail object per cross-validation entry in metrics_k
    lick_id_details_k = []
    for metric in metrics_k:
        obj = Lick_id_details()
        obj.from_metrics(nd=nd, metrics=metric, timeshift=timeshift, licks=licks)
        lick_id_details_k.append(obj)
    # Calculate approximate accuracy
    all_guesses = load_pickle(path + "all_guesses_timeshift=" + str(timeshift) + pathname_metadata + ".pkl")
    # rows: licks (indexed by lick_id - 1), columns: wells (prediction - 1);
    # each cell counts how often that well was predicted for that lick
    approximate_accuracy = np.zeros(
        (len(lick_id_details.valid_licks), nd.num_wells))
    for guess in all_guesses:
        for evaluated_sample in guess:
            i = evaluated_sample.lick_id - 1
            prediction = evaluated_sample.prediction
            approximate_accuracy[i][prediction - 1] += 1
    # flag per lick: 1 if the second-most decoded well equals the lick target
    second_highest_decoded = np.zeros(len(lick_id_details.valid_licks))
    sum_highest = 0
    sum_second = 0
    sum_total = 0
    # prints table of most frequent guesses by well
    for i, event in enumerate(approximate_accuracy):
        sum_highest += np.max(event)
        # np.partition(...)[-2] is the second-largest count in this row
        sum_second += np.partition(event.flatten(), -2)[-2]
        sum_total += np.sum(event)
        # returns well of second largest value in list
        # NOTE(review): if the two largest counts tie, np.where picks the first
        # matching well, which may be the most-decoded one — confirm intent
        second_largest_well = int(
            np.where(event == np.partition(event.flatten(), -2)[-2])[0][0] + 1)
        if second_largest_well == licks[
            i].target and lick_id_details.valid_licks[i] == 1:
            second_highest_decoded[i] = 1
    lick_id_details.second_highest_decoded = second_highest_decoded
    print(sum_highest, sum_second, sum_total)
    # generate all_guesses_k for standard deviation
    return lick_id_details, lick_id_details_k
def load_position_network_output(path):
    """
    Load every network-output pickle found in ``path + "output/"`` and return
    them as a list sorted ascending by ``net_data.time_shift``.

    :param path: experiment directory containing an ``output/`` subdirectory
    :return: list of unpickled network output objects, sorted by time shift
    :raises OSError: if the output directory contains no ``.pkl`` files
    """
    dict_files = glob.glob(path + "output/" + "*.pkl")
    if len(dict_files) == 0:
        raise OSError("Warning: network Directory is empty")
    # BUGFIX(perf): the original unpickled every file twice — once just to read
    # its time shift for sorting and once more to build the result list. Load
    # each file once and sort the loaded objects directly; the order is
    # identical (stable sort on the same key, ties keep glob order).
    net_out = [load_pickle(file_path) for file_path in dict_files]
    net_out.sort(key=lambda net_dict: net_dict.net_data.time_shift)
    return net_out
def print_table(paths):
    """
    Print a LaTeX-style table built from the pickled result files.

    Each path contributes one column (first 56 entries of its pickle); the
    table is transposed so rows correspond to entries, prefixed with a
    1-based row number. Cells are joined with " & ".

    :param paths: iterable of pickle file paths produced by this module
    """
    columns = [load_pickle(p)[0:56] for p in paths]
    rows = np.transpose(columns)
    int_columns = (0, 1, 4, 5)  # which columns are ints (wells)
    for row_no, row in enumerate(rows, start=1):
        print(row_no, end=" & ")
        for col_no, value in enumerate(row):
            if col_no in int_columns:
                cell = str(int(value))
            else:
                cell = format(float(value), ".1f")
            print(cell + " & ", end="")
        print(" \ ")
def plot_accuracy_inside_phase(path, shift, title, save_path, color="darkviolet"):
    """
    Plot the average fraction of correctly decoded samples as a function of the
    lick's position (visit count) inside its phase, with a histogram of how
    many phases reach each visit count on a twin axis. Saves the figure.

    :param path: directory containing the pickled metrics file
    :param shift: time shift encoded in the metrics file name
    :param title: plot title
    :param save_path: file path the figure is saved to
    :param color: matplotlib color for the accuracy markers
    """
    # load accuracy data
    metrics = load_pickle(path + "metrics_timeshift=" + str(shift) + ".pkl")
    # plot chart
    sample_counter = np.zeros(1000)
    bin_values = []
    accuracy_sum = np.zeros(1000)
    position = 0
    current_phase = metrics[0].phase
    for lick in metrics:
        # BUGFIX: detect the phase change BEFORE binning, so the first lick of
        # a new phase is counted at position 0. The original reset `position`
        # only after binning, so that lick landed in the previous phase's
        # running bin (off-by-one).
        if lick.phase != current_phase:  # new phase
            current_phase = lick.phase
            position = 0
        sample_counter[position] += 1
        bin_values.append(position)
        accuracy_sum[position] += lick.fraction_decoded
        position += 1
    # remove trailing zeros and normalize phase
    sample_counter = np.trim_zeros(sample_counter, 'b')
    accuracy_sum = np.trim_zeros(accuracy_sum, 'b')
    y = np.divide(accuracy_sum, sample_counter)
    fig, ax = plt.subplots()
    x = np.arange(0, len(y))
    ax.plot(x, y, label='average', color=color, marker='.', linestyle="None")
    ax.legend()
    ax.grid(c='k', ls='-', alpha=0.3)
    ax.set_xlabel("Number of visits of well 1 inside phase")
    ax.set_ylabel("Average fraction of samples decoded correctly")
    ax.set_title(title)
    # twin axis: histogram of how many phases have each number of visits
    ax_b = ax.twinx()
    ax_b.set_ylabel("Phases with number of visits")
    z = np.arange(0, 12)
    ax_b.hist(bin_values, bins=z, facecolor='g', alpha=0.2)
    # plt.show()
    plt.savefig(save_path)
def print_average_overlap(paths, filter_factor=0.0):
    """
    Print and return the fraction of entries on which two decoding outputs
    agree, considering only neurons whose firing rate exceeds a quantile
    threshold.

    :param paths: list of paths of the neural activity output files produced by
        this module. NOTE(review): the code indexes four entries (0/1: values
        to compare, 2/3: the corresponding spike-rate lists) although the
        original docstring said "two" — confirm with callers.
    :param filter_factor: keeps fraction of neurons with highest firing rate
        and filters out rest (0.0 keeps all neurons above the minimum rate)
    :return: average overlap as a float (np.average of the boolean matches)
    """
    data_collection_i = []
    for path in paths:
        data_collection_i.append(load_pickle(path)[0:56])
    data_collection = (list(zip(data_collection_i[0], data_collection_i[1])))
    spikerate_collection = (data_collection_i[2] + data_collection_i[3])
    sorted_collection = spikerate_collection.copy()
    sorted_collection.sort()
    # BUGFIX: this threshold assignment was commented out while `max_spikerate`
    # was still used below, so every call raised a NameError. Restored: the
    # threshold is the firing rate at the filter_factor quantile of all rates
    # (clamped so filter_factor=1.0 does not index past the end).
    threshold_index = min(int(filter_factor * len(spikerate_collection)),
                          len(sorted_collection) - 1)
    max_spikerate = sorted_collection[threshold_index]
    counter = []
    for i, data_row in enumerate(data_collection):
        if spikerate_collection[i] > max_spikerate:
            counter.append(data_row[0] == data_row[1])
    average_overlap = np.average(counter)  # compute once instead of twice
    print(average_overlap)
    return average_overlap
# speed_list_hc, spike_rate_list_hc = return_bar_values(nd, lickstart, lickstop, resolution) # nd.filtered_data_path = "session_pfc_lw.pkl" # speed_list_pfc, spike_rate_list_pfc = return_bar_values(nd, lickstart, lickstop, resolution) # # with open("speed_list_hc", 'wb') as f: # pickle.dump(speed_list_hc, f) # with open("spike_rate_list_hc", 'wb') as f: # pickle.dump(spike_rate_list_hc, f) # # with open("speed_list_pfc", 'wb') as f: # pickle.dump(speed_list_pfc, f) # with open("spike_rate_list_pfc", 'wb') as f: # pickle.dump(spike_rate_list_pfc, f) speed_list_hc = load_pickle("speed_list_hc") speed_list_pfc = load_pickle("speed_list_pfc") spike_rate_list_hc = load_pickle("spike_rate_list_hc") spike_rate_list_pfc = load_pickle("spike_rate_list_pfc") # plot results fig, ((ax1, ax3), (ax2, ax4)) = plt.subplots(2, 2) ax1.plot(time_ind, speed_list_hc, color="b", label="HC") # label="cv "+str(i+1)+"/10", ax1.set_ylabel("speed [cm/s]", fontsize=fontsize) ax1.xaxis.set_major_locator(plt.MaxNLocator(3)) ax1.yaxis.set_major_locator(plt.MaxNLocator(3)) ax1.legend(fontsize=fontsize) ax2.plot(time_ind, spike_rate_list_hc, color="b") # label="cv "+str(i+1)+"/10", ax2.set_ylabel("spikes/s", fontsize=fontsize)
def print_metric_details(path, timeshift, pathname_metadata=""):
    """
    :param path: directory of network files, should be filled
    :param timeshift: +1 or - 1 for future or past decoding files
    :param pathname_metadata: if there were multiple experiments in one directory, this can be used to distinguish this by appending to the end of the searched file names
    :return: prints metric details and details by lick
    """
    path = path + "output/"
    # Create binary arrays for licks corresponding to each inspected filter
    metrics = load_pickle(path + "metrics_timeshift=" + str(timeshift) + pathname_metadata + ".pkl")
    nd = load_pickle(path + "nd_timeshift=" + str(timeshift) + pathname_metadata + ".pkl")
    licks = load_pickle(path + "licks_timeshift=" + str(timeshift) + pathname_metadata + ".pkl")
    lick_id_details = Lick_id_details()
    lick_id_details.from_metrics(nd=nd, metrics=metrics, timeshift=timeshift, licks=licks)
    # header row of the (currently disabled) LaTeX summary table
    latex_table = []
    latex_table.append([
        "event count", " correct guesses", "false guesses", "fraction correct",
        "decoded next phase", "decoded last phase", "decoded current phase",
        "fraction current phase", "decoded next lick", "decoded last lick"
    ])
    # row labels for that table; NOTE(review): currently unused because the
    # table-printing code below is commented out
    top_row = [
        "all licks", "correct licks", "false licks", "next well licked is 2",
        "next well licked is 3", "next well licked is 4", "next well licked is 5"
    ]
    # print("Filter: all licks")
    # latex_table.append(lick_id_details.print_details())
    # print("Filter: correct licks")
    # lick_id_details.filter = lick_id_details.target_lick_correct
    # latex_table.append(lick_id_details.print_details())
    # print("Filter: false licks")
    # lick_id_details.filter = lick_id_details.target_lick_false
    # latex_table.append(lick_id_details.print_details())
    # # print("Filter: licks prior to switch")
    # # lick_id_details.filter = lick_id_details.licks_prior_to_switch
    # # latex_table.append(lick_id_details.print_details())
    # # print("Filter: licks after switch")
    # # lick_id_details.filter = lick_id_details.licks_after_switch
    # #
    # NOTE(review): this append is live while all sibling appends around it are
    # commented out — confirm it was not meant to be disabled as well
    latex_table.append(lick_id_details.print_details())
    # print("Filter: next well licked is 2")
    # lick_id_details.filter = lick_id_details.next_well_licked_2
    # latex_table.append(lick_id_details.print_details())
    # print("Filter: next well licked is 3")
    # lick_id_details.filter = lick_id_details.next_well_licked_3
    # latex_table.append(lick_id_details.print_details())
    # print("Filter: next well licked is 4")
    # lick_id_details.filter = lick_id_details.next_well_licked_4
    # latex_table.append(lick_id_details.print_details())
    # print("Filter: next well licked is 5")
    # lick_id_details.filter = lick_id_details.next_well_licked_5
    # latex_table.append(lick_id_details.print_details())
    # latex_table_2 = [[latex_table[j][i] for j in range(len(latex_table))] for i in range(len(latex_table[0]))]  # transpose table
    # for i,row in enumerate(latex_table_2):
    #     for j,item in enumerate(row):
    #         if j in [0]:  # first column is string
    #             print(item ,end=" & ")
    #         else:
    #             if i in [0,1,2,4,5,6,8,9]:
    #                 print(int(item),end= " & ")
    #             else:
    #                 print(format(float(item), ".1f") ,end=" & ")
    #     print(" \ ")
    print_lickwell_metrics(metrics, nd, licks)
# --- Script configuration for the lickwell decoding evaluation ---
only_phase_change_trials = False
by_sample = True  # True: sample-level evaluation; False: lick-level branch below
path = "C:/Users/NN/Desktop/Master/experiments/Lickwell_prediction/"
model_path_list = [
    "C:/Users/NN/Desktop/Master/experiments/Experiments for thesis 2/well decoding/hc/"]
image_title_list = ["pfc","hc"]
fontsize = 24
# LaTeX-rendered serif (Palatino) fonts for all figure text
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
rc('xtick', labelsize=fontsize)
rc('ytick', labelsize=fontsize)
rc('axes', labelsize=fontsize)
if by_sample is False:
    # lick-level evaluation: load per-model pickled results for each time shift
    for timeshift in [1]:
        for j,path in enumerate(model_path_list):
            metrics = load_pickle(path + "output/metrics_timeshift=" + str(timeshift) + ".pkl")
            metrics_k = load_pickle(path + "output/metrics_k_timeshift=" + str(timeshift) + ".pkl")
            nd = load_pickle(path + "output/nd_timeshift=" + str(timeshift) + ".pkl")
            licks = load_pickle(path + "output/licks_timeshift=" + str(timeshift) + ".pkl")
            array = np.zeros((4,4))
            # flatten the per-fold (k) metrics into a single list
            metrics_flattened = [item for sublist in metrics_k for item in sublist]
            # NOTE(review): the loop body may continue past this chunk; the
            # lines below were already commented out in the original
            # metrics_a = load_pickle(model_path_list[0] + "output/metrics_k_timeshift=" + str(timeshift) + ".pkl")
            # metrics_b = load_pickle(model_path_list[1] + "output/metrics_k_timeshift=" + str(timeshift) + ".pkl")
            # metrics_af = np.reshape(metrics_a,-1)
            # metrics_bf = np.reshape(metrics_b,-1)
def init():
    """
    Init callback for a matplotlib animation: seeds the global image artist
    `im` with random 5x5 data and returns it as the artist list.
    """
    # NOTE(review): `original` is loaded but never used in this function —
    # confirm whether the pickle load is needed here at all
    original = load_pickle("C:/Users/NN/AppData/Local/Temp/animation/target")
    im.set_data(np.random.random((5, 5)))
    return [im]