Example #1
0
                                              condition_config)
            else:
                raise ValueError('sorting method is not recognized')
            sorted_list_of_psths = [x[ixs, :] for x in list_of_psths]
            psth = psth[ixs, :]
        else:
            sorted_list_of_psths = list_of_psths

        images.append(psth)
        odor_on_times.append(odor_on)
        water_on_times.append(water_on)
        list_of_odor_names.append(odors_copy)
    return images, odor_on_times, water_on_times, list_of_odor_names, sorted_list_of_psths


# Load the processed imaging results and attach index/time metadata.
res = analysis.load_data(data_path)
analysis.add_indices(res)
analysis.add_time(res)
if condition_config.plot_big:
    # "Big plot" mode: build one summary panel per mouse on a hand-picked day.
    mice = np.unique(res['mouse'])
    # One day index per mouse; a value of -1 skips that mouse (checked below).
    list_of_days_per_mouse = condition_config.plot_big_days
    list_of_images = []          # per-mouse PSTH images
    list_of_list_of_psths = []   # per-mouse lists of per-odor PSTHs
    odor_on_times = []           # per-mouse odor-onset markers returned by helper
    water_on_times = []          # per-mouse water-onset markers returned by helper
    # Evaluates to 0 when plot_big_naive is set, else 1.
    days = [int(not condition_config.plot_big_naive)]
    for i, day in enumerate(list_of_days_per_mouse):
        mouse = mice[i]
        if day != -1:
            images, odor_on_times, water_on_times, list_of_odor_names, list_of_psth = helper(
                res, mouse, [day], condition_config)
Example #2
0
print(baseNum)
# Record which base-odor index combinations were used, for reproducibility.
np.savetxt('baseNumbersUsed.txt', baseNum)
#==============================================================================================================
# Run Base Odor and Mixture Odor Simulations
#==============================================================================================================
ex.createData(run_params_train, I_arr, states, net)  # base odors
# Each row of baseNum holds three base-odor indices; simulate their mixture.
for rowNum, aCombo in enumerate(baseNum):
    ex.mixtures3(run_params_test,
                 [I_arr[aCombo[0]], I_arr[aCombo[1]], I_arr[aCombo[2]]],
                 rowNum, states, net)  # mixture odors

#==============================================================================================================
# Load in Base and Mixture Data
#==============================================================================================================
# Load the training-run results back from disk.
spikes_t_arr, spikes_i_arr, I_arr, trace_V_arr, trace_t_arr, label_arr = anal.load_data(
    tr_prefix, num_runs=num_odors_train)

# Counter: necessary to increment like this because the number of valid
# mixture runs is not known without first dividing the grid up.
number_Of_Mix_Runs = 0
listOfAlpha = []
listOfBeta = []
# The mixing-fraction grids are loop-invariant: build them once instead of
# recreating both arrays on every inner iteration (the original rebuilt
# them num_alpha**2 times for identical contents).
A_arr = np.linspace(0, 1, num_alpha)
B_arr = np.linspace(0, 1, num_alpha)
for i in range(num_alpha):
    for j in range(num_alpha):
        if A_arr[i] + B_arr[j] <= 1:  # Only using normalized probabilities
            listOfAlpha.append(A_arr[i])
            listOfBeta.append(B_arr[j])
            number_Of_Mix_Runs += 1

# Placeholder slots, filled later with per-combo spike-time arrays.
spikes_t_test_arr_for_combo = 5 * [None]
Example #3
0
# Test-run configuration handed to the simulation driver.
run_params_test = {
    'num_odors': num_odors_mix,   # how many mixture odors to present
    'num_trials': num_alpha,      # trials per odor
    'prefix': te_prefix,          # output-file prefix for test runs
    'inp': inp,
    'noise_amp': noise_test,
    'run_time': run_time,
    'N_AL': N_AL,
    'train': False,               # mark these runs as held-out test data
}

# --------------------------------------------------------------
# run the simulation and save to disk
ex.createData(run_params_train, I_arr, states, net)
ex.mixtures2(run_params_test, I_arr[:num_odors_mix], states, net)

# load the training and test results back in from disk
spikes_t_arr, spikes_i_arr, I_arr, trace_V_arr, trace_t_arr, label_arr = anal.load_data(tr_prefix,
                                                                                        num_runs=num_odors_train)
spikes_t_test_arr, spikes_i_test_arr, I_test_arr, test_V_arr, test_t_arr, label_test_arr = anal.load_data(te_prefix,
                                                                                                          num_runs=num_alpha * num_test)

# Assemble the training design matrix: concatenate the voltage traces and
# transpose so samples sit on the rows.
X = np.hstack(trace_V_arr).T

# Rescale the training data into a common range before fitting.
mini, maxi = X.min(), X.max()
X = anal.normalize(X, mini, maxi)

# Matching class labels, one per sample.
y = np.hstack(label_arr)

# Fit the classifier on the normalized traces.
clf = anal.learnSVM(X, y)
Example #4
0

mixing_fractions = np.linspace(0, 1, num=11)
# Sweep every (reward schema, stimulus schema) pair and analyze each run.
for reward in r_schema:
    for stimulus in u_schema:
        moniker = '%s-%s' % (reward, stimulus)
        print(moniker)
        print('')
        # NOTE(review): the whole mixing_fractions array is passed as
        # mixing_fraction -- confirm Network expects the full grid rather
        # than a single scalar per run.
        simulation = Network(N=N, duration=2500, downsampling=1,
                             mixing_fraction=mixing_fractions,
                             r_schema=reward, u_schema=stimulus)

        active_directory = simulation.basedir
        results = [filename for filename in os.listdir(active_directory) if 'results' in filename]
        accuracy = np.zeros((len(mixing_fractions), N['memories']))
        energies = np.zeros((len(mixing_fractions), N['memories']))
        for i, (results_filename, fraction) in enumerate(zip(results, mixing_fractions)):
            data = postdoc.load_data(os.path.join(active_directory, results_filename))
            # Build the per-fraction file tag once, and format it *before*
            # joining with the directory so a '%' in the path can never
            # break the substitution.
            tag = '%s-%s' % (str(int(fraction * 10)), moniker)
            accuracy[i, :] = postdoc.accuracy_figure(data, savename=os.path.join(active_directory, 'accuracy-' + tag))
            # energies[i,:] = postdoc.energy_figure(data,savename=os.path.join(active_directory,'energy-%s')%str(int(fraction*10)))
            #postdoc.correlation_visualization(data,savename =os.path.join(active_directory,'correlations-%s')%str(int(fraction*10)))
            # BUG FIX: the originals used one-placeholder templates such as
            # 'M-change-%s' with a two-element tuple, which raises
            # TypeError ("not all arguments converted during string
            # formatting") at runtime. Both values now appear in the name.
            visualization.track_matrices(data['M'], savename=os.path.join(active_directory, 'M-change-' + tag))
            visualization.memory_stability(data['memory_stability'], savename=os.path.join(active_directory, 'M-stability-' + tag))
            visualization.network_stability(data['network_stability'], savename=os.path.join(active_directory, 'network-stability-' + tag))
            #Don't forget about this.
        correl = postdoc.sensitivities(mixing_fractions, accuracy.transpose(), savename=os.path.join(active_directory, 'sensitivities-%s' % moniker))
        # Transpose so that the x-axis contains mixing fraction and y-axis accuracy
        #print correl
del simulation

'''
        TODO: 
                1. Exploring sensitvity of recall to changes in M and initial conditions (mixing parameter) 
Example #5
0
class MPFC_TWO_PHASE:
    """Configuration for the two-phase mPFC imaging condition.

    Experimental timeline (in days):
        day 0: naive odor presentation
        day 1: pretraining start
        day 3: pretraining fully learned
        day 4: discrimination start
        day 5: discrimination fully learned
    """
    path = os.path.join(rootdir,
                        'MPFC_TWO_PHASE')  #specify path on your local machine
    pretraining_odors = ['oct']  # odor presented during pretraining
    discrimination_odors = ['pin', 'msy', 'euy', 'lim']  # odors for the discrimination phase
    csp = ['oct', 'pin', 'msy']  # presumably the CS+ (rewarded) odors -- confirm against training code


condition = OFC_SINGLE_PHASE()  #input your condition

# Load imaging data for the chosen condition and attach index/time metadata.
res = analysis.load_data(condition.path)
analysis.add_indices(res)
analysis.add_time(res)

# Reshape each session's data array so that axis 0 = cells,
# axis 1 = trials, axis 2 = time within a trial.
for i, data in enumerate(res['data']):
    frames_per_trial = res['TRIAL_FRAMES'][i]
    data_r = utils.reshape_data(data,
                                nFrames=frames_per_trial,
                                cell_axis=0,
                                trial_axis=1,
                                time_axis=2)
    res['data'][i] = data_r

print(res.keys())
print(res['day'])
day = 1  # day selected for the analysis that follows
Example #6
0
                        train = False)

# Labels and values pair positionally for the (disabled) params.txt dump.
param_labels = ['N_AL', 'in_AL', 'PAL', 'inj', 'eta', 'num_train', 'num_test', 'runtime', 'p_inj']
param_values = [N_AL, in_AL, PAL, inp, noise_test, num_train, num_test, run_time, p_inj]
# NOTE(review): 'inj' maps to inp and 'eta' maps to noise_test --
# confirm these label/value pairings are intentional.

# Run the training and test simulations, saving results to disk.
ex.createData(run_params_train, I_arr, states, net)
ex.createData(run_params_test, I_arr, states, net)

# This saves parameters used in the test folders
#np.savetxt(te_prefix + 'params.txt',list(zip(param_labels,param_values)),fmt='%s')


#""" PCA/SVM Code

_, _, _, trace_V_arr, _, label_arr = anal.load_data(tr_prefix, num_runs = num_odors*num_train)
_, _, _, test_V_arr, _, label_test_arr = anal.load_data(te_prefix, num_runs = num_odors*num_test)

pca = True #do PCA on output or not

if pca: #do PCA on the output
    pca_dim = 2
    pca_arr, PCA = anal.doPCA(trace_V_arr, k = pca_dim)

    X = np.hstack(pca_arr).T
else:
    X = np.hstack(trace_V_arr).T


#normalize
mini = np.min(X)