def standard_correlation_by_epochs(est, val, modelspecs, epochs_list, rec=None):
    """
    Like standard_correlation, but computes metrics on subsets of the data
    defined by epochs_list.

    To use this, first add epochs to define subsets of data, then pass
    epochs_list as a list of subsets to test. For example,
    ['A', 'B', ['A', 'B']] will measure correlations separately for all
    epochs marked 'A', all epochs marked 'B', and all epochs marked
    'A' or 'B'.

    Averaged results for each subset are stored under
    modelspecs[0][0]['meta'][<subset label>]. Returns the (mutated)
    modelspecs list.
    """
    for epochs in epochs_list:
        # Label for this subset. A bare string is used as-is: joining a
        # multi-character epoch name would scatter its characters
        # (e.g. 'AB' -> 'A+B'); lists are joined with '+' as before.
        if isinstance(epochs, str):
            epoch_list_str = epochs
        else:
            epoch_list_str = "+".join([str(x) for x in epochs])

        # Restrict resp to the requested epochs on deep copies so the
        # caller's recordings are left untouched.
        val_copy = copy.deepcopy(val)
        for vc in val_copy:
            vc['resp'] = vc['resp'].select_epochs(epochs)
        est_copy = copy.deepcopy(est)
        for ec in est_copy:
            ec['resp'] = ec['resp'].select_epochs(epochs)

        # Compute scores for validation data
        r_test = [nmet.corrcoef(p, 'pred', 'resp') for p in val_copy]
        mse_test = [nmet.nmse(p, 'pred', 'resp') for p in val_copy]
        ll_test = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in val_copy]
        # BUGFIX: r_floor was computed on the full val set; use the
        # epoch-restricted copy so it describes the same subset as the
        # other metrics.
        r_floor = [nmet.r_floor(p, 'pred', 'resp') for p in val_copy]
        if rec is not None:
            r_ceiling = [nmet.r_ceiling(p, rec, 'pred', 'resp')
                         for p in val_copy]

        # Repeat for est data.
        r_fit = [nmet.corrcoef(p, 'pred', 'resp') for p in est_copy]
        mse_fit = [nmet.nmse(p, 'pred', 'resp') for p in est_copy]
        ll_fit = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in est_copy]

        # Average across recordings and store under the subset label.
        meta = modelspecs[0][0]['meta']
        meta[epoch_list_str] = {
            'r_test': np.mean(r_test),
            'mse_test': np.mean(mse_test),
            'll_test': np.mean(ll_test),
            'r_fit': np.mean(r_fit),
            'r_floor': np.mean(r_floor),
            'mse_fit': np.mean(mse_fit),
            'll_fit': np.mean(ll_fit),
        }
        if rec is not None:
            meta[epoch_list_str]['r_ceiling'] = np.mean(r_ceiling)

    return modelspecs
def correlation_per_model(est, val, modelspecs, rec=None):
    '''
    Expects the lengths of est, val, and modelspecs to match, since est[i]
    should have been evaluated on the fitted modelspecs[i], etc.

    Similar to standard_correlation, but saves correlation information
    to every first-module 'meta' entry instead of saving an average to
    only the first modelspec.

    Returns a deep copy of modelspecs with metrics filled in; the input
    list is not modified.
    '''
    if not len(est) == len(val) == len(modelspecs):
        # BUGFIX: ValueError does not %-format its arguments (that is a
        # logging-style API); format the message eagerly instead.
        raise ValueError(
            "est, val, and modelspecs should all be lists"
            " of equal length. got: %d, %d, %d respectively."
            % (len(est), len(val), len(modelspecs)))

    modelspecs = copy.deepcopy(modelspecs)

    # Per-recording metrics on the validation data.
    r_tests = [nmet.corrcoef(v, 'pred', 'resp') for v in val]
    mse_tests = [nmet.nmse(v, 'pred', 'resp') for v in val]
    ll_tests = [nmet.likelihood_poisson(v, 'pred', 'resp') for v in val]

    # Per-recording metrics on the estimation data.
    r_fits = [nmet.corrcoef(e, 'pred', 'resp') for e in est]
    mse_fits = [nmet.nmse(e, 'pred', 'resp') for e in est]
    ll_fits = [nmet.likelihood_poisson(e, 'pred', 'resp') for e in est]

    r_floors = [nmet.r_floor(v, 'pred', 'resp') for v in val]
    # r_ceiling requires the raw recording; fill with None when unavailable.
    if rec is None:
        r_ceilings = [None] * len(r_floors)
    else:
        r_ceilings = [nmet.r_ceiling(v, rec, 'pred', 'resp') for v in val]

    for i, m in enumerate(modelspecs):
        m[0]['meta'].update({
            'r_test': r_tests[i],
            'mse_test': mse_tests[i],
            'll_test': ll_tests[i],
            'r_fit': r_fits[i],
            'mse_fit': mse_fits[i],
            'll_fit': ll_fits[i],
            'r_floor': r_floors[i],
            'r_ceiling': r_ceilings[i],
        })

    return modelspecs
def standard_correlation_by_set(est, val, modelspecs):
    """Store per-set pred/resp metrics: modelspecs[i] gets the scores
    computed from est[i] and val[i] (no averaging across sets)."""
    # Validation-set scores, one entry per recording.
    r_test = [nmet.corrcoef(rec, 'pred', 'resp') for rec in val]
    mse_test = [nmet.nmse(rec, 'pred', 'resp') for rec in val]
    ll_test = [nmet.likelihood_poisson(rec, 'pred', 'resp') for rec in val]

    # Estimation-set scores.
    r_fit = [nmet.corrcoef(rec, 'pred', 'resp') for rec in est]
    mse_fit = [nmet.nmse(rec, 'pred', 'resp') for rec in est]
    ll_fit = [nmet.likelihood_poisson(rec, 'pred', 'resp') for rec in est]

    for idx, mspec in enumerate(modelspecs):
        meta = mspec[0]['meta']
        meta['r_test'] = r_test[idx]
        meta['mse_test'] = mse_test[idx]
        meta['ll_test'] = ll_test[idx]
        meta['r_fit'] = r_fit[idx]
        meta['mse_fit'] = mse_fit[idx]
        meta['ll_fit'] = ll_fit[idx]

    return modelspecs
def standard_correlation(est, val, modelspecs):
    """Compute mean pred/resp metrics over the est and val recording lists
    and store the averages in the first modelspec's meta dict.

    NOTE: this definition is shadowed by later definitions of
    standard_correlation in this module.
    """
    # metric-name -> scorer, applied to every recording in each data set
    scorers = (
        ('r', nmet.corrcoef),
        ('mse', nmet.nmse),
        ('ll', nmet.likelihood_poisson),
    )

    meta = modelspecs[0][0]['meta']
    for name, score in scorers:
        meta[name + '_test'] = np.mean([score(rec, 'pred', 'resp')
                                        for rec in val])
    for name, score in scorers:
        meta[name + '_fit'] = np.mean([score(rec, 'pred', 'resp')
                                       for rec in est])

    return modelspecs
def standard_correlation(est, val, modelspecs, rec=None):
    """
    Compute pred-vs-resp metrics (jackknifed correlation, NMSE, Poisson
    likelihood, r_floor and optionally r_ceiling) on est and val data and
    store them in modelspecs[0][0]['meta'].

    NOTE: this definition is shadowed by later definitions of
    standard_correlation in this module.
    """
    r_ceiling = 0
    if type(val) is not list:
        # Single recording passed directly.
        r_test, se_test = nmet.j_corrcoef(val, 'pred', 'resp')
        r_fit, se_fit = nmet.j_corrcoef(est, 'pred', 'resp')
        r_floor = nmet.r_floor(val, 'pred', 'resp')
        if rec is not None:
            r_ceiling = nmet.r_ceiling(val, rec, 'pred', 'resp')
        # BUGFIX: j_nmse returns a (value, se) pair (it is unpacked in the
        # branch below). Previously the whole tuple was assigned to
        # mse_test and se_mse_test was never defined, raising NameError at
        # the meta assignments at the bottom of this function.
        mse_test, se_mse_test = nmet.j_nmse(val, 'pred', 'resp')
        mse_fit, se_mse_fit = nmet.j_nmse(est, 'pred', 'resp')
    elif len(val) == 1:
        # Single-recording list.
        r_test, se_test = nmet.j_corrcoef(val[0], 'pred', 'resp')
        r_fit, se_fit = nmet.j_corrcoef(est[0], 'pred', 'resp')
        r_floor = nmet.r_floor(val[0], 'pred', 'resp')
        if rec is not None:
            r_ceiling = nmet.r_ceiling(val[0], rec, 'pred', 'resp')
        mse_test, se_mse_test = nmet.j_nmse(val[0], 'pred', 'resp')
        mse_fit, se_mse_fit = nmet.j_nmse(est[0], 'pred', 'resp')
    else:
        # unclear if this ever executes since jackknifed val sets are
        # typically already merged
        r = [nmet.corrcoef(p, 'pred', 'resp') for p in val]
        r_test = np.mean(r)
        se_test = np.std(r) / np.sqrt(len(val))
        r = [nmet.corrcoef(p, 'pred', 'resp') for p in est]
        r_fit = np.mean(r)
        se_fit = np.std(r) / np.sqrt(len(val))
        r_floor = [nmet.r_floor(p, 'pred', 'resp') for p in val]
        # TODO compute r_ceiling for multiple val sets
        r_ceiling = 0
        mse_test = [nmet.nmse(p, 'pred', 'resp') for p in val]
        mse_fit = [nmet.nmse(p, 'pred', 'resp') for p in est]
        se_mse_test = np.std(mse_test) / np.sqrt(len(val))
        se_mse_fit = np.std(mse_fit) / np.sqrt(len(est))
        mse_test = np.mean(mse_test)
        mse_fit = np.mean(mse_fit)

    # NOTE(review): when val/est are single recordings (first branch) these
    # comprehensions iterate the recording object directly -- confirm that
    # is intended against the callers.
    ll_test = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in val]
    ll_fit = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in est]

    meta = modelspecs[0][0]['meta']
    meta['r_test'] = r_test
    meta['se_test'] = se_test
    meta['r_floor'] = r_floor
    meta['mse_test'] = mse_test
    meta['se_mse_test'] = se_mse_test
    meta['ll_test'] = np.mean(ll_test)
    meta['r_fit'] = r_fit
    meta['se_fit'] = se_fit
    meta['r_ceiling'] = r_ceiling
    meta['mse_fit'] = mse_fit
    meta['se_mse_fit'] = se_mse_fit
    meta['ll_fit'] = np.mean(ll_fit)

    return modelspecs
def standard_correlation(est, val, modelspec=None, modelspecs=None, rec=None,
                         use_mask=True, **context):
    """Compute pred-vs-resp metrics per view and store them in modelspec.meta.

    Metrics (jackknifed r, NMSE, Poisson likelihood, r_floor, and r_ceiling
    when rec is given) are computed per output channel and per view, and
    written into modelspec.meta as (out_chan_count, view_count) arrays.
    Returns the (mutated) modelspec.
    """
    # use_mask: mask before computing metrics (if mask exists)
    # Compute scores for validation data
    r_ceiling = 0

    # deprecated support for modelspecs lists -- hard failure, callers must
    # pass a single modelspec
    if modelspecs is not None:
        raise Warning('Use of modelspecs list is deprecated')

    # by default, assume that model is trying to predict resp signal
    output_name = modelspec.meta.get('output_name', 'resp')

    # TODO: support for multiple views -- if ever desired? usually
    # validation set views should have been recombined by now, right?
    view_count = val.view_count

    # KLUDGE ALERT!
    # only compute results for first jackknife -- for simplicity, not optimal!
    # only works if view_count==1 or resp_count(# resp channels)==1
    # est views are spaced jack_count apart per val view
    est_mult = modelspec.jack_count

    # one row per output channel, one column per view
    out_chan_count = val[output_name].shape[0]
    r_test = np.zeros((out_chan_count, view_count))
    se_test = np.zeros((out_chan_count, view_count))
    r_fit = np.zeros((out_chan_count, view_count))
    se_fit = np.zeros((out_chan_count, view_count))
    r_floor = np.zeros((out_chan_count, view_count))
    r_ceiling = np.zeros((out_chan_count, view_count))
    mse_test = np.zeros((out_chan_count, view_count))
    se_mse_test = np.zeros((out_chan_count, view_count))
    mse_fit = np.zeros((out_chan_count, view_count))
    se_mse_fit = np.zeros((out_chan_count, view_count))
    ll_test = np.zeros((out_chan_count, view_count))
    ll_fit = np.zeros((out_chan_count, view_count))

    for i in range(view_count):
        # apply mask (if present and requested) before scoring this view
        if ('mask' in val.signals.keys()) and use_mask:
            v = val.set_view(i).apply_mask()
            e = est.set_view(i * est_mult).apply_mask()
        else:
            v = val.set_view(i)
            e = est.set_view(i * est_mult)
            # remember that no masking was applied
            use_mask = False

        r_test[:, i], se_test[:, i] = nmet.j_corrcoef(v, 'pred', output_name)
        r_fit[:, i], se_fit[:, i] = nmet.j_corrcoef(e, 'pred', output_name)
        r_floor[:, i] = nmet.r_floor(v, 'pred', output_name)
        mse_test[:, i], se_mse_test[:, i] = nmet.j_nmse(v, 'pred', output_name)
        mse_fit[:, i], se_mse_fit[:, i] = nmet.j_nmse(e, 'pred', output_name)
        ll_test[:, i] = nmet.likelihood_poisson(v, 'pred', output_name)
        ll_fit[:, i] = nmet.likelihood_poisson(e, 'pred', output_name)

        if rec is not None:
            #if 'mask' in rec.signals.keys() and use_mask:
            #    r = rec.apply_mask()
            #else:
            r = rec
            # print('running r_ceiling')
            r_ceiling[:, i] = nmet.r_ceiling(v, r, 'pred', output_name)

    """
    # fix view_index = 0
    i = 0
    if ('mask' in val.signals.keys()) and use_mask:
        v = val.set_view(i).apply_mask()
        e = est.set_view(i*est_mult).apply_mask()
    else:
        v = val.set_view(i)
        e = est.set_view(i*est_mult)
        use_mask = False
    r_test, se_test = nmet.j_corrcoef(v, 'pred', output_name)
    r_fit, se_fit = nmet.j_corrcoef(e, 'pred', output_name)
    r_floor = nmet.r_floor(v, 'pred', output_name)
    mse_test, se_mse_test = nmet.j_nmse(v, 'pred', output_name)
    mse_fit, se_mse_fit = nmet.j_nmse(e, 'pred', output_name)
    ll_test = nmet.likelihood_poisson(v, 'pred', output_name)
    ll_fit = nmet.likelihood_poisson(e, 'pred', output_name)
    if rec is not None:
        if 'mask' in rec.signals.keys() and use_mask:
            r = rec.apply_mask()
        else:
            r = rec
        # print('running r_ceiling')
        r_ceiling = nmet.r_ceiling(v, r, 'pred', output_name)
    """

    # store per-channel, per-view metric arrays
    modelspec.meta['r_test'] = r_test
    modelspec.meta['se_test'] = se_test
    modelspec.meta['r_floor'] = r_floor
    modelspec.meta['mse_test'] = mse_test
    modelspec.meta['se_mse_test'] = se_mse_test
    modelspec.meta['ll_test'] = ll_test
    modelspec.meta['r_fit'] = r_fit
    modelspec.meta['se_fit'] = se_fit
    modelspec.meta['r_ceiling'] = r_ceiling
    modelspec.meta['mse_fit'] = mse_fit
    modelspec.meta['se_mse_fit'] = se_mse_fit
    modelspec.meta['ll_fit'] = ll_fit

    return modelspec
def standard_correlation(est, val, modelspecs, rec=None, use_mask=True):
    """
    Compute pred-vs-resp metrics on est and val data, optionally applying
    the 'mask' signal first, and store them in modelspecs[0][0]['meta'].

    use_mask: mask before computing metrics (if mask exists)

    NOTE: this definition shadows earlier definitions of
    standard_correlation in this module.
    """
    r_ceiling = 0
    if type(val) is not list:
        # NOTE(review): val[0] here indexes a non-list val -- presumably a
        # multi-view recording; confirm against callers.
        if ('mask' in val[0].signals.keys()) and use_mask:
            v = val.apply_mask()
            e = est.apply_mask()
        else:
            v = val
            e = est
        r_test, se_test = nmet.j_corrcoef(v, 'pred', 'resp')
        r_fit, se_fit = nmet.j_corrcoef(e, 'pred', 'resp')
        r_floor = nmet.r_floor(v, 'pred', 'resp')
        if rec is not None:
            r_ceiling = nmet.r_ceiling(v, rec, 'pred', 'resp')
        # BUGFIX: j_nmse returns a (value, se) pair (it is unpacked in the
        # branch below). Previously the tuple was assigned whole to
        # mse_test and se_mse_test stayed undefined, raising NameError at
        # the meta assignments below.
        mse_test, se_mse_test = nmet.j_nmse(v, 'pred', 'resp')
        mse_fit, se_mse_fit = nmet.j_nmse(e, 'pred', 'resp')
    elif len(val) == 1:
        # Single-recording list.
        if ('mask' in val[0].signals.keys()) and use_mask:
            v = val[0].apply_mask()
            e = est[0].apply_mask()
        else:
            v = val[0]
            e = est[0]
        r_test, se_test = nmet.j_corrcoef(v, 'pred', 'resp')
        r_fit, se_fit = nmet.j_corrcoef(e, 'pred', 'resp')
        r_floor = nmet.r_floor(v, 'pred', 'resp')
        if rec is not None:
            # best-effort: fall back to 0 if the ceiling computation fails
            try:
                r_ceiling = nmet.r_ceiling(v, rec, 'pred', 'resp')
            except Exception:
                r_ceiling = 0
        mse_test, se_mse_test = nmet.j_nmse(v, 'pred', 'resp')
        mse_fit, se_mse_fit = nmet.j_nmse(e, 'pred', 'resp')
    else:
        # unclear if this ever executes since jackknifed val sets are
        # typically already merged
        r = [nmet.corrcoef(p, 'pred', 'resp') for p in val]
        r_test = np.mean(r)
        se_test = np.std(r) / np.sqrt(len(val))
        r = [nmet.corrcoef(p, 'pred', 'resp') for p in est]
        r_fit = np.mean(r)
        se_fit = np.std(r) / np.sqrt(len(val))
        r_floor = [nmet.r_floor(p, 'pred', 'resp') for p in val]
        # TODO compute r_ceiling for multiple val sets
        r_ceiling = 0
        mse_test = [nmet.nmse(p, 'pred', 'resp') for p in val]
        mse_fit = [nmet.nmse(p, 'pred', 'resp') for p in est]
        se_mse_test = np.std(mse_test) / np.sqrt(len(val))
        se_mse_fit = np.std(mse_fit) / np.sqrt(len(est))
        mse_test = np.mean(mse_test)
        mse_fit = np.mean(mse_fit)

    ll_test = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in val]
    ll_fit = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in est]

    meta = modelspecs[0][0]['meta']
    meta['r_test'] = r_test
    meta['se_test'] = se_test
    meta['r_floor'] = r_floor
    meta['mse_test'] = mse_test
    meta['se_mse_test'] = se_mse_test
    meta['ll_test'] = np.mean(ll_test)
    meta['r_fit'] = r_fit
    meta['se_fit'] = se_fit
    meta['r_ceiling'] = r_ceiling
    meta['mse_fit'] = mse_fit
    meta['se_mse_fit'] = se_mse_fit
    meta['ll_fit'] = np.mean(ll_fit)

    return modelspecs
def standard_correlation_by_epochs(est, val, modelspec=None, modelspecs=None,
                                   epochs_list=None, rec=None):
    """
    Does the same thing as standard_correlation, except with subsets of data
    defined by epochs_list.

    To use this, first add epochs to define subsets of data.
    Then, pass epochs_list as a list of subsets to test.
    For example, ['A', 'B', ['A', 'B']] will measure correlations separately
    for all epochs marked 'A', all epochs marked 'B', and all epochs marked
    'A' or 'B'.

    Returns modelspec (or [modelspec] when called with the deprecated
    modelspecs list argument).
    """
    # some crazy stuff to maintain backward compatibility
    # eventually we will only support modelspec and deprecate support for
    # modelspecs lists.
    # BUGFIX: this path previously raised Warning, which made the
    # compatibility assignments below unreachable; warn and continue
    # instead so old callers keep working as intended.
    if modelspecs is not None:
        import warnings
        warnings.warn('Use of modelspecs list is deprecated',
                      DeprecationWarning)
        modelspec = modelspecs[0]
        list_modelspec = True
    else:
        list_modelspec = False

    for epochs in epochs_list:
        # Create a label for this subset. If epochs is a list, join
        # elements with "+".
        epoch_list_str = "+".join([str(x) for x in epochs])

        # Restrict resp to the requested epochs on copies so the caller's
        # recordings are untouched.
        val_copy = copy.deepcopy(val)
        for vc in val_copy:
            vc['resp'] = vc['resp'].select_epochs(epochs)
        est_copy = copy.deepcopy(est)
        for ec in est_copy:
            ec['resp'] = ec['resp'].select_epochs(epochs)

        # Compute scores for validation data
        r_test = [nmet.corrcoef(p, 'pred', 'resp') for p in val_copy]
        mse_test = [nmet.nmse(p, 'pred', 'resp') for p in val_copy]
        ll_test = [
            nmet.likelihood_poisson(p, 'pred', 'resp') for p in val_copy
        ]
        # BUGFIX: r_floor was computed on the full val set; use the
        # epoch-restricted copy so it describes the same subset as the
        # other metrics.
        r_floor = [nmet.r_floor(p, 'pred', 'resp') for p in val_copy]
        if rec is not None:
            r_ceiling = [
                nmet.r_ceiling(p, rec, 'pred', 'resp') for p in val_copy
            ]

        # Repeat for est data.
        r_fit = [nmet.corrcoef(p, 'pred', 'resp') for p in est_copy]
        mse_fit = [nmet.nmse(p, 'pred', 'resp') for p in est_copy]
        ll_fit = [nmet.likelihood_poisson(p, 'pred', 'resp') for p in est_copy]

        # Average across recordings and store under the subset label.
        modelspec.meta[epoch_list_str] = {}
        modelspec.meta[epoch_list_str]['r_test'] = np.mean(r_test)
        modelspec.meta[epoch_list_str]['mse_test'] = np.mean(mse_test)
        modelspec.meta[epoch_list_str]['ll_test'] = np.mean(ll_test)
        modelspec.meta[epoch_list_str]['r_fit'] = np.mean(r_fit)
        modelspec.meta[epoch_list_str]['r_floor'] = np.mean(r_floor)
        if rec is not None:
            modelspec.meta[epoch_list_str]['r_ceiling'] = np.mean(r_ceiling)
        modelspec.meta[epoch_list_str]['mse_fit'] = np.mean(mse_fit)
        modelspec.meta[epoch_list_str]['ll_fit'] = np.mean(ll_fit)

    if list_modelspec:
        # backward compatibility
        return [modelspec]
    else:
        return modelspec