Example #1
def _correlation(res):
    res = filter.exclude(res, {'odor_valence': 'US'})
    # for i, dff in enumerate(res['data']):
    #     s = res['DAQ_O_ON_F'][i]
    #     e = res['DAQ_W_ON_F'][i]
    #     amplitude = np.mean(dff[:,:, s:e], axis=2)
    #     corrcoefs = np.corrcoef(amplitude.T)
    #     mask = ~np.eye(corrcoefs.shape[0], dtype=bool)
    #     corrcoef = np.mean(corrcoefs[mask])
    #     res['consistency_corrcoef'].append(corrcoef)

    for i, dff in enumerate(res['data']):
        cell_mask = res['msig'][i]
        s = res['DAQ_O_ON_F'][i]
        e = res['DAQ_W_ON_F'][i]
        dff = dff[:, :, s:e]
        corrcoef = []
        for dff_per_cell in dff:
            corrcoefs = np.corrcoef(dff_per_cell)
            diagmask = ~np.eye(corrcoefs.shape[0], dtype=bool)
            nanmask = ~np.isnan(corrcoefs)
            mask = np.logical_and(diagmask, nanmask)
            corrcoef_per_cell = np.mean(corrcoefs[mask])
            corrcoef.append(corrcoef_per_cell)
        corrcoef = np.array(corrcoef)
        mask = ~np.isnan(corrcoef)  # drop cells whose correlation came out NaN
        corrcoef_ = np.mean(corrcoef[mask])
        res['consistency_corrcoef'].append(corrcoef_)
    res['consistency_corrcoef'] = np.array(res['consistency_corrcoef'])
    return res
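
A minimal, self-contained sketch of the masking step above, on synthetic data (array shapes and values are made up for illustration): the diagonal of the trial-by-trial correlation matrix and any NaN entries are dropped before averaging.

import numpy as np

rng = np.random.default_rng(0)
dff_per_cell = rng.normal(size=(8, 20))               # 8 trials x 20 odor-period frames, hypothetical shape

corrcoefs = np.corrcoef(dff_per_cell)                 # 8 x 8 trial-by-trial correlation matrix
diagmask = ~np.eye(corrcoefs.shape[0], dtype=bool)    # exclude self-correlations on the diagonal
nanmask = ~np.isnan(corrcoefs)                        # exclude NaNs from zero-variance trials
consistency = np.mean(corrcoefs[np.logical_and(diagmask, nanmask)])
print(consistency)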
Example #2
def custom_get_res(start_time, end_time):
    list_of_res = []
    for i, condition in enumerate(conditions):
        if any(s in condition.name for s in ['YFP', 'HALO', 'JAWS']):
            data_path = os.path.join(Config.LOCAL_DATA_PATH,
                                     Config.LOCAL_DATA_BEHAVIOR_FOLDER,
                                     condition.name)
        else:
            data_path = os.path.join(Config.LOCAL_DATA_PATH,
                                     Config.LOCAL_DATA_TIMEPOINT_FOLDER,
                                     condition.name)
        res = custom_analyze_behavior(data_path,
                                      condition,
                                      start_time=start_time,
                                      end_time=end_time)

        if condition.name == 'OFC_LONGTERM':
            res = filter.exclude(res, {'mouse': 3})

        if 'YFP' in condition.name:
            res['condition'] = np.array(['YFP'] * len(res['mouse']))
        elif 'JAWS' in condition.name:
            res['condition'] = np.array(['JAWS'] * len(res['mouse']))
        elif 'HALO' in condition.name:
            res['condition'] = np.array(['HALO'] * len(res['mouse']))
        else:
            res['condition'] = np.array([condition.name] * len(res['mouse']))
        list_of_res.append(res)
    all_res = defaultdict(list)
    for res, condition in zip(list_of_res, conditions):
        reduce.chain_defaultdicts(all_res, res)
    return all_res
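
The chaining step merges per-condition result dictionaries key by key. A generic stand-in (not the repo's reduce.chain_defaultdicts, whose implementation is not shown here) might look like this:

from collections import defaultdict

def chain_dicts(accumulator, new_res):
    # append every entry of new_res onto the matching list in the accumulator
    for key, values in new_res.items():
        accumulator[key].extend(list(values))

all_res = defaultdict(list)
chain_dicts(all_res, {'mouse': [0, 1], 'condition': ['YFP', 'YFP']})
chain_dicts(all_res, {'mouse': [0], 'condition': ['JAWS']})
print(dict(all_res))   # {'mouse': [0, 1, 0], 'condition': ['YFP', 'YFP', 'JAWS']}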
Example #3
def scalpel(res2, keys, mouse, sessions, valence='CS+'):
    temp = filter.filter(res2, {'mouse': mouse, 'odor_valence': valence})
    print(temp['mouse'].size)
    assert temp['mouse'].size == 1
    ix = np.isin(temp['session'][0], sessions)
    ix = np.invert(ix)
    for k in keys:
        temp[k][0] = temp[k][0][ix]
    _, inverse = np.unique(temp['session'][0], return_inverse=True)
    temp['session'][0] = inverse
    res2 = filter.exclude(res2, {'mouse': mouse, 'odor_valence': valence})
    reduce.chain_defaultdicts(res2, temp)
    return res2
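
A small sketch of the session re-indexing that scalpel performs, using made-up session labels: after masking out the excluded sessions, np.unique(..., return_inverse=True) renumbers the survivors as consecutive integers.

import numpy as np

sessions = np.array([2, 3, 5, 7, 9, 11])   # hypothetical session labels for one mouse
excluded = [5, 7]

keep = ~np.isin(sessions, excluded)        # boolean mask of sessions to keep
kept = sessions[keep]                      # [ 2  3  9 11]
_, inverse = np.unique(kept, return_inverse=True)
print(inverse)                             # [0 1 2 3] -- consecutive session indices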
Example #4
def get_roc(res):
    def _dprime(a, b):
        u1, u2 = np.mean(a), np.mean(b)
        s1, s2 = np.std(a), np.std(b)
        return (u1 - u2) / np.sqrt(.5 * (np.square(s1) + np.square(s2)))

    def _roc(a, b):
        import sklearn.metrics
        data = np.concatenate((a, b))
        labels = np.concatenate(
            (np.ones_like(a), np.zeros_like(b))).astype('bool')
        roc = sklearn.metrics.roc_auc_score(labels, data)
        return roc

    def _rolling_window(a, window):
        shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
        strides = a.strides + (a.strides[-1], )
        return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)

    key = 'lick'
    print_key = 'roc'
    x_key = 'roc_trial'
    window = 10
    res[print_key] = np.copy(res[key])
    res[x_key] = np.copy(res['trial'])

    res = filter.exclude(res, {'odor_valence': 'US'})
    res_ = reduce.new_filter_reduce(res,
                                    filter_keys=['mouse', 'odor_valence'],
                                    reduce_key=key)
    combinations, list_of_ixs = filter.retrieve_unique_entries(res_, ['mouse'])

    for i, ixs in enumerate(list_of_ixs):
        assert len(ixs) == 2
        assert res_['odor_valence'][ixs[0]] == 'CS+'
        assert res_['odor_valence'][ixs[1]] == 'CS-'

        a = _rolling_window(res_[key][ixs[0]], window)
        b = _rolling_window(res_[key][ixs[1]], window)
        rocs = np.array([_roc(x, y) for x, y in zip(a, b)])  # ROC AUC per rolling window (the _dprime helper above is not used here)

        res_[print_key][ixs[0]] = rocs
        res_[print_key][ixs[1]] = rocs
        res_[x_key][ixs[0]] = np.arange(len(rocs))
        res_[x_key][ixs[1]] = np.arange(len(rocs))
    return res_
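
A self-contained sketch of the rolling-window ROC computation above, on synthetic lick counts (names, rates, and trial counts are made up for illustration):

import numpy as np
import sklearn.metrics

def rolling_window(a, window):
    # sliding windows over the trailing axis, same stride trick as _rolling_window above
    shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
    strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)

rng = np.random.default_rng(0)
window = 10
csp_licks = rng.poisson(5, size=60).astype(float)   # CS+ lick counts per trial
csm_licks = rng.poisson(2, size=60).astype(float)   # CS- lick counts per trial

aucs = []
for a, b in zip(rolling_window(csp_licks, window), rolling_window(csm_licks, window)):
    labels = np.concatenate((np.ones_like(a), np.zeros_like(b))).astype(bool)
    aucs.append(sklearn.metrics.roc_auc_score(labels, np.concatenate((a, b))))
print(np.round(aucs[:5], 2))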
Example #5
def plot_summary_odor_pretraining(res,
                                  start_days,
                                  end_days,
                                  arg_naive,
                                  figure_path,
                                  save,
                                  excitatory=True):
    ax_args_copy = ax_args.copy()
    res = copy.copy(res)
    list_of_days = list(zip(start_days, end_days))
    mice = np.unique(res['mouse'])
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    get_responsive_cells(start_end_day_res)

    if arg_naive:
        day_start = filter.filter(start_end_day_res,
                                  {'odor_standard': 'PT Naive'})
        day_start['odor_standard'] = np.array(['PT CS+'] *
                                              len(day_start['odor_standard']))
        day_end = filter.filter_days_per_mouse(start_end_day_res,
                                               days_per_mouse=end_days)
        day_end = filter.filter(day_end, {'odor_standard': 'PT CS+'})
        reduce.chain_defaultdicts(day_start, day_end)
        start_end_day_res = day_start
    else:
        start_end_day_res = filter.exclude(start_end_day_res,
                                           {'odor_standard': 'PT Naive'})

    add_naive_learned(start_end_day_res, start_days, end_days, 'a', 'b')
    filter.assign_composite(start_end_day_res,
                            loop_keys=['odor_standard', 'training_day'])

    odor_list = ['PT CS+']
    colors = ['Orange']
    ax_args_copy = ax_args_copy.copy()
    ax_args_copy.update({
        'xlim': [-1, 10],
        'ylim': [0, .4],
        'yticks': [0, .1, .2, .3, .4]
    })
    for i, odor in enumerate(odor_list):
        save_arg = False
        reuse_arg = True
        if i == 0:
            reuse_arg = False

        if save and i == len(odor_list) - 1:
            save_arg = True

        plot.plot_results(start_end_day_res,
                          select_dict={'odor_standard': odor},
                          x_key='odor_standard_training_day',
                          y_key='Fraction Responsive',
                          loop_keys='mouse',
                          colors=[colors[i]] * len(mice),
                          path=figure_path,
                          plot_args=line_args,
                          ax_args=ax_args_copy,
                          save=save_arg,
                          reuse=reuse_arg,
                          fig_size=(2.5, 1.5),
                          legend=False,
                          name_str='_E' if excitatory else '_I')

    before_pt = filter.filter(start_end_day_res,
                              filter_dict={
                                  'training_day': 'a',
                                  'odor_standard': 'PT CS+'
                              })
    after_pt = filter.filter(start_end_day_res,
                             filter_dict={
                                 'training_day': 'b',
                                 'odor_standard': 'PT CS+'
                             })
    from scipy.stats import ranksums, wilcoxon, kruskal

    print('Before PT CS+: {}'.format(np.mean(
        before_pt['Fraction Responsive'])))
    print('After PT CS+: {}'.format(np.mean(after_pt['Fraction Responsive'])))
    from scipy.stats import sem
    print('After PT CS+ SEM: {}'.format(sem(after_pt['Fraction Responsive'])))
    print('Wilcoxon: {}'.format(
        wilcoxon(before_pt['Fraction Responsive'],
                 after_pt['Fraction Responsive'])))
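
A tiny worked example of the paired Wilcoxon signed-rank test printed above, on fabricated per-mouse values (scipy.stats.wilcoxon expects two equal-length paired samples, here one value per mouse):

import numpy as np
from scipy.stats import wilcoxon

before = np.array([0.05, 0.08, 0.11, 0.07, 0.09])   # hypothetical fraction responsive, day 'a'
after = np.array([0.18, 0.22, 0.15, 0.20, 0.25])    # same mice, day 'b'
stat, p = wilcoxon(before, after)
print('Wilcoxon: statistic={}, p={}'.format(stat, p))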
Example #6
def plot_summary_odor_and_water(res,
                                odor_start_days,
                                water_start_days,
                                end_days,
                                use_colors=True,
                                excitatory=True,
                                arg='odor_valence',
                                figure_path=None):
    include_water = True

    ax_args_copy = ax_args.copy()
    res = copy.copy(res)
    get_responsive_cells(res)
    mice = np.unique(res['mouse'])

    list_of_days = list(zip(odor_start_days, end_days))
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    start_end_day_res = filter.exclude(start_end_day_res,
                                       {'odor_valence': 'US'})
    add_naive_learned(start_end_day_res, odor_start_days, end_days, 'a', 'b')

    if include_water:
        list_of_days = list(zip(water_start_days, end_days))
        start_end_day_res_water = filter.filter_days_per_mouse(
            res, days_per_mouse=list_of_days)
        start_end_day_res_water = filter.filter(start_end_day_res_water,
                                                {'odor_valence': 'US'})
        add_naive_learned(start_end_day_res_water, water_start_days, end_days,
                          'a', 'b')
        reduce.chain_defaultdicts(start_end_day_res, start_end_day_res_water)

    ax_args_copy = ax_args_copy.copy()
    if arg == 'odor_valence':
        start_end_day_res = reduce.new_filter_reduce(
            start_end_day_res,
            filter_keys=['training_day', 'mouse', 'odor_valence'],
            reduce_key='Fraction Responsive')
        odor_list = ['CS+', 'CS-']
        ax_args_copy.update({
            'xlim': [-1, 6],
            'ylim': [0, .6],
            'yticks': [0, .1, .2, .3, .4, .5]
        })
        colors = ['Green', 'Red']
    elif arg == 'naive':
        arg = 'odor_valence'
        start_end_day_res = reduce.new_filter_reduce(
            start_end_day_res,
            filter_keys=['training_day', 'mouse', 'odor_valence'],
            reduce_key='Fraction Responsive')
        odor_list = ['CS+']
        ax_args_copy.update({
            'xlim': [-1, 4],
            'ylim': [0, .6],
            'yticks': [0, .1, .2, .3, .4, .5]
        })
        colors = ['GoldenRod']
    else:
        odor_list = ['CS+1', 'CS+2', 'CS-1', 'CS-2']
        colors = ['Green', 'Green', 'Red', 'Red']
        ax_args_copy.update({
            'xlim': [-1, 10],
            'ylim': [0, .6],
            'yticks': [0, .1, .2, .3, .4, .5]
        })

    filter.assign_composite(start_end_day_res, loop_keys=[arg, 'training_day'])
    if not use_colors:
        colors = ['Black'] * 4

    name_str = '_E' if excitatory else '_I'
    for i, odor in enumerate(odor_list):
        reuse_arg = True
        if i == 0:
            reuse_arg = False
        plot.plot_results(start_end_day_res,
                          select_dict={arg: odor},
                          x_key=arg + '_training_day',
                          y_key='Fraction Responsive',
                          loop_keys='mouse',
                          colors=[colors[i]] * len(mice),
                          path=figure_path,
                          plot_args=line_args,
                          ax_args=ax_args_copy,
                          save=False,
                          reuse=reuse_arg,
                          fig_size=(2.5, 1.5),
                          legend=False,
                          name_str=','.join([str(x) for x in odor_start_days]))

    plot.plot_results(start_end_day_res,
                      select_dict={'odor_standard': 'US'},
                      x_key='training_day',
                      y_key='Fraction Responsive',
                      loop_keys='mouse',
                      colors=['Turquoise'] * len(mice),
                      path=figure_path,
                      plot_args=line_args,
                      ax_args=ax_args_copy,
                      fig_size=(1.6, 1.5),
                      legend=False,
                      reuse=True,
                      save=True,
                      name_str=name_str)

    before_odor = filter.filter(start_end_day_res,
                                filter_dict={
                                    'training_day': 'a',
                                    'odor_valence': ['CS+', 'CS-']
                                })
    after_odor = filter.filter(start_end_day_res,
                               filter_dict={
                                   'training_day': 'b',
                                   'odor_valence': ['CS+', 'CS-']
                               })
    before_csp = filter.filter(start_end_day_res,
                               filter_dict={
                                   'training_day': 'a',
                                   'odor_valence': 'CS+'
                               })
    after_csp = filter.filter(start_end_day_res,
                              filter_dict={
                                  'training_day': 'b',
                                  'odor_valence': 'CS+'
                              })
    before_csm = filter.filter(start_end_day_res,
                               filter_dict={
                                   'training_day': 'a',
                                   'odor_valence': 'CS-'
                               })
    after_csm = filter.filter(start_end_day_res,
                              filter_dict={
                                  'training_day': 'b',
                                  'odor_valence': 'CS-'
                              })
    before_water = filter.filter(start_end_day_res,
                                 filter_dict={
                                     'training_day': 'a',
                                     'odor_valence': 'US'
                                 })
    after_water = filter.filter(start_end_day_res,
                                filter_dict={
                                    'training_day': 'b',
                                    'odor_valence': 'US'
                                })

    try:
        from scipy.stats import ranksums, wilcoxon, kruskal

        print('Before Odor: {}'.format(
            np.mean(before_odor['Fraction Responsive'])))
        print('After Odor: {}'.format(
            np.mean(after_odor['Fraction Responsive'])))
        print('Wilcoxon: {}'.format(
            wilcoxon(before_odor['Fraction Responsive'],
                     after_odor['Fraction Responsive'])))

        print('Before CS+: {}'.format(
            np.mean(before_csp['Fraction Responsive'])))
        print('After CS+: {}'.format(np.mean(
            after_csp['Fraction Responsive'])))
        print('Wilcoxon: {}'.format(
            wilcoxon(before_csp['Fraction Responsive'],
                     after_csp['Fraction Responsive'])))

        print('Before CS-: {}'.format(
            np.mean(before_csm['Fraction Responsive'])))
        print('After CS-: {}'.format(np.mean(
            after_csm['Fraction Responsive'])))
        print('Wilcoxon: {}'.format(
            wilcoxon(before_csm['Fraction Responsive'],
                     after_csm['Fraction Responsive'])))

        print('Before US: {}'.format(
            np.mean(before_water['Fraction Responsive'])))
        print('After US: {}'.format(np.mean(
            after_water['Fraction Responsive'])))
        print('Wilcoxon: {}'.format(
            wilcoxon(before_water['Fraction Responsive'],
                     after_water['Fraction Responsive'])))
    except Exception as exc:
        print('stats did not work: {}'.format(exc))
Example #7
    res = statistics.analyze.analyze_data(save_path,
                                          condition_config,
                                          m_threshold=0.04)

    # naive_config = statistics.analyze.OFC_LONGTERM_Config()
    # data_path_ = os.path.join(Config.LOCAL_DATA_PATH, Config.LOCAL_DATA_TIMEPOINT_FOLDER, naive_config.condition.name)
    # save_path_ = os.path.join(Config.LOCAL_EXPERIMENT_PATH, 'COUNTING', naive_config.condition.name)
    # res_naive = statistics.analyze.analyze_data(save_path_, condition_config, m_threshold=0.04)
    # res_naive = filter.exclude(res_naive, {'mouse': 3})
    # res_naive['mouse'] += 5
    # temp_res_naive = behavior.behavior_analysis.analyze_behavior(data_path_, naive_config.condition)
    # temp_res_naive = filter.filter(temp_res_naive, {'odor_valence': ['CS+']})
    # temp_res_naive = filter.exclude(temp_res_naive, {'mouse': 3})
    # temp_res_naive['mouse'] += 5
    res = filter.exclude(res, {'day': 0})
    # reduce.chain_defaultdicts(res, res_naive)
    # reduce.chain_defaultdicts(temp_res, temp_res_naive)
    # learned_days_combined = [3, 3, 2, 3, 3, 3, 2, 2]
    # last_days_combined = [5, 5, 3, 4, 4, 8, 7, 5]

    cory.main(res, temp_res, figure_path, excitatory=True, valence='CS+')
    # cory.main(res, temp_res, figure_path, excitatory=False,valence='CS+')
    # cory.main(res, temp_res, figure_path, excitatory=True,valence='CS-')
    # cory.main(res, temp_res, figure_path, excitatory=False,valence='CS-')

    # waveform.behavior_vs_neural_onset(res, temp_res, learned_day_per_mouse, last_day_per_mouse, figure_path,
    #                                   behavior_arg='onset')
    # waveform.behavior_vs_neural_onset(res, temp_res, learned_days_combined, last_days_combined, figure_path, behavior_arg='com')
    # waveform.behavior_vs_neural_onset(res, temp_res, learned_days_combined, last_days_combined, figure_path, behavior_arg='onset')
    # waveform.behavior_vs_neural_onset(res, temp_res, learned_days_combined, last_days_combined, figure_path, behavior_arg='magnitude')
Example #8
color_dict = {'Pretraining_CS+': 'C1', 'Discrimination_CS+': 'green', 'Discrimination_CS-': 'red'}
res = defaultdict(list)
for experiment in experiments:
    for directory in directories:
        halo_files = sorted(glob.glob(os.path.join(experiment.path, directory, constants.halo + '*')))
        yfp_files = sorted(glob.glob(os.path.join(experiment.path, directory, constants.yfp + '*')))
        res1 = analysis.parse(halo_files, experiment=experiment, condition=constants.halo, phase = directory,
                              add_raw=False)
        res1['experiment'] = np.array([experiment.name] * len(res1['odor_valence']))
        res2 = analysis.parse(yfp_files, experiment=experiment, condition=constants.yfp, phase = directory,
                              add_raw=False)
        res2['experiment'] = np.array([experiment.name] * len(res2['odor_valence']))

        if experiment.name == 'MPFC_DT':
            res1 = filter.exclude(res1, {'mouse':['H01']})
            res2 = filter.exclude(res2, {'mouse':['H01']})
        if experiment.name == 'MPFC_PT':
            res1 = filter.exclude(res1, {'mouse': ['Y01']})
            res2 = filter.exclude(res2, {'mouse': ['Y01']})
        # if experiment.name == 'OFC_DT': # the unusual mouse
        #     res1 = filter.exclude(res1, {'mouse': ['H03']})
        #     res2 = filter.exclude(res2, {'mouse': ['H03']})

        keys = analysis.Indices().__dict__.keys()
        if experiment.name == 'OFC_PT' and directory == constants.pretraining_directory:
            mice = ['Y11','Y12','Y13']
            session_list = [[2,3,4,5,6], [5,7,9,11],[4,5,6]]

            for mouse, sessions in zip(mice, session_list):
                res2 = scalpel(res2, keys, mouse, sessions)
Example #9
def plot_power(res,
               start_days,
               end_days,
               figure_path,
               excitatory=True,
               odor_valence=('CS+',),
               naive=False,
               colors_before={
                   'CS+': 'Green',
                   'CS-': 'Red'
               },
               colors_after={
                   'CS+': 'Green',
                   'CS-': 'Red'
               },
               ylim=[0, .1],
               align=True,
               pad=True):
    res = copy.copy(res)
    _power(res, excitatory)

    if pad:
        right_on = np.median(res['DAQ_O_ON_F'])
        for i, odor_on in enumerate(res['DAQ_O_ON_F']):
            if np.abs(odor_on - right_on) > 2:
                diff = (right_on - odor_on).astype(int)
                if diff > 0:
                    p = res['Power'][i]
                    newp = np.zeros_like(p)
                    newp[:diff] = p[0]
                    newp[diff:] = p[:-diff]
                    res['Power'][i] = newp
                    print('early odor time. mouse: {}, day: {}'.format(
                        res['mouse'][i], res['day'][i]))
                else:
                    p = res['Power'][i]
                    newp = np.zeros_like(p)
                    newp[:diff] = p[-diff:]
                    newp[diff:] = p[-1]
                    res['Power'][i] = newp
                    print('late odor time. mouse: {}, day: {}'.format(
                        res['mouse'][i], res['day'][i]))

    if align:
        nF = [len(x) for x in res['Power']]
        max_frame = np.max(nF)
        for i, p in enumerate(res['Power']):
            if len(p) < max_frame:
                newp = np.zeros(max_frame)
                newp[:len(p)] = p
                newp[len(p):] = p[-1]
                res['Power'][i] = newp
                res['Time'][i] = np.arange(0, max_frame)
                print('pad frames. mouse: {}, day: {}'.format(
                    res['mouse'][i], res['day'][i]))

    list_of_days = list(zip(start_days, end_days))
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    add_naive_learned(start_end_day_res, start_days, end_days)
    if naive:
        start_end_day_res = filter.exclude(start_end_day_res, {
            'odor_standard': 'PT CS+',
            'training_day': 'Naive'
        })
        ix = start_end_day_res['odor_valence'] == 'PT Naive'
        start_end_day_res['odor_valence'][ix] = 'PT CS+'
    start_end_day_sum_res = reduce.new_filter_reduce(
        start_end_day_res,
        filter_keys=['training_day', 'odor_valence'],
        reduce_key='Power')

    ax_args_copy = trace_ax_args.copy()
    if excitatory:
        yticks = np.arange(0, .2, .05)
    else:
        yticks = -1 * np.arange(0, .2, .025)
    ax_args_copy.update({
        'xticks': [res['DAQ_O_ON_F'][-1], res['DAQ_W_ON_F'][-1]],
        'xticklabels': ['ON', 'US'],
        'ylim':
        ylim,
        'yticks':
        yticks
    })

    colors_b = [colors_before[x] for x in odor_valence]
    colors = [colors_after[x] for x in odor_valence]

    strr = ','.join([str(x) for x in start_days]) + '_' + ','.join(
        [str(x) for x in end_days])
    if excitatory:
        strr += '_E'
    else:
        strr += '_I'
    plot.plot_results(start_end_day_sum_res,
                      select_dict={
                          'odor_valence': odor_valence,
                          'training_day': 'Naive'
                      },
                      x_key='Time',
                      y_key='Power',
                      loop_keys='odor_valence',
                      error_key='Power_sem',
                      path=figure_path,
                      plot_function=plt.fill_between,
                      plot_args=fill_args,
                      ax_args=ax_args_copy,
                      colors=colors_b,
                      fig_size=(2, 1.5),
                      rect=(.3, .2, .6, .6),
                      save=False)

    plot.plot_results(start_end_day_sum_res,
                      select_dict={
                          'odor_valence': odor_valence,
                          'training_day': 'Naive'
                      },
                      x_key='Time',
                      y_key='Power',
                      loop_keys='odor_valence',
                      path=figure_path,
                      plot_args=trace_args,
                      ax_args=ax_args_copy,
                      colors=colors_b,
                      fig_size=(2, 1.5),
                      reuse=True,
                      save=False)

    plot.plot_results(start_end_day_sum_res,
                      select_dict={
                          'odor_valence': odor_valence,
                          'training_day': 'Learned'
                      },
                      x_key='Time',
                      y_key='Power',
                      loop_keys='odor_valence',
                      error_key='Power_sem',
                      path=figure_path,
                      plot_function=plt.fill_between,
                      plot_args=fill_args,
                      ax_args=ax_args_copy,
                      colors=colors,
                      fig_size=(2, 1.5),
                      reuse=True,
                      save=False)

    plot.plot_results(start_end_day_sum_res,
                      select_dict={
                          'odor_valence': odor_valence,
                          'training_day': 'Learned'
                      },
                      x_key='Time',
                      y_key='Power',
                      loop_keys='odor_valence',
                      path=figure_path,
                      plot_args=trace_args,
                      ax_args=ax_args_copy,
                      colors=colors,
                      fig_size=(2, 1.5),
                      reuse=True,
                      name_str=strr)

    for i, x in enumerate(start_end_day_res['Power']):
        on, off = [
            start_end_day_res['DAQ_O_ON_F'][i],
            start_end_day_res['DAQ_W_ON_F'][i]
        ]
        y = np.max(x[on:off]) - np.min(x)
        start_end_day_res['stat'].append(y)
    start_end_day_res['stat'] = np.array(start_end_day_res['stat'])

    for valence in odor_valence:
        before_odor = filter.filter(start_end_day_res,
                                    filter_dict={
                                        'training_day': 'Naive',
                                        'odor_valence': valence
                                    })
        after_odor = filter.filter(start_end_day_res,
                                   filter_dict={
                                       'training_day': 'Learned',
                                       'odor_valence': valence
                                   })

        try:
            from scipy.stats import ranksums, wilcoxon, kruskal
            print(before_odor['odor_valence'])
            # print('Before: {}'.format(before_odor['stat']))
            print('Before: {}'.format(np.mean(before_odor['stat'])))
            # print('After: {}'.format(after_odor['stat']))
            print('After: {}'.format(np.mean(after_odor['stat'])))
            print('Wilcoxon: {}'.format(
                wilcoxon(before_odor['stat'], after_odor['stat'])))
        except Exception as exc:
            print('stats did not work: {}'.format(exc))

    return before_odor['stat'], after_odor['stat']
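
A toy illustration of the onset-padding step in plot_power, with made-up numbers: a trace whose odor onset is earlier than the median onset is shifted right, holding the first sample constant in the inserted frames.

import numpy as np

power = np.arange(10, dtype=float)   # one power trace, hypothetical values
right_on, odor_on = 8, 5             # median onset frame vs. this recording's onset frame
diff = int(right_on - odor_on)       # positive: odor came early, so shift the trace later

newp = np.zeros_like(power)
newp[:diff] = power[0]
newp[diff:] = power[:-diff]
print(newp)                          # [0. 0. 0. 0. 1. 2. 3. 4. 5. 6.]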
Example #10
def _correlation(res, loop_keys, shuffle, odor_end=True, direction=0):
    res = filter.exclude(res, {'odor_valence': 'US'})
    for i, dff in enumerate(res['dff']):
        s = res['DAQ_O_ON_F'][i]
        e = res['DAQ_W_ON_F'][i]
        if odor_end:
            amplitude = np.max(dff[:, s:e], axis=1)
        else:
            amplitude = np.max(dff[:, s:], axis=1)
        res['corr_amp'].append(amplitude)
    res['corr_amp'] = np.array(res['corr_amp'])

    combinations, list_of_ixs = filter.retrieve_unique_entries(
        res, loop_keys=loop_keys)

    loop_keys_ = loop_keys + ['odor_valence', 'odor_standard']

    corrcoefs = defaultdict(list)
    for ixs in list_of_ixs:
        data = res['corr_amp'][ixs]
        for i, data_1 in enumerate(data):
            for j, data_2 in enumerate(data):
                if shuffle:
                    n_iter = 10
                    corrcoef = 0
                    for k in range(n_iter):
                        corrcoef += np.corrcoef(
                            (np.random.permutation(data_1),
                             np.random.permutation(data_2)))[0, 1]
                    corrcoef /= (n_iter * 1.0)
                else:
                    if i != j:
                        datas = res['data'][ixs[i]]
                        s = res['DAQ_O_ON_F'][ixs[i]]
                        e = res['DAQ_W_ON_F'][ixs[i]]
                        ds = []
                        for cell_data in datas:
                            config = psth.psth_helper.PSTHConfig()
                            d = psth.psth_helper.subtract_baseline(
                                cell_data, config.baseline_start,
                                s - config.baseline_end)
                            ds.append(d)
                        datas_i = np.array(ds)

                        datas = res['data'][ixs[j]]
                        s = res['DAQ_O_ON_F'][ixs[j]]
                        e = res['DAQ_W_ON_F'][ixs[j]]
                        ds = []
                        for cell_data in datas:
                            config = psth.psth_helper.PSTHConfig()
                            d = psth.psth_helper.subtract_baseline(
                                cell_data, config.baseline_start,
                                s - config.baseline_end)
                            ds.append(d)
                        datas_j = np.array(ds)

                        corrcoefs_ = []
                        for rep in np.arange(100):
                            s_ix_a = np.random.choice(datas_i.shape[1],
                                                      datas_i.shape[1] // 2,
                                                      replace=False)
                            s_ix_b = np.random.choice(datas_j.shape[1],
                                                      datas_j.shape[1] // 2,
                                                      replace=False)
                            dffa = np.mean(datas_i[:, s_ix_a, :], axis=1)
                            dffb = np.mean(datas_j[:, s_ix_b, :], axis=1)
                            if odor_end:
                                dffa = dffa[:, s:e]
                                dffb = dffb[:, s:e]
                            else:
                                dffa = dffa[:, s:]
                                dffb = dffb[:, s:]

                            if direction == 1:
                                dffa[dffa < 0] = 0
                                dffb[dffb < 0] = 0
                                amplitudea = np.max(dffa, axis=1)
                                amplitudeb = np.max(dffb, axis=1)
                            elif direction == -1:
                                dffa[dffa > 0] = 0
                                dffb[dffb > 0] = 0
                                amplitudea = np.min(dffa, axis=1)
                                amplitudeb = np.min(dffb, axis=1)
                            elif direction == 0:
                                amplitudea = np.max(dffa, axis=1)
                                amplitudeb = np.max(dffb, axis=1)
                            else:
                                raise ValueError('no direction given')

                            # if odor_end:
                            #     amplitudea = np.max(dffa[:, s:e], axis=1)
                            #     amplitudeb = np.max(dffb[:, s:e], axis=1)
                            # else:
                            #     amplitudea = np.max(dffa[:, s:], axis=1)
                            #     amplitudeb = np.max(dffb[:, s:], axis=1)
                            corrcoefs_.append(
                                np.corrcoef(amplitudea, amplitudeb)[0, 1])
                        corrcoef = np.mean(corrcoefs_)

                        # corrcoef = np.corrcoef((data_1, data_2))[0, 1]
                    else:
                        # corrcoef = np.corrcoef((data_1, data_2))[0, 1]

                        datas = res['data'][ixs[i]]
                        s = res['DAQ_O_ON_F'][ixs[i]]
                        e = res['DAQ_W_ON_F'][ixs[i]]
                        ds = []
                        for cell_data in datas:
                            config = psth.psth_helper.PSTHConfig()
                            d = psth.psth_helper.subtract_baseline(
                                cell_data, config.baseline_start,
                                s - config.baseline_end)
                            ds.append(d)
                        datas = np.array(ds)

                        corrcoefs_ = []
                        for rep in np.arange(100):
                            s_ix_a = np.random.choice(datas.shape[1],
                                                      datas.shape[1] // 2,
                                                      replace=False)
                            s_ix_b = [
                                x for x in np.arange(datas.shape[1])
                                if x not in s_ix_a
                            ]
                            dffa = np.mean(datas[:, s_ix_a, :], axis=1)
                            dffb = np.mean(datas[:, s_ix_b, :], axis=1)
                            # if odor_end:
                            #     amplitudea = np.max(dffa[:, s:e], axis=1)
                            #     amplitudeb = np.max(dffb[:, s:e], axis=1)
                            # else:
                            #     amplitudea = np.max(dffa[:, s:], axis=1)
                            #     amplitudeb = np.max(dffb[:, s:], axis=1)
                            if direction == 1:
                                dffa[dffa < 0] = 0
                                dffb[dffb < 0] = 0
                                amplitudea = np.max(dffa, axis=1)
                                amplitudeb = np.max(dffb, axis=1)
                            elif direction == -1:
                                dffa[dffa > 0] = 0
                                dffb[dffb > 0] = 0
                                amplitudea = np.min(dffa, axis=1)
                                amplitudeb = np.min(dffb, axis=1)
                            elif direction == 0:
                                amplitudea = np.max(dffa, axis=1)
                                amplitudeb = np.max(dffb, axis=1)
                            else:
                                raise ValueError('no direction given')
                            corrcoefs_.append(
                                np.corrcoef(amplitudea, amplitudeb)[0, 1])
                        corrcoef = np.mean(corrcoefs_)

                corrcoefs['corrcoef'].append(corrcoef)
                corrcoefs['Odor_A'].append(i)
                corrcoefs['Odor_B'].append(j)
                for loop_key in loop_keys_:
                    corrcoefs[loop_key].append(res[loop_key][ixs[0]])

    for key, value in corrcoefs.items():
        corrcoefs[key] = np.array(value)
    return corrcoefs
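
A minimal sketch of the split-half correlation used above (synthetic data, hypothetical array shapes): trials are split into two disjoint halves, each half is trial-averaged, and the per-cell peak responses of the two halves are correlated.

import numpy as np

rng = np.random.default_rng(0)
datas = rng.normal(size=(30, 12, 40))                       # cells x trials x frames

s_ix_a = rng.choice(datas.shape[1], datas.shape[1] // 2, replace=False)
s_ix_b = np.setdiff1d(np.arange(datas.shape[1]), s_ix_a)    # complementary half of the trials

dffa = np.mean(datas[:, s_ix_a, :], axis=1)                 # trial-averaged responses, split A
dffb = np.mean(datas[:, s_ix_b, :], axis=1)                 # trial-averaged responses, split B
amplitudea = np.max(dffa, axis=1)                           # peak per cell (direction == 0 case)
amplitudeb = np.max(dffb, axis=1)
print(np.corrcoef(amplitudea, amplitudeb)[0, 1])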
Example #11
xticks = np.arange(0, im.shape[1]) + .5
yticks = np.arange(0, im.shape[0]) + .5
ax.set_xticks(xticks)
ax.set_yticks(yticks[::-1])
ax.set_xticklabels((xticks + .5).astype(int), fontsize=7)
ax.set_yticklabels((yticks + .5).astype(int), fontsize=7)

ax = fig.add_axes(rect_cb)
cb = plt.colorbar(cax=ax, ticks=np.arange(vmin, vmax + 0.01, 0.1))
cb.outline.set_linewidth(0.5)
cb.set_label('Correlation', fontsize=7, labelpad=2)
plt.tick_params(axis='both', which='major', labelsize=7)
plt.axis('tight')
plot._easy_save(os.path.join(save_path, 'matrix', mouse), 'across_correlation_matrix')

res = filter.exclude(res, {'mouse':'M241_ofc'})
experiment_list = ['PIR', 'OFC', 'OFC_LONGTERM', 'OFC_COMPOSITE', 'MPFC_COMPOSITE']  # experiment names; avoids shadowing the built-in list
scatter_args = {'marker':'.', 's':8, 'alpha': .5}
error_args = {'fmt': '', 'capsize': 2, 'elinewidth': 1, 'markersize': 2, 'alpha': .5}
reduce_keys = ['within_day_crisp_average', 'across_day_mean_corrs_average']
xkey = 'experiment'
for reduce_key in reduce_keys:
    res_reduce = reduce.new_filter_reduce(res, filter_keys=['experiment'], reduce_key= reduce_key)

    for i, element in enumerate(experiment_list):
        reuse = i > 0
        save = i == len(experiment_list) - 1
        plot.plot_results(res, x_key=xkey, y_key=reduce_key,
                          select_dict={xkey: element},
                          plot_function=plt.scatter,
                          plot_args=scatter_args,
                          reuse=reuse, save=save)  # trailing arguments truncated in the original; reuse/save assumed from the flags computed above
Example #12
list_of_res = []
names = []
behavior_strings = ['YFP', 'HALO', 'JAWS']
for i, condition in enumerate(conditions):
    if any(s in condition.name for s in behavior_strings):
        data_path = os.path.join(Config.LOCAL_DATA_PATH,
                                 Config.LOCAL_DATA_BEHAVIOR_FOLDER,
                                 condition.name)
    else:
        data_path = os.path.join(Config.LOCAL_DATA_PATH,
                                 Config.LOCAL_DATA_TIMEPOINT_FOLDER,
                                 condition.name)
    res = analyze_behavior(data_path, condition)

    if condition.name == 'OFC_LONGTERM':
        res = filter.exclude(res, {'mouse': 3})

    if 'YFP' in condition.name:
        res['condition'] = np.array(['YFP'] * len(res['mouse']))
    elif 'JAWS' in condition.name:
        res['condition'] = np.array(['JAWS'] * len(res['mouse']))
    elif 'HALO' in condition.name:
        res['condition'] = np.array(['HALO'] * len(res['mouse']))
    else:
        res['condition'] = np.array([condition.name] * len(res['mouse']))

    list_of_res.append(res)
    names.append(condition.name)
directory_name = ','.join(names)
all_res = defaultdict(list)
for res, condition in zip(list_of_res, conditions):
    reduce.chain_defaultdicts(all_res, res)  # loop body assumed, following the identical loop in Example #2
Example #13
    # try:
    #     ix_e = all_res[x_key] == 'YFP_PT CS+'
    #     ix_f = all_res[x_key] == 'INH_PT CS+'
    #     rspt = ranksums(all_res[reduce_key][ix_e], all_res[reduce_key][ix_f])
    #     print(all_res[reduce_key][ix_e])
    #     print(all_res[reduce_key][ix_f])
    #     print(rspt)
    # except:
    #     print('no pt')

if 'cdf' in experiments:
    valences = np.unique(all_res['odor_valence'])
    for valence in valences:
        all_res_ = filter.filter(all_res, {'odor_valence': valence})
        ctrl = filter.filter(all_res_, {'condition': 'YFP'})
        experimental = filter.exclude(all_res_, {'condition': 'YFP'})
        ctrl_licks = ctrl['lick']
        experimental_licks = experimental['lick']

        # shorten
        ctrl_min = np.min([len(x) for x in ctrl_licks])
        channel_min = np.min([len(x) for x in experimental_licks])
        both_min = np.min([ctrl_min, channel_min])
        _shorten = lambda array, length: [x[:length] for x in array]
        ctrl_licks = _shorten(ctrl_licks, both_min)
        experimental_licks = _shorten(experimental_licks, both_min)

        # concatenate
        ctrl_licks_cat = np.concatenate(ctrl_licks)
        experimental_licks_cat = np.concatenate(experimental_licks)