Exemplo n.º 1
0
Arquivo: bar.py Projeto: tbenst/glia
def save_unit_spike_trains(units, stimulus_list, c_add_unit_figures, c_add_retina_figure,
        by='angle'):
    """Plot per-unit spike trains for BAR stimuli, sorted by ``by``.

    Args:
        units: dict of units the pipeline is applied to.
        stimulus_list: stimulus records used to build experiments.
        c_add_unit_figures: continuation called with the resulting figures.
        c_add_retina_figure: unused here; kept for the uniform save_* signature.
        by: 'angle' or 'width' — the stimulus key used to sort experiments.

    Raises:
        ValueError: if ``by`` is not 'angle' or 'width'.
    """
    print("Creating bar unit spike trains")
    # Both branches previously duplicated the whole pipeline; only the sort
    # key and the plotting function differ, so select those up front.
    if by == 'angle':
        plotter = plot_spike_trains_by_angle
    elif by == 'width':
        plotter = plot_spike_trains_by_trial
    else:
        # Previously an unknown `by` fell through to an undefined `result`
        # (NameError at c_add_unit_figures); fail fast with a clear message.
        raise ValueError("by must be 'angle' or 'width', got {!r}".format(by))

    get_solid = glia.compose(
        glia.f_create_experiments(stimulus_list),
        glia.f_has_stimulus_type(["BAR"]),
        partial(sorted, key=lambda e: e["stimulus"][by]),
    )
    nplots = get_nplots(stimulus_list,by)
    response = glia.apply_pipeline(get_solid,units)
    result = glia.plot_units(plotter,response, nplots=nplots,
        ncols=3,ax_xsize=10, ax_ysize=5,
        figure_title="Unit spike train by BAR angle")

    c_add_unit_figures(result)
    glia.close_figs([fig for the_id,fig in result])
Exemplo n.º 2
0
def plot_solid_versus_bar(fig, axis_gen, data, prepend, append):
    """Plot SOLID spike trains next to BAR spike trains, per bar speed.

    Parameters
    ----------
    fig : figure object (unused directly; axes are drawn from ``axis_gen``).
    axis_gen : generator yielding the next axis to draw on — each
        ``glia.plot_spike_trains`` call consumes axes from it in order.
    data : tuple ``(solids, bars_by_speed)`` of experiment collections;
        ``bars_by_speed`` maps speed -> list of bar experiments.
    prepend, append : padding forwarded to the SOLID spike-train plot.
    """
    solids, bars_by_speed = data
    for speed in sorted(list(bars_by_speed.keys())):
        bars = bars_by_speed[speed]
        # Longest bar lifespan at this speed fixes the shared x-axis limit.
        max_lifespan = max(
            bars,
            key=lambda e: e["stimulus"]["lifespan"])["stimulus"]["lifespan"]
        lifespans = set()
        for e in bars:
            # need to calculate duration of light over a particular point
            width = e["stimulus"]["width"]
            light_duration = int(np.ceil(width / speed))
            lifespans.add(light_duration)

        # Keep only SOLIDs whose lifespan matches some bar's light duration,
        # sorted ascending so they line up with the width-sorted bars below.
        light_wedge = glia.compose(
            partial(filter, lambda x: x["stimulus"]["lifespan"] in lifespans),
            partial(sorted, key=lambda x: x["stimulus"]["lifespan"]))

        sorted_solids = light_wedge(solids)
        sorted_bars = sorted(bars, key=lambda x: x["stimulus"]["width"])

        xlim = glia.axis_continuation(
            lambda axis: axis.set_xlim(0, max_lifespan))
        bar_text = glia.axis_continuation(
            partial(c_plot_bar, title="Bars with speed: {}".format(speed)))

        # Order matters: the solid plot consumes the first axis, the bar
        # plot the next one from axis_gen.
        glia.plot_spike_trains(axis_gen,
                               sorted_solids,
                               prepend,
                               append,
                               continuation=xlim)
        glia.plot_spike_trains(axis_gen,
                               sorted_bars,
                               continuation=glia.compose(xlim, bar_text))
Exemplo n.º 3
0
def save_acuity_chart(units, stimulus_list, c_unit_fig, c_add_retina_figure,
                      prepend, append):
    "Compare SOLID light wedge to BAR response in corresponding ascending width."

    print("Creating acuity chart v3.")
    # SOLID experiments padded on both sides so the baseline is visible.
    get_solids = glia.compose(
        glia.f_create_experiments(stimulus_list,
                                  prepend_start_time=prepend,
                                  append_lifespan=append),
        glia.f_has_stimulus_type(["SOLID"]),
    )
    solids = glia.apply_pipeline(get_solids, units, progress=True)

    # offset to avoid diamond pixel artifacts
    # Two stable sorts: final order is width-major, angle-minor, then the
    # experiments are bucketed per bar speed.
    get_bars_by_speed = glia.compose(
        glia.f_create_experiments(stimulus_list),
        glia.f_has_stimulus_type(["BAR"]),
        partial(sorted, key=lambda x: x["stimulus"]["angle"]),
        partial(sorted, key=lambda x: x["stimulus"]["width"]),
        partial(glia.group_by, key=lambda x: x["stimulus"]["speed"]))
    bars_by_speed = glia.apply_pipeline(get_bars_by_speed,
                                        units,
                                        progress=True)

    # Speeds are read from one representative unit; assumes all units saw
    # the same set of speeds.
    speeds = list(glia.get_unit(bars_by_speed)[1].keys())

    # One acuity figure and one dissimilarity figure per speed.
    for speed in sorted(speeds):
        print("Plotting acuity for speed {}".format(speed))
        plot_function = partial(plot_acuity_v3,
                                prepend=prepend,
                                append=append,
                                speed=speed)
        filename = "acuity-{}".format(speed)
        result = glia.plot_units(
            plot_function,
            partial(c_unit_fig, filename),
            solids,
            bars_by_speed,
            nplots=1,
            ncols=1,
            ax_xsize=5,
            ax_ysize=15,
            figure_title="Bars with speed {}".format(speed))

        plot_function = partial(plot_dissimilarity,
                                prepend=prepend,
                                append=append,
                                speed=speed)
        filename = "dissimilarity-{}".format(speed)
        result = glia.plot_units(
            plot_function,
            partial(c_unit_fig, filename),
            solids,
            bars_by_speed,
            nplots=1,
            ncols=1,
            ax_xsize=7,
            ax_ysize=7,
            figure_title="Dissimilarity matrix for bars with speed {}".format(
                speed))
Exemplo n.º 4
0
def plot_motion_sensitivity(fig, axis_gen, data, nwidths, speeds, prepend,
                            append):
    """Plot BAR spike trains per speed band, then the matching SOLID wedge.

    Parameters
    ----------
    fig : figure object (axes come from ``axis_gen``).
    axis_gen : generator yielding the next axis to draw on.
    data : tuple ``(solids, bars_by_speed)`` of experiment collections.
    nwidths : number of widths per band; used to pick the longest lifespan.
    speeds : ordered bar speeds; band i uses base width ``2**(i+1)``.
    prepend, append : padding for the final SOLID spike-train plot.
    """
    solids, bars_by_speed = data
    # We assume each band of widths is length 8 & first band is 2-16
    # each subsequent band is twice the width

    # the list is sorted so the final bar has longest lifespan
    max_lifespan = bars_by_speed[speeds[0]][nwidths -
                                            1]["stimulus"]["lifespan"]

    for i, speed in enumerate(speeds):
        base_width = 2**(i + 1)
        widths = {base_width * (n + 1) for n in range(8)}
        bars_to_plot = list(
            filter(lambda x: x["stimulus"]["width"] in widths,
                   bars_by_speed[speed]))
        bars_to_plot.sort(key=lambda x: x["stimulus"]["width"])
        try:
            assert len(bars_to_plot) == 8
        except AssertionError:
            # Dump diagnostics before re-raising. The narrow except replaces
            # a bare `except:` that also swallowed KeyboardInterrupt et al.
            print("width", widths)
            print([bar["stimulus"] for bar in bars_to_plot])
            raise

        xlim = glia.axis_continuation(
            lambda axis: axis.set_xlim(0, max_lifespan))
        ylim = glia.axis_continuation(lambda axis: axis.set_ylim(0, 8))
        bar_text = glia.axis_continuation(
            partial(c_plot_bar,
                    title="Bars with speed: {} & base width: {}".format(
                        speed, base_width)))
        glia.plot_spike_trains(axis_gen,
                               bars_to_plot,
                               continuation=glia.compose(xlim, bar_text, ylim))

        if i == len(speeds) - 1:
            # After the last speed, Plot solid
            lifespans = set()
            for w in widths:
                light_duration = int(np.ceil(w / speed))
                lifespans.add(light_duration)

            light_wedge = glia.compose(
                partial(filter,
                        lambda x: x["stimulus"]["lifespan"] in lifespans),
                partial(sorted, key=lambda x: x["stimulus"]["lifespan"]))

            sorted_solids = light_wedge(solids)
            glia.plot_spike_trains(axis_gen,
                                   sorted_solids,
                                   prepend,
                                   append,
                                   continuation=xlim)
Exemplo n.º 5
0
def save_acuity_direction(units, stimulus_list, c_unit_fig,
                          c_add_retina_figure):
    """Make one direction-selectivity (polar) plot per bar speed.

    Removes the unused ``nspeeds`` local from the original; behavior is
    otherwise unchanged.
    """
    get_direction = glia.compose(
        glia.f_create_experiments(stimulus_list),
        glia.f_has_stimulus_type(["BAR"]),
        # Only white bars contribute to the direction plot.
        partial(filter, lambda x: x["stimulus"]["barColor"] == "white"),
        partial(sorted, key=lambda e: e["stimulus"]["angle"]),
        partial(glia.group_by,
                key=lambda x: x["stimulus"]["speed"],
                value=lambda x: x))

    response = glia.apply_pipeline(get_direction, units, progress=True)

    # Speeds read from one representative unit; assumes all units share them.
    speeds = list(glia.get_unit(response)[1].keys())

    for speed in sorted(speeds):
        print("Plotting DS for speed {}".format(speed))
        plot_function = partial(plot_unit_response_for_speed, speed=speed)
        filename = "direction-{}".format(speed)
        glia.plot_units(
            plot_function,
            partial(c_unit_fig, filename),
            response,
            subplot_kw={"projection": "polar"},
            ax_xsize=7,
            ax_ysize=7,
            figure_title="Units spike train for speed {}".format(speed),
            transpose=True)
Exemplo n.º 6
0
def simulated_test(units, stimulus_list):
    """Sanity-check simulated data: every GRATING firing rate should be ~60."""
    # assert len(next(iter(units.values())).spike_train)==2200

    test_pipeline = glia.compose(glia.f_create_experiments(stimulus_list),
                                 glia.f_has_stimulus_type(["GRATING"]),
                                 glia.f_group_by_stimulus(),
                                 glia.f_calculate_firing_rate_by_stimulus())

    firing_rates = glia.apply_pipeline(test_pipeline, units, progress=True)
    # Only the first unit's rates are checked.
    for stimulus, rates in next(iter(firing_rates.values())).items():
        for rate in rates:
            # NOTE(review): np.isclose's third positional arg is rtol, so
            # this allows 100% relative tolerance — confirm atol=1 wasn't
            # the intent.
            assert np.isclose(rate, 60, 1)
Exemplo n.º 7
0
def get_fr_dsi_osi(units, stimulus_list):
    """Return per-unit BAR firing rates plus DSI and OSI indices."""
    # Firing rate per BAR stimulus condition.
    firing_rate_pipeline = glia.compose(
        glia.f_create_experiments(stimulus_list),
        glia.f_has_stimulus_type(["BAR"]),
        glia.f_group_by_stimulus(),
        glia.f_calculate_firing_rate_by_stimulus(),
    )
    bar_firing_rate = glia.apply_pipeline(
        firing_rate_pipeline, units, progress=True)

    # Direction-selectivity index, computed per (speed, width).
    bar_dsi = glia.apply_pipeline(
        glia.compose(glia.by_speed_width_then_angle,
                     glia.calculate_dsi_by_speed_width),
        bar_firing_rate, progress=True)

    # Orientation-selectivity index, computed per (speed, width).
    bar_osi = glia.apply_pipeline(
        glia.compose(glia.by_speed_width_then_angle,
                     glia.calculate_osi_by_speed_width),
        bar_firing_rate, progress=True)

    return (bar_firing_rate, bar_dsi, bar_osi)
Exemplo n.º 8
0
def save_unit_spike_trains(units, stimulus_list, c_unit_fig, c_add_retina_figure, prepend, append):
    """Render and save one SOLID spike-train figure per unit."""
    print("Creating solid unit spike trains")

    pipeline = glia.compose(
        glia.f_create_experiments(stimulus_list,
                                  prepend_start_time=prepend,
                                  append_lifespan=append),
        glia.f_has_stimulus_type(["SOLID"]),
    )
    response = glia.apply_pipeline(pipeline, units, progress=True)
    plotter = partial(plot_spike_trains,
                      prepend_start_time=prepend,
                      append_lifespan=append)
    result = glia.plot_units(plotter, response,
                             ncols=1, ax_xsize=10, ax_ysize=5)
    c_unit_fig(result)
    glia.close_figs([figure for _, figure in result])
Exemplo n.º 9
0
def save_integrity_chart(units, stimulus_list, c_unit_fig, c_add_retina_figure):
    """Plot SOLID integrity responses (1s prepend, 2s append) per unit."""
    print("Creating integrity chart")

    pipeline = glia.compose(
        glia.f_create_experiments(stimulus_list,
                                  prepend_start_time=1,
                                  append_lifespan=2),
        glia.f_has_stimulus_type(["SOLID"]),
        filter_lifespan,
    )
    response = glia.apply_pipeline(pipeline, units, progress=True)
    plotter = partial(plot_spike_trains, prepend_start_time=1, append_lifespan=2)
    glia.plot_units(plotter, c_unit_fig, response,
                    ncols=1, ax_xsize=10, ax_ysize=5,
                    figure_title="Integrity Test (5 Minute Spacing)")
Exemplo n.º 10
0
Arquivo: solid.py Projeto: tbenst/glia
def save_unit_spike_trains(units, stimulus_list, c_add_unit_figures, c_add_retina_figure, prepend, append):
    """Render SOLID spike-train figures per unit and hand them to the sink."""
    print("Creating solid unit spike trains")

    pipeline = glia.compose(
        glia.f_create_experiments(stimulus_list,
                                  prepend_start_time=prepend,
                                  append_lifespan=append),
        glia.f_has_stimulus_type(["SOLID"]),
    )
    response = glia.apply_pipeline(pipeline, units)
    plotter = partial(plot_spike_trains,
                      prepend_start_time=prepend,
                      append_lifespan=append)
    result = glia.plot_units(plotter, response,
                             ncols=1, ax_xsize=10, ax_ysize=5)
    c_add_unit_figures(result)
    glia.close_figs([figure for _, figure in result])
Exemplo n.º 11
0
def save_unit_spike_trains(units,
                           stimulus_list,
                           c_add_unit_figures,
                           c_add_retina_figure,
                           by='angle'):
    """Plot per-unit spike trains for BAR stimuli, sorted by ``by``.

    Args:
        units: dict of units the pipeline is applied to.
        stimulus_list: stimulus records used to build experiments.
        c_add_unit_figures: continuation called with the resulting figures.
        c_add_retina_figure: unused here; kept for the uniform save_* signature.
        by: 'angle' or 'width' — the stimulus key used to sort experiments.

    Raises:
        ValueError: if ``by`` is not 'angle' or 'width'.
    """
    print("Creating bar unit spike trains")
    # The two original branches duplicated the whole pipeline; only the sort
    # key and the plotting function differ, so select those up front.
    if by == 'angle':
        plotter = plot_spike_trains_by_angle
    elif by == 'width':
        plotter = plot_spike_trains_by_trial
    else:
        # Previously an unknown `by` fell through to an undefined `result`
        # (NameError at c_add_unit_figures); fail fast instead.
        raise ValueError("by must be 'angle' or 'width', got {!r}".format(by))

    get_solid = glia.compose(
        glia.f_create_experiments(stimulus_list),
        glia.f_has_stimulus_type(["BAR"]),
        partial(sorted, key=lambda e: e["stimulus"][by]),
    )
    nplots = get_nplots(stimulus_list, by)
    response = glia.apply_pipeline(get_solid, units, progress=True)
    result = glia.plot_units(plotter,
                             response,
                             nplots=nplots,
                             ncols=3,
                             ax_xsize=10,
                             ax_ysize=5,
                             figure_title="Unit spike train by BAR angle")

    c_add_unit_figures(result)
    glia.close_figs([fig for the_id, fig in result])
Exemplo n.º 12
0
Arquivo: solid.py Projeto: tbenst/glia
def save_unit_psth(units, stimulus_list, c_add_unit_figures, c_add_retina_figure, prepend, append):
    """Compute and save a per-unit PSTH figure for SOLID stimuli."""
    print("Creating solid unit PSTH")

    pipeline = glia.compose(
        glia.f_create_experiments(stimulus_list,
                                  prepend_start_time=prepend,
                                  append_lifespan=append),
        glia.f_has_stimulus_type(["SOLID"]),
        glia.f_group_by_stimulus(),
        glia.concatenate_by_stimulus,
    )
    psth = glia.apply_pipeline(pipeline, units)
    # 10 ms histogram bins.
    plotter = partial(plot_psth,
                      prepend_start_time=prepend,
                      append_lifespan=append,
                      bin_width=0.01)
    result = glia.plot_units(plotter, psth, ax_xsize=10, ax_ysize=5)
    c_add_unit_figures(result)
    glia.close_figs([figure for _, figure in result])
Exemplo n.º 13
0
def save_raster(units, stimulus_list, c_unit_fig, c_add_retina_figure,
        sort_by=glia.group_lifespan):
    """Raster plot of grouped spike trains, one figure per unit."""
    print("Creating spike train raster plot")

    pipeline = glia.compose(
        glia.f_create_experiments(stimulus_list),
        partial(glia.group_by,
                key=lambda x: x["stimulus"]["metadata"]["group"]),
        glia.group_dict_to_list,
        partial(sorted, key=sort_by),
    )
    response = glia.apply_pipeline(pipeline, units, progress=True)
    glia.plot_units(glia.raster_group, c_unit_fig, response,
                    nplots=1, ncols=1, ax_xsize=15, ax_ysize=10)
Exemplo n.º 14
0
def save_unit_psth(units, stimulus_list, c_unit_fig, c_add_retina_figure, prepend, append):
    """Compute and save a per-unit PSTH figure for SOLID stimuli."""
    print("Creating solid unit PSTH")

    pipeline = glia.compose(
        glia.f_create_experiments(stimulus_list,
                                  prepend_start_time=prepend,
                                  append_lifespan=append),
        glia.f_has_stimulus_type(["SOLID"]),
        glia.f_group_by_stimulus(),
        glia.concatenate_by_stimulus,
    )
    psth = glia.apply_pipeline(pipeline, units, progress=True)
    # 10 ms histogram bins.
    plotter = partial(plot_psth,
                      prepend_start_time=prepend,
                      append_lifespan=append,
                      bin_width=0.01)
    result = glia.plot_units(plotter, psth, ax_xsize=10, ax_ysize=5)
    c_unit_fig(result)
    glia.close_figs([figure for _, figure in result])
Exemplo n.º 15
0
def simulated_test(units, stimulus_list):
    """Check simulated GRATING data: spike train length and unit rates of 1."""
    assert len(next(iter(units.values())).spike_train) == 2200

    pipeline = glia.compose(
        glia.f_create_experiments(stimulus_list),
        glia.f_has_stimulus_type(["GRATING"]),
        glia.f_group_by_stimulus(),
        glia.f_calculate_firing_rate_by_stimulus(),
    )

    firing_rates = glia.apply_pipeline(pipeline, units)
    # Only the first unit's rates are inspected; every rate must equal 1.
    first_unit_rates = next(iter(firing_rates.values()))
    for rates in first_unit_rates.values():
        for rate in rates:
            assert rate == 1
Exemplo n.º 16
0
def save_unit_wedges_v2(units, stimulus_list, c_unit_fig, c_add_retina_figure):
    """Plot SOLID/WAIT wedge groups per unit, ordered by group lifespan."""
    print("Creating solid unit wedges")

    pipeline = glia.compose(
        glia.f_create_experiments(stimulus_list),
        glia.f_has_stimulus_type(["SOLID","WAIT"]),
        partial(glia.group_by,
                key=lambda x: x["stimulus"]["metadata"]["group"]),
        glia.group_dict_to_list,
        # Order groups by the lifespan of their second experiment.
        partial(sorted, key=lambda group: get_lifespan(group[1])),
    )
    response = glia.apply_pipeline(pipeline, units, progress=True)

    glia.plot_units(plot_spike_train_triplet, c_unit_fig, response,
                    nplots=1, ncols=1, ax_xsize=10, ax_ysize=5)
Exemplo n.º 17
0
Arquivo: bar.py Projeto: tbenst/glia
def get_fr_dsi_osi(units, stimulus_list):
    """Return per-unit BAR firing rates plus DSI and OSI indices."""
    # Firing rate per BAR stimulus condition.
    firing_rate_pipeline = glia.compose(
        glia.f_create_experiments(stimulus_list),
        glia.f_has_stimulus_type(["BAR"]),
        glia.f_group_by_stimulus(),
        glia.f_calculate_firing_rate_by_stimulus(),
    )
    bar_firing_rate = glia.apply_pipeline(firing_rate_pipeline, units)

    # Direction-selectivity index, computed per (speed, width).
    bar_dsi = glia.apply_pipeline(
        glia.compose(glia.by_speed_width_then_angle,
                     glia.calculate_dsi_by_speed_width),
        bar_firing_rate)

    # Orientation-selectivity index, computed per (speed, width).
    bar_osi = glia.apply_pipeline(
        glia.compose(glia.by_speed_width_then_angle,
                     glia.calculate_osi_by_speed_width),
        bar_firing_rate)

    return (bar_firing_rate, bar_dsi, bar_osi)
Exemplo n.º 18
0
def save_unit_kinetics(units, stimulus_list, c_unit_fig, c_add_retina_figure):
    """Raster plot of kinetics stimulus groups per unit.

    Groups experiments by their metadata "group" id, then orders groups by
    the lifespan of each group's third experiment.
    """
    print("Creating solid unit kinetics")

    get_solid = glia.compose(
        glia.f_create_experiments(stimulus_list),
        partial(glia.group_by,
            key=lambda x: x["stimulus"]["metadata"]["group"]),
        glia.group_dict_to_list,
        # Sort key reads the third experiment in each group — assumes every
        # group has at least three experiments.
        partial(sorted,key=lambda x: get_lifespan(x[2]))
    )
    response = glia.apply_pipeline(get_solid,units, progress=True)

    # glia.plot_units(plot_group_spike_train,c_unit_fig,response,nplots=1,
    #     ncols=1,ax_xsize=10, ax_ysize=5)
    glia.plot_units(glia.raster_group,c_unit_fig,response,nplots=1,
        ncols=1,ax_xsize=10, ax_ysize=5)
Exemplo n.º 19
0
def save_unit_kinetics_v1(units, stimulus_list, c_unit_fig, c_add_retina_figure):
    """Raster plots of kinetics groups, chunked into five 150-experiment slices.

    Each of the five iterations takes experiments [i*150, (i+1)*150), groups
    them by metadata "group", sorts groups by the lifespan of their third
    experiment, and saves one raster figure set named "kinetics-{i}".
    """
    print("Creating solid unit kinetics")

    for i in range(5):
        s = i * 150
        e = (i + 1) * 150
        get_solid = glia.compose(
            glia.f_create_experiments(stimulus_list),
            # Bind s/e as defaults so the slice does not depend on the loop
            # variables through late-binding closure semantics (safe if the
            # pipeline were ever applied after the loop finishes).
            lambda x, s=s, e=e: x[s:e],
            partial(glia.group_by,
                key=lambda x: x["stimulus"]["metadata"]["group"]),
            glia.group_dict_to_list,
            # Assumes every group has at least three experiments.
            partial(sorted, key=lambda x: get_lifespan(x[2]))
        )
        response = glia.apply_pipeline(get_solid, units, progress=True)
        c = partial(c_unit_fig, "kinetics-{}".format(i))
        glia.plot_units(glia.raster_group, c, response, nplots=1,
            ncols=1, ax_xsize=10, ax_ysize=5)
Exemplo n.º 20
0
def save_unit_wedges(units, stimulus_list, c_unit_fig, c_add_retina_figure, prepend, append):
    """Plot SOLID wedge responses per unit, one subplot per background color."""
    print("Creating solid unit wedges")

    pipeline = glia.compose(
        glia.f_create_experiments(stimulus_list,
                                  prepend_start_time=prepend,
                                  append_lifespan=append),
        glia.f_has_stimulus_type(["SOLID"]),
        partial(sorted, key=lambda x: x["stimulus"]["lifespan"]),
    )
    response = glia.apply_pipeline(pipeline, units, progress=True)

    # One subplot per distinct background color seen by a representative unit.
    colors = {solid["stimulus"]["backgroundColor"]
              for solid in glia.get_unit(response)[1]}
    ncolors = len(colors)

    plotter = partial(plot_spike_trains,
                      prepend_start_time=prepend,
                      append_lifespan=append)
    glia.plot_units(plotter, c_unit_fig, response, nplots=ncolors,
                    ncols=min(ncolors, 5), ax_xsize=10, ax_ysize=5)
Exemplo n.º 21
0
def save_integrity_chart_vFail(units, stimulus_list, c_unit_fig, c_add_retina_figure):
    """Raster-plot integrity stimulus groups per unit and report an
    'ideal' classification accuracy figure for the retina."""
    print("Creating integrity chart")
    get_solid = glia.compose(
        glia.f_create_experiments(stimulus_list),
        glia.filter_integrity,
        partial(glia.group_by,
            key=lambda x: x["stimulus"]["metadata"]["group"]),
        glia.group_dict_to_list,
        # Workaround pipeline step — see integrity_fix_hack for details.
        integrity_fix_hack,
        # Chronological order: sort groups by their first stimulus index.
        partial(sorted,key=lambda x: x[0]["stimulus"]["stimulusIndex"])
        )

    response = glia.apply_pipeline(get_solid,units, progress=True)
    plot_function = partial(glia.raster_group)
    # c = partial(c_unit_fig,"kinetics-{}".format(i))

    glia.plot_units(plot_function,c_unit_fig,response,ncols=1,ax_xsize=10, ax_ysize=5,
                             figure_title="Integrity Test (5 Minute Spacing)")

    # Accuracy is computed per unit in parallel and summarized in one figure.
    units_accuracy = glia.pmap(ideal_unit_classification_accuracy, response)
    c_add_retina_figure("integrity_accuracy",plot_units_accuracy(units_accuracy))
Exemplo n.º 22
0
def filter_units_by_accuracy(units, stimulus_list, threshold=0.8):
    """Return the ids of units whose integrity-classification accuracy
    exceeds ``threshold`` for either the 'on' or the 'off' response.

    ``ntrial`` counts integrity-labeled stimuli divided by 3 — presumably
    each integrity trial spans three stimuli (TODO confirm). Trials are
    split roughly in half into train/test sets via glia.TVT.
    """
    ntrial = len(list(filter(
        lambda x: 'metadata' in x['stimulus'] and "label" in x['stimulus']['metadata'] and \
            x['stimulus']['metadata']['label']=='integrity',
        stimulus_list)))/3
    ntrain = int(np.ceil(ntrial/2))
    ntest = int(np.floor(ntrial/2))
    tvt = glia.TVT(ntrain,ntest,0)

    get_solid = glia.compose(
        glia.f_create_experiments(stimulus_list),
        glia.filter_integrity,
        partial(glia.group_by,
            key=lambda x: x["stimulus"]["metadata"]["group"]),
        glia.group_dict_to_list,
        glia.f_split_list(tvt)
    )

    classification_data = glia.apply_pipeline(get_solid,units, progress=True)
    units_accuracy = glia.pmap(unit_classification_accuracy,classification_data)
    # Keep units that pass the threshold on either response polarity.
    filter_threshold = glia.f_filter(lambda k,x: x['on']>threshold or x['off']>threshold)
    return set(filter_threshold(units_accuracy).keys())
Exemplo n.º 23
0
def save_integrity_chart_v2(units, stimulus_list, c_unit_fig, c_add_retina_figure):
    """Raster-plot integrity groups per unit, then score per-unit
    classification accuracy and write a ranked best-units report."""
    print("Creating integrity chart")
    get_integrity= glia.compose(
        glia.f_create_experiments(stimulus_list),
        glia.filter_integrity,
        partial(glia.group_by,
            key=lambda x: x["stimulus"]["metadata"]["group"]),
        glia.group_dict_to_list,
        )
    response = glia.apply_pipeline(get_integrity,units, progress=True)
    # Chronological view for plotting: order groups by first stimulus index.
    chronological = glia.apply_pipeline(
        partial(sorted,key=lambda x: x[0]["stimulus"]["stimulusIndex"]),
        response)

    plot_function = partial(glia.raster_group)
    # c = partial(c_unit_fig,"kinetics-{}".format(i))

    glia.plot_units(plot_function,c_unit_fig,chronological,ncols=1,ax_xsize=10, ax_ysize=5,
                             figure_title="Integrity Test (5 Minute Spacing)")

    # Split trials ~half/half into train/test (no validation set).
    ntrial = len(glia.get_unit(response)[1])
    ntrain = int(np.ceil(ntrial/2))
    ntest = int(np.floor(ntrial/2))
    tvt = glia.TVT(ntrain,ntest,0)
    classification_data = glia.apply_pipeline(
        glia.f_split_list(tvt),
        response)

    units_accuracy = glia.pmap(unit_classification_accuracy,classification_data)
    # Write units ranked by their best (on vs off) accuracy to a text report.
    plot_directory = os.path.join(config.plot_directory,"00-all")
    os.makedirs(plot_directory, exist_ok=True)
    with open(plot_directory + "/best_units.txt", "w") as f:
        sorted_units = sorted(units_accuracy.items(),
            key=lambda z: max(z[1]["off"],z[1]["on"]),
            reverse=True)
        for u in sorted_units:
            f.write(str(u)+"\n")
    c_add_retina_figure("integrity_accuracy",plot_units_accuracy(units_accuracy))
Exemplo n.º 24
0
def save_unit_spike_trains(units,
                           stimulus_list,
                           c_unit_fig,
                           c_add_retina_figure,
                           width=None,
                           height=None):
    """Plot per-unit spike trains for GRATING stimuli, split by waveperiod.

    NOTE(review): ``width`` and ``height`` are accepted but never used in
    this body — confirm whether they were meant to size the figure.
    """
    print("Creating grating unit spike trains")

    get_solid = glia.compose(glia.f_create_experiments(stimulus_list),
                             glia.f_has_stimulus_type(["GRATING"]),
                             glia.f_split_by_wavelength())
    response = glia.apply_pipeline(get_solid, units, progress=True)

    # One subplot per waveperiod group of a representative unit.
    nplots = len(glia.get_unit(response)[1])
    result = glia.plot_units(
        plot_spike_trains,
        response,
        nplots=nplots,
        ncols=3,
        ax_xsize=10,
        ax_ysize=5,
        figure_title="Unit spike train by GRATING waveperiod")
    c_unit_fig(result)
    glia.close_figs([fig for the_id, fig in result])
Exemplo n.º 25
0
def save_images_h5(units, stimulus_list, name, frame_log, video_file, append):
    """Write stimulus images, per-experiment spike data, and class targets
    to ``{name}.h5``.

    Assumes each group is three stimuli with image in second position.
    Concatenate second stimuli with first 0.5s of third stimuli.

    Fixes over the original: ``logger.warn`` (deprecated alias) replaced
    with ``logger.warning``; a dead duplicate computation of ``d`` removed;
    the lambda class resolver replaced with a named function.
    """
    # open first so if there's a problem we don't waste time
    compression_level = 3
    dset_filter = tables.filters.Filters(complevel=compression_level,
                                         complib='blosc:zstd')
    with tables.open_file(name + ".h5", 'w') as h5:
        class_resolver = get_classes_from_stimulus_list(stimulus_list)
        nclasses = len(class_resolver)
        frames, image_classes = glia.get_images_from_vid(
            stimulus_list, frame_log, video_file)

        image_class_num = list(
            map(lambda x: class_resolver[str(x)], image_classes))
        idx_sorted_order = np.argsort(image_class_num)

        # save mapping of class_num target to class metadata
        # this way h5.root.image_classes[n] will give the class metadata string
        logger.info("create class_resolver with max string of 256")
        resolver = h5.create_carray(h5.root, "image_classes",
                                    tables.StringAtom(itemsize=256),
                                    (nclasses, ))
        img_class_array = np.array(image_classes,
                                   dtype="S256")[idx_sorted_order]
        for i, image_class in enumerate(img_class_array):
            resolver[i] = image_class

        atom = tables.Atom.from_dtype(frames[0].dtype)
        images = h5.create_carray(h5.root,
                                  "images",
                                  atom, (nclasses, *frames[0].shape),
                                  filters=dset_filter)

        frames = np.array(frames)
        nFrames = len(frames)
        for i, idx in enumerate(idx_sorted_order):
            if idx >= nFrames:
                # logger.warn is a deprecated alias of logger.warning
                logger.warning(
                    f"skipping class {image_classes[idx]} as no accompanying frame. This should only occur if experiment stopped early."
                )
                continue
            images[i] = frames[idx]

        print("finished saving images")
        get_image_responses = glia.compose(
            # returns a list
            partial(glia.create_experiments,
                    stimulus_list=stimulus_list,
                    progress=True,
                    append_lifespan=append),
            partial(glia.group_by, key=lambda x: x["metadata"]["group"]),
            glia.group_dict_to_list,
            glia.f_filter(partial(glia.group_contains, "IMAGE")),
            # truncate to 0.5s
            glia.f_map(lambda x: [x[1], truncate(x[2], 0.5)]),
            glia.f_map(glia.merge_experiments),
            partial(glia.group_by, key=lambda x: x["metadata"]["cohort"]),
        )

        image_responses = get_image_responses(units)
        ncohorts = len(image_responses)
        ex_cohort = glia.get_value(image_responses)
        images_per_cohort = len(ex_cohort)
        print("images_per_cohort", images_per_cohort)
        duration = ex_cohort[0]["lifespan"]

        logger.info(f"ncohorts: {ncohorts}")

        logger.info(f"nclasses: {nclasses}")
        # uint8 targets suffice below 256 classes; otherwise use uint16.
        if nclasses < 256:
            class_dtype = np.dtype('uint8')
        else:
            class_dtype = np.dtype('uint16')

        def class_resolver_func(c):
            # map a class value to its integer target
            return class_resolver[str(c)]

        # determine shape
        experiments = glia.flatten_group_dict(image_responses)
        nE = len(experiments)
        d = int(np.ceil(duration * 1000))  # 1ms bins
        data_shape = (nE, d, Unit.nrow, Unit.ncol, Unit.nunit)

        print(f"writing to {name}.h5 with zstd compression...")
        data = h5.create_carray("/",
                                "data",
                                tables.Atom.from_dtype(np.dtype('uint8')),
                                shape=data_shape,
                                filters=dset_filter)
        target = h5.create_carray("/",
                                  "target",
                                  tables.Atom.from_dtype(class_dtype),
                                  shape=(nE, ),
                                  filters=dset_filter)

        glia.experiments_to_h5(experiments,
                               data,
                               target,
                               partial(get_image_class_from_stim,
                                       class_resolver=class_resolver_func),
                               append,
                               class_dtype=class_dtype)
Exemplo n.º 26
0
def save_letter_npz(units, stimulus_list, name, append):
    """Build letter-classification train/validation ndarrays and save them
    to ``{name}.npz``, bucketed by letter size."""
    print(
        "Saving letter NPZ file. Warning: not including Off response--performance can be improved!"
    )
    # TODO use merge_experiment
    # TODO add TEST!!!
    # Pipeline: experiments -> groups -> letter groups -> keep first two
    # experiments -> bucket by size, then by cohort -> flatten -> balance
    # blanks within each cohort.
    get_letters = glia.compose(
        partial(glia.create_experiments,
                stimulus_list=stimulus_list,
                progress=True,
                append_lifespan=append),
        partial(glia.group_by, key=lambda x: x["metadata"]["group"]),
        glia.group_dict_to_list, glia.f_filter(group_contains_letter),
        glia.f_map(lambda x: x[0:2]),
        partial(glia.group_by, key=lambda x: x[1]["size"]),
        glia.f_map(
            partial(glia.group_by, key=lambda x: x[1]["metadata"]["cohort"])),
        glia.f_map(glia.f_map(f_flatten)),
        glia.f_map(glia.f_map(balance_blanks)))
    letters = get_letters(units)
    sizes = sorted(list(letters.keys()))
    nsizes = len(sizes)
    ncohorts = len(list(letters.values())[0])
    ex_letters = glia.get_value(list(letters.values())[0])
    nletters = len(ex_letters)
    print("nletters", nletters)
    duration = ex_letters[0]["lifespan"]

    d = int(np.ceil(duration * 1000))  # 1ms bins
    # NOTE(review): nunits is computed but never used below — confirm it
    # can be removed.
    nunits = len(units.keys())
    # 60/40 train/validation split by cohort, no test set.
    tvt = glia.tvt_by_percentage(ncohorts, 60, 40, 0)
    logger.info(f"{tvt}, ncohorts: {ncohorts}")

    experiments_per_cohort = 11
    training_data = np.full((nsizes, tvt.training * experiments_per_cohort, d,
                             Unit.nrow, Unit.ncol, Unit.nunit),
                            0,
                            dtype='int8')
    training_target = np.full((nsizes, tvt.training * experiments_per_cohort),
                              0,
                              dtype='int8')
    validation_data = np.full((nsizes, tvt.validation * experiments_per_cohort,
                               d, Unit.nrow, Unit.ncol, Unit.nunit),
                              0,
                              dtype='int8')
    validation_target = np.full(
        (nsizes, tvt.validation * experiments_per_cohort), 0, dtype='int8')

    size_map = {s: i for i, s in enumerate(sizes)}
    for size, cohorts in letters.items():
        X = glia.f_split_dict(tvt)(cohorts)
        logger.info(f"ncohorts: {len(cohorts)}")
        td, tt = glia.experiments_to_ndarrays(glia.training_cohorts(X),
                                              letter_class, append)
        logger.info(td.shape)
        # Pad the time axis up to d bins.
        missing_duration = d - td.shape[1]
        pad_td = np.pad(td, ((0, 0), (0, missing_duration), (0, 0), (0, 0),
                             (0, 0)),
                        mode='constant')
        size_index = size_map[size]
        training_data[size_index] = pad_td
        training_target[size_index] = tt

        td, tt = glia.experiments_to_ndarrays(glia.validation_cohorts(X),
                                              letter_class, append)
        # NOTE(review): missing_duration is reused from the training arrays;
        # assumes validation td has the same time length — confirm.
        pad_td = np.pad(td, ((0, 0), (0, missing_duration), (0, 0), (0, 0),
                             (0, 0)),
                        mode='constant')
        validation_data[size_index] = pad_td
        validation_target[size_index] = tt

    np.savez(name,
             training_data=training_data,
             training_target=training_target,
             validation_data=validation_data,
             validation_target=validation_target)
Exemplo n.º 27
0
def compose_test():
    """Verify that glia.compose chains functions left-to-right.

    ((4 * 3) ** 2) + 1 == 145, so composition order is first-to-last.
    """
    # Fixed: original used tab indentation, inconsistent with the rest of
    # the file (4-space); logic is unchanged.
    test = glia.compose(lambda x: 3 * x, lambda y: y**2, lambda z: z + 1)
    assert test(4) == 145
Exemplo n.º 28
0
def save_eyechart_npz(units, stimulus_list, name, append=0.5):
    """Bin eyechart letter responses into int8 ndarrays and save as NPZ.

    Groups experiments by letter size then cohort, splits cohorts into
    training/validation/test, and writes arrays of shape
    (nsizes, nexperiments, duration_ms, nunits) plus matching 1-D targets.

    units: dict of unit id -> spike data, consumed by the glia pipeline.
    stimulus_list: stimuli used to slice spikes into experiments.
    name: output path handed to np.savez.
    append: extra lifespan (seconds) appended to each experiment window.
    """
    print("Saving eyechart NPZ file.")

    # TODO add blanks
    get_letters = glia.compose(
        partial(glia.create_experiments,
                stimulus_list=stimulus_list,
                append_lifespan=append),
        partial(glia.group_by, key=lambda x: x["metadata"]["group"]),
        glia.group_dict_to_list,
        glia.f_filter(group_contains_letter),
        glia.f_map(lambda x: adjust_lifespan(x[1])),
        partial(glia.group_by, key=lambda x: x["size"]),
        glia.f_map(
            partial(glia.group_by,
                    key=lambda x: x[1]["stimulus"]["metadata"]["cohort"])),
        glia.f_map(glia.f_map(f_flatten)),
    )
    letters = get_letters(units)
    sizes = sorted(list(letters.keys()))
    nsizes = len(sizes)
    ncohorts = len(list(letters.values())[0])
    ex_letters = glia.get_a_value(list(letters.values())[0])
    nletters = len(ex_letters)
    print("nletters", nletters)
    duration = ex_letters[0]["lifespan"]
    d = int(np.ceil(duration * 1000))  # 1ms bins
    nunits = len(units.keys())
    # 60/40/0 split: the test partition is currently empty but kept so the
    # array shapes stay symmetric with other savers.
    tvt = glia.tvt_by_percentage(ncohorts, 60, 40, 0)
    logger.info(f"{tvt}, {ncohorts}")
    training_data = np.full((nsizes, tvt.training, d, nunits), 0, dtype='int8')
    training_target = np.full((nsizes, tvt.training), 0, dtype='int8')
    validation_data = np.full((nsizes, tvt.validation, d, nunits),
                              0,
                              dtype='int8')
    validation_target = np.full((nsizes, tvt.validation), 0, dtype='int8')
    test_data = np.full((nsizes, tvt.test, d, nunits), 0, dtype='int8')
    test_target = np.full((nsizes, tvt.test), 0, dtype='int8')

    size_map = {s: i for i, s in enumerate(sizes)}
    for size, experiments in letters.items():
        split = glia.f_split_dict(tvt)
        flatten_cohort = glia.compose(glia.group_dict_to_list, f_flatten)
        X = glia.tvt_map(split(experiments), flatten_cohort)

        td, tt = glia.experiments_to_ndarrays(X.training, letter_class, append)
        size_index = size_map[size]
        training_data[size_index] = td
        training_target[size_index] = tt

        td, tt = glia.experiments_to_ndarrays(X.validation, letter_class,
                                              append)
        validation_data[size_index] = td
        validation_target[size_index] = tt

        td, tt = glia.experiments_to_ndarrays(X.test, letter_class, append)
        test_data[size_index] = td
        test_target[size_index] = tt

    # BUG FIX: test_data/test_target were computed above but previously
    # omitted from the archive; persist them so the test split is not lost.
    np.savez(name,
             training_data=training_data,
             training_target=training_target,
             validation_data=validation_data,
             validation_target=validation_target,
             test_data=test_data,
             test_target=test_target)
Exemplo n.º 29
0
def plot_solid_versus_bar_for_speed(fig, axis_gen, data, prepend, append,
                                    speed):
    """Plot SOLID and BAR spike trains side by side for one bar speed.

    data is a (solids, bars_by_speed) pair. For each bar color/width, the
    equivalent light duration over a fixed point is width / speed; solids
    whose lifespan matches one of those durations are plotted first (one
    axis per color), then the bars (one axis per color).
    """
    # also assumes 5 contrasts
    logger.debug("plot solid versus bar for speed")

    solids, bars_by_speed = data
    bars = bars_by_speed[speed]
    max_lifespan = max(
        bars, key=lambda e: e["stimulus"]["lifespan"])["stimulus"]["lifespan"]
    lifespans = set()
    widths = set()
    colors = set()
    for e in bars:
        width = e["stimulus"]["width"]
        widths.add(width)
        color = e["stimulus"]["barColor"]
        # need to calculate duration of light over a particular point
        light_duration = int(np.ceil(width / speed))

        lifespans.add(light_duration)
        colors.add(color)

    logger.debug("lifespans {}, widths {}".format(len(lifespans), len(widths)))
    # WARNING
    # assert len(lifespans)==len(widths)

    # keep charts aligned by row: row index shared between the bar chart
    # (keyed by width) and the solid chart (keyed by matching lifespan)
    bar_ymap = {w: i for i, w in enumerate(sorted(list(widths)))}
    solid_ymap = {l: i for i, l in enumerate(sorted(list(lifespans)))}

    # used to map stimulus to proper row
    c_bar_ymap = lambda s: bar_ymap[s["width"]]
    c_solid_ymap = lambda s: solid_ymap[s["lifespan"]]

    sorted_colors = sorted(list(colors), reverse=True)

    ntrials = len(lifespans)
    # axis continuations: fix the shared x/y limits and add a title per axis
    xlim = glia.axis_continuation(lambda axis: axis.set_xlim(0, max_lifespan))
    ylim = glia.axis_continuation(lambda axis: axis.set_ylim(0, ntrials))
    color_text = lambda x: glia.axis_continuation(
        partial(c_plot_bar, title="Color: {}".format(x)))
    solid_continuation = lambda x: glia.compose(ylim, xlim, color_text(x))

    bar_text = glia.axis_continuation(partial(c_plot_bar, title="Bar"))
    bar_continuation = glia.compose(ylim, xlim, bar_text)

    # we plot the solid first so they are all on the same row
    for color in sorted_colors:
        logger.debug('plotting SOLID for {}'.format(color))
        # keep only solids whose lifespan matches a bar light duration and
        # whose background matches the current bar color, sorted by lifespan
        # NOTE(review): lifespans holds ints from np.ceil — confirm solid
        # lifespans are integral, otherwise the `in lifespans` test misses.
        light_wedge = glia.compose(
            partial(filter, lambda x: x["stimulus"]["lifespan"] in lifespans),
            partial(filter,
                    lambda x: x["stimulus"]["backgroundColor"] == color),
            partial(sorted, key=lambda x: x["stimulus"]["lifespan"]))

        sorted_solids = light_wedge(solids)
        logger.debug(
            'Solid lifespans are: ' +
            ",".join([str(s["stimulus"]["lifespan"]) for s in sorted_solids]))
        glia.plot_spike_trains(axis_gen,
                               sorted_solids,
                               prepend,
                               append,
                               continuation=solid_continuation(color),
                               ymap=c_solid_ymap)

    for color in sorted_colors:
        filtered_bars = glia.compose(
            partial(filter, lambda x: x["stimulus"]["barColor"] == color),
            partial(sorted, key=lambda x: x["stimulus"]["width"]))
        sorted_bars = filtered_bars(bars)
        logger.debug('plotting BAR for {}'.format(color))
        logger.debug(
            'Bar widths are: ' +
            ",".join([str(s["stimulus"]["width"]) for s in sorted_bars]))
        # NOTE(review): unlike the solid call above, prepend/append are not
        # passed here — confirm plot_spike_trains' defaults are intended.
        glia.plot_spike_trains(axis_gen,
                               sorted_bars,
                               continuation=bar_continuation,
                               ymap=c_bar_ymap)
Exemplo n.º 30
0
from warnings import warn
import sklearn.metrics as metrics
from sklearn import manifold
from sklearn.metrics import euclidean_distances
import itertools
import elephant
from math import isclose
from neo.core import SpikeTrain
import quantities


def distance_by_row(similarity):
    """Return the standard deviation of each row of *similarity* as a list."""
    return [np.std(row) for row in similarity]


def truncate_experiment(max_time, experiment):
    """Return a shallow copy of *experiment* whose spike train keeps only
    spikes strictly before *max_time* (spikes at exactly max_time drop)."""
    truncated = experiment.copy()
    spikes = experiment["spikes"]
    # boolean mask selects spikes before the cutoff
    truncated["spikes"] = spikes[spikes < max_time]
    return truncated


# i = glia.compose(
#     glia.f_create_experiments(stimulus_list),
#     glia.f_has_stimulus_type(["SOLID"]),
#     partial(filter,lambda x: isclose(x["stimulus"]["lifespan"],0.5)),
#     lambda x: list(x),
# )

# Scalar summary of a similarity matrix: per-row standard deviation
# (distance_by_row), reduced to its minimum across rows.
icentroid = glia.compose(distance_by_row, np.min)
Exemplo n.º 31
0
def save_acuity_image_npz(units, stimulus_list, name, append):
    """Bin acuity-image responses into int8 ndarrays and save as NPZ.

    Assumes metadata includes a parameter to group by, as well as a blank
    image. Output arrays have shape (nsizes, ncohorts * 11, duration_ms,
    Unit.nrow, Unit.ncol, Unit.nunit) plus matching 1-D target vectors.
    """
    # Pipeline: slice spikes into experiments, group by metadata "group",
    # keep only groups containing an IMAGE stimulus, take the first two
    # stimuli per group, then nest by "parameter" (size) and "cohort",
    # flatten each cohort and balance it with blank images.
    get_letters = glia.compose(
        partial(glia.create_experiments,
                stimulus_list=stimulus_list,
                progress=True,
                append_lifespan=append),
        partial(glia.group_by, key=lambda x: x["metadata"]["group"]),
        glia.group_dict_to_list,
        glia.f_filter(partial(glia.group_contains, "IMAGE")),
        glia.f_map(lambda x: x[0:2]),
        partial(glia.group_by, key=lambda x: x[1]["metadata"]["parameter"]),
        glia.f_map(
            partial(glia.group_by, key=lambda x: x[1]["metadata"]["cohort"])),
        glia.f_map(glia.f_map(f_flatten)),
        glia.f_map(glia.f_map(partial(balance_blanks, key='image'))))
    letters = get_letters(units)
    sizes = sorted(list(letters.keys()))
    nsizes = len(sizes)
    ncohorts = len(list(letters.values())[0])
    ex_letters = glia.get_value(list(letters.values())[0])
    nletters = len(ex_letters)
    print("nletters", nletters)
    duration = ex_letters[0]["lifespan"]

    # small hack to fix bug in letters 0.2.0: if the first stimulus'
    # lifespan disagrees with a letter's, truncate every stimulus to the
    # letter duration so all experiments share one length
    letter_duration = ex_letters[1]['lifespan']
    if duration != letter_duration:
        new_letters = {}
        for size, cohorts in letters.items():
            new_letters[size] = {}
            for cohort, stimuli in cohorts.items():
                new_letters[size][cohort] = list(
                    map(lambda s: truncate(s, letter_duration), stimuli))
        letters = new_letters

    d = int(np.ceil(duration * 1000))  # 1ms bins
    nunits = len(units.keys())
    # 60/40/0 split — no test partition is produced by this saver
    tvt = glia.tvt_by_percentage(ncohorts, 60, 40, 0)
    logger.info(f"{tvt}, ncohorts: {ncohorts}")

    # NOTE(review): hard-coded cohort size — confirm it matches the acuity
    # protocol's stimuli-per-cohort count.
    experiments_per_cohort = 11
    training_data = np.full((nsizes, tvt.training * experiments_per_cohort, d,
                             Unit.nrow, Unit.ncol, Unit.nunit),
                            0,
                            dtype='int8')
    training_target = np.full((nsizes, tvt.training * experiments_per_cohort),
                              0,
                              dtype='int8')
    validation_data = np.full((nsizes, tvt.validation * experiments_per_cohort,
                               d, Unit.nrow, Unit.ncol, Unit.nunit),
                              0,
                              dtype='int8')
    validation_target = np.full(
        (nsizes, tvt.validation * experiments_per_cohort), 0, dtype='int8')

    size_map = {s: i for i, s in enumerate(sizes)}
    for size, cohorts in letters.items():
        X = glia.f_split_dict(tvt)(cohorts)
        logger.info(f"ncohorts: {len(cohorts)}")
        td, tt = glia.experiments_to_ndarrays(glia.training_cohorts(X),
                                              acuity_image_class, append)
        logger.info(td.shape)
        # zero-pad the time axis up to d bins
        # NOTE(review): assumes td.shape[1] <= d; np.pad raises on a
        # negative pad width — confirm upstream guarantees this.
        missing_duration = d - td.shape[1]
        pad_td = np.pad(td, ((0, 0), (0, missing_duration), (0, 0), (0, 0),
                             (0, 0)),
                        mode='constant')
        size_index = size_map[size]
        training_data[size_index] = pad_td
        training_target[size_index] = tt

        td, tt = glia.experiments_to_ndarrays(glia.validation_cohorts(X),
                                              acuity_image_class, append)
        pad_td = np.pad(td, ((0, 0), (0, missing_duration), (0, 0), (0, 0),
                             (0, 0)),
                        mode='constant')
        validation_data[size_index] = pad_td
        validation_target[size_index] = tt

    np.savez(name,
             training_data=training_data,
             training_target=training_target,
             validation_data=validation_data,
             validation_target=validation_target)
Exemplo n.º 32
0
def save_checkerboard_flicker_npz(units,
                                  stimulus_list,
                                  name,
                                  append,
                                  group_by,
                                  quad=False):
    """Psychophysics discrimination checkerboard 0.2.0.

    Bins checkerboard responses into int8 ndarrays of shape
    (nconditions, nsizes, nexperiments, duration_ms,
     Unit.nrow, Unit.ncol, Unit.nunit) and saves them as an NPZ.
    quad selects 4 experiments per cohort (quad discrimination classes)
    instead of 2.
    """
    print("Saving checkerboard NPZ file.")

    # Pipeline: experiments grouped by metadata "group", filtered to groups
    # containing a checkerboard, reduced to their CHECKERBOARD stimuli and
    # merged, then nested by condition (group_by), size, and cohort.
    get_checkers = glia.compose(
        partial(
            glia.create_experiments,
            progress=True,
            append_lifespan=append,
            # stimulus_list=stimulus_list,append_lifespan=0.5),
            stimulus_list=stimulus_list),
        partial(glia.group_by, key=lambda x: x["metadata"]["group"]),
        glia.group_dict_to_list,
        glia.f_filter(group_contains_checkerboard),
        glia.f_map(
            glia.f_filter(lambda x: x['stimulusType'] == 'CHECKERBOARD')),
        glia.f_map(glia.merge_experiments),
        partial(glia.group_by, key=group_by),
        glia.f_map(partial(glia.group_by, key=lambda x: x["size"])),
        glia.f_map(
            glia.f_map(
                partial(glia.group_by,
                        key=lambda x: x["metadata"]["cohort"]))))
    checkers = get_checkers(units)

    # longest stimulus (plus append) determines the shared time axis
    max_duration = 0.0
    for condition, sizes in checkers.items():
        for size, cohorts in sizes.items():
            for cohort, experiments in cohorts.items():
                max_duration = max(max_duration, experiments[0]['lifespan'])
    max_duration += append
    print(f"max_duration: {max_duration}")

    conditions = sorted(list(checkers.keys()))
    print("Conditions:", name, conditions)
    nconditions = len(conditions)
    example_condition = glia.get_value(checkers)
    sizes = sorted(list(example_condition.keys()))
    nsizes = len(sizes)
    # TODO remove
    if max_duration < 9:
        print(example_condition)

    example_size = glia.get_value(example_condition)
    ncohorts = len(example_size)
    # print(list(checkers.values()))
    d = int(np.ceil(max_duration * 1000))  # 1ms bins

    # 60/40/0 split — no test partition is produced by this saver
    tvt = glia.tvt_by_percentage(ncohorts, 60, 40, 0)
    logger.info(f"{tvt}, {ncohorts}")
    # (TODO?) 2 dims for first checkerboard and second checkerboard
    # 4 per cohort
    if quad:
        ntraining = tvt.training * 4
        nvalid = tvt.validation * 4
    else:
        ntraining = tvt.training * 2
        nvalid = tvt.validation * 2

    training_data = np.full(
        (nconditions, nsizes, ntraining, d, Unit.nrow, Unit.ncol, Unit.nunit),
        0,
        dtype='int8')
    training_target = np.full((nconditions, nsizes, ntraining),
                              0,
                              dtype='int8')
    validation_data = np.full(
        (nconditions, nsizes, nvalid, d, Unit.nrow, Unit.ncol, Unit.nunit),
        0,
        dtype='int8')
    validation_target = np.full((nconditions, nsizes, nvalid), 0, dtype='int8')
    # test_data = np.full((nsizes,tvt.test,d,nunits),0,dtype='int8')
    # test_target = np.full((nsizes,tvt.test),0,dtype='int8')

    if quad:
        get_class = get_checker_quad_discrimination_class
    else:
        get_class = get_checker_discrimination_class
    condition_map = {c: i for i, c in enumerate(conditions)}
    size_map = {s: i for i, s in enumerate(sizes)}
    for condition, sizes in checkers.items():
        for size, cohorts in sizes.items():
            X = glia.f_split_dict(tvt)(cohorts)

            td, tt = glia.experiments_to_ndarrays(glia.training_cohorts(X),
                                                  get_class, append)
            logger.info(td.shape)
            # zero-pad the time axis up to d bins
            # NOTE(review): assumes td.shape[1] <= d; np.pad raises on a
            # negative pad width — confirm upstream guarantees this.
            missing_duration = d - td.shape[1]
            pad_td = np.pad(td, ((0, 0), (0, missing_duration), (0, 0), (0, 0),
                                 (0, 0)),
                            mode='constant')
            condition_index = condition_map[condition]
            size_index = size_map[size]
            training_data[condition_index, size_index] = pad_td
            training_target[condition_index, size_index] = tt

            td, tt = glia.experiments_to_ndarrays(glia.validation_cohorts(X),
                                                  get_class, append)
            pad_td = np.pad(td, ((0, 0), (0, missing_duration), (0, 0), (0, 0),
                                 (0, 0)),
                            mode='constant')
            validation_data[condition_index, size_index] = pad_td
            validation_target[condition_index, size_index] = tt

    print('saving to ', name)
    np.savez(name,
             training_data=training_data,
             training_target=training_target,
             validation_data=validation_data,
             validation_target=validation_target)
Exemplo n.º 33
0
def compose_test():
    """Verify that glia.compose applies its functions left-to-right."""
    triple = lambda x: 3 * x
    square = lambda y: y ** 2
    plus_one = lambda z: z + 1
    pipeline = glia.compose(triple, square, plus_one)
    # ((4 * 3) ** 2) + 1 == 145
    assert pipeline(4) == 145
Exemplo n.º 34
0
def save_grating_npz(units,
                     stimulus_list,
                     name,
                     append,
                     group_by,
                     sinusoid=False):
    """Psychophysics discrimination grating 0.2.0.

    Bins (sinusoidal) grating responses into int8 ndarrays of shape
    (nconditions, nsizes, nexperiments, duration_ms,
     Unit.nrow, Unit.ncol, Unit.nunit) and saves them as an NPZ.
    sinusoid selects SINUSOIDAL_GRATING stimuli instead of GRATING.
    """
    print("Saving grating NPZ file.")
    if sinusoid:
        stimulus_type = "SINUSOIDAL_GRATING"
    else:
        stimulus_type = 'GRATING'
    # Pipeline: experiments filtered to the grating type, nested by
    # condition (group_by), width, and cohort.
    get_gratings = glia.compose(
        partial(glia.create_experiments,
                stimulus_list=stimulus_list,
                append_lifespan=append),
        glia.f_filter(lambda x: x['stimulusType'] == stimulus_type),
        partial(glia.group_by, key=group_by),
        glia.f_map(partial(glia.group_by, key=lambda x: x["width"])),
        glia.f_map(
            glia.f_map(
                partial(glia.group_by,
                        key=lambda x: x["metadata"]["cohort"]))))
    gratings = get_gratings(units)

    # longest stimulus (plus append) determines the shared time axis
    max_duration = 0.0
    for condition, sizes in gratings.items():
        for size, cohorts in sizes.items():
            for cohort, experiments in cohorts.items():
                max_duration = max(max_duration, experiments[0]['lifespan'])
    max_duration += append

    conditions = sorted(list(gratings.keys()))
    print("Conditions:", name, conditions)
    nconditions = len(conditions)
    example_condition = glia.get_value(gratings)
    sizes = sorted(list(example_condition.keys()))
    print("Sizes:", sizes)
    nsizes = len(sizes)

    example_size = glia.get_value(example_condition)
    ncohorts = len(example_size)
    # print(list(gratings.values()))
    d = int(np.ceil(max_duration * 1000))  # 1ms bins
    # 60/40/0 split — no test partition is produced by this saver
    tvt = glia.tvt_by_percentage(ncohorts, 60, 40, 0)
    # 2 per cohort
    training_data = np.full((nconditions, nsizes, tvt.training * 2, d,
                             Unit.nrow, Unit.ncol, Unit.nunit),
                            0,
                            dtype='int8')
    training_target = np.full((nconditions, nsizes, tvt.training * 2),
                              0,
                              dtype='int8')
    validation_data = np.full((nconditions, nsizes, tvt.validation * 2, d,
                               Unit.nrow, Unit.ncol, Unit.nunit),
                              0,
                              dtype='int8')
    validation_target = np.full((nconditions, nsizes, tvt.validation * 2),
                                0,
                                dtype='int8')

    condition_map = {c: i for i, c in enumerate(conditions)}
    size_map = {s: i for i, s in enumerate(sizes)}
    for condition, sizes in gratings.items():
        for size, cohorts in sizes.items():
            X = glia.f_split_dict(tvt)(cohorts)

            td, tt = glia.experiments_to_ndarrays(glia.training_cohorts(X),
                                                  get_grating_class_from_stim,
                                                  append)
            # zero-pad the time axis up to d bins
            # NOTE(review): assumes td.shape[1] <= d; np.pad raises on a
            # negative pad width — confirm upstream guarantees this.
            missing_duration = d - td.shape[1]
            pad_td = np.pad(td, ((0, 0), (0, missing_duration), (0, 0), (0, 0),
                                 (0, 0)),
                            mode='constant')
            condition_index = condition_map[condition]
            size_index = size_map[size]
            training_data[condition_index, size_index] = pad_td
            training_target[condition_index, size_index] = tt

            td, tt = glia.experiments_to_ndarrays(glia.validation_cohorts(X),
                                                  get_grating_class_from_stim,
                                                  append)
            pad_td = np.pad(td, ((0, 0), (0, missing_duration), (0, 0), (0, 0),
                                 (0, 0)),
                            mode='constant')
            validation_data[condition_index, size_index] = pad_td
            validation_target[condition_index, size_index] = tt

    print('saving to ', name)
    np.savez(name,
             training_data=training_data,
             training_target=training_target,
             validation_data=validation_data,
             validation_target=validation_target)
Exemplo n.º 35
0
def plot_acuity_v3(fig, axis_gen, data, prepend, append, speed):
    """Raster plot of BAR responses for one speed on a single axis:
    one row per (angle, width) pair, rows grouped by width, with each
    stimulus' lifespan shaded behind its spikes."""
    logger.debug("plot solid versus bar for speed")

    ax = next(axis_gen)
    solids, bars_by_speed = data
    bars = bars_by_speed[speed]
    max_lifespan = max(
        bars, key=lambda e: e["stimulus"]["lifespan"])["stimulus"]["lifespan"]
    lifespans = set()
    widths = set()
    angles = set()
    for e in bars:
        width = e["stimulus"]["width"]
        angle = e["stimulus"]["angle"]
        widths.add(width)
        angles.add(angle)
        # need to calculate duration of light over a particular point
        light_duration = int(np.ceil(width / speed))

        lifespans.add(light_duration)

    logger.debug("lifespans {}, widths {}".format(len(lifespans), len(widths)))
    # WARNING
    # assert len(lifespans)==len(widths)

    # keep charts aligned by row: rows ordered by width, then angle
    angle_width = []
    for w in sorted(widths):
        for a in sorted(angles):
            angle_width.append((a, w))

    bar_ymap = {aw: i for i, aw in enumerate(angle_width)}

    # used to map stimulus to proper row
    # NOTE(review): c_bar_ymap, xlim, ylim, and bar_continuation below are
    # built but never applied in this function (drawing happens directly on
    # ax) — confirm whether this is dead code.
    c_bar_ymap = lambda s: bar_ymap[(s["angle"], s["width"])]

    nangles = len(angles)
    nwidths = len(widths)
    ny = nangles * nwidths

    xlim = glia.axis_continuation(lambda axis: axis.set_xlim(0, max_lifespan))
    ylim = glia.axis_continuation(lambda axis: axis.set_ylim(0, ny))

    bar_text = glia.axis_continuation(
        partial(c_plot_bar,
                title="{} angles x {} widths at {} px/s".format(
                    nangles, nwidths, speed)))
    bar_continuation = glia.compose(ylim, xlim, bar_text)

    # one raster row per (angle, width), with the stimulus window shaded
    for e in bars:
        spikes = e["spikes"]
        lifespan = e["stimulus"]["lifespan"]
        angle = e["stimulus"]["angle"]
        width = e["stimulus"]["width"]
        y = bar_ymap[(angle, width)]
        glia.draw_spikes(ax, spikes, y + 0.2, y + 0.8)
        ax.fill([0, lifespan, lifespan, 0], [y, y, y + 1, y + 1],
                facecolor="gray",
                edgecolor="none",
                alpha=0.1)

    # one y tick per width group (every nangles rows)
    ax.yaxis.set_ticks(np.arange(0, nwidths * nangles, nangles))
    ax.yaxis.set_ticklabels(sorted(list(widths)))
    ax.set_ylabel("Bar Width in pixels (angle changes each row)")
    ax.set_xlabel("Time in seconds")