Пример #1
0
def get_stimulus_parameters(stimulus_list, stimulus_type, parameter):
    """Return the sorted distinct values of `parameter` over all stimuli
    of `stimulus_type` in `stimulus_list`.

    Asserts that at least one matching stimulus was found.
    """
    select_type = f_filter(
        lambda entry: entry["stimulus"]['stimulusType'] == stimulus_type)
    bucket_by_parameter = partial(
        group_by, key=lambda entry: entry["stimulus"][parameter])
    pipeline = compose(select_type, bucket_by_parameter)
    parameters = sorted(pipeline(stimulus_list).keys())
    logger.debug(f"Parameters: {parameters}")
    assert len(parameters) > 0
    return parameters
Пример #2
0
def get_image_parameters(stimulus_list):
    """Return the sorted distinct metadata 'parameter' values over all
    IMAGE stimuli in `stimulus_list`.

    Asserts that at least one IMAGE stimulus was found.
    """
    select_images = f_filter(
        lambda entry: entry["stimulus"]['stimulusType'] == 'IMAGE')
    bucket_by_parameter = partial(
        group_by,
        key=lambda entry: entry["stimulus"]["metadata"]["parameter"])
    grouped = compose(select_images, bucket_by_parameter)(stimulus_list)
    parameters = sorted(grouped.keys())
    logger.debug(f"Parameters: {parameters}")
    assert len(parameters) > 0
    return parameters
Пример #3
0
def plot_spike_trains(fig,
                      axis_gen,
                      data,
                      prepend_start_time=0,
                      append_lifespan=0,
                      continuation=c_plot_solid,
                      ymap=None):
    """Draw a spike-train raster, one row per stimulus trial.

    Parameters
    ----------
    fig : matplotlib figure (unused directly; kept for API symmetry).
    axis_gen : generator of axes; exactly one axis is consumed.
    data : iterable of dicts with "stimulus" and "spikes" entries.
    prepend_start_time : seconds of gray padding drawn before the stimulus.
    append_lifespan : seconds of gray padding drawn after the stimulus.
    continuation : callable applied to the axis when done (CPS style).
    ymap : optional callable mapping a stimulus to its row index; used to
        keep charts aligned when a comparable stimulus was not run
        (e.g., each row is a different bar width but one chart is
        sparsely sampled).
    """
    ax = next(axis_gen)
    for i, v in enumerate(data):
        stimulus, spike_train = (v["stimulus"], v["spikes"])

        # use ymap to keep charts aligned if a comparable stimulus was not run
        # i.e., if each row is a different bar width but one chart is sparsely sampled
        if ymap:
            trial = ymap(stimulus)
        else:
            trial = i

        lifespan = stimulus['lifespan']
        logger.debug(
            "plot_spike_trains ({}) iteration: {}, lifespan: {}".format(
                stimulus["stimulusType"], trial, lifespan))
        if lifespan > 120:
            # BUG FIX: previously this only logged "skipping" but fell
            # through and plotted anyway; now actually skip the stimulus.
            logger.debug("skipping stimulus longer than 120 seconds")
            continue

        if spike_train.size > 0:
            draw_spikes(ax, spike_train, ymin=trial + 0.3, ymax=trial + 1)

        stimulus_end = prepend_start_time + lifespan
        duration = stimulus_end + append_lifespan
        if stimulus_end != duration:
            # padding requested: shade the prepended and appended spans
            # (this is for solid)
            ax.fill([0, prepend_start_time, prepend_start_time, 0],
                    [trial, trial, trial + 1, trial + 1],
                    facecolor="gray",
                    edgecolor="none",
                    alpha=0.1)
            ax.fill([stimulus_end, duration, duration, stimulus_end],
                    [trial, trial, trial + 1, trial + 1],
                    facecolor="gray",
                    edgecolor="none",
                    alpha=0.1)
        else:
            # draw all gray for all others
            ax.fill([0, lifespan, lifespan, 0],
                    [trial, trial, trial + 1, trial + 1],
                    facecolor="gray",
                    edgecolor="none",
                    alpha=0.1)

    continuation(ax)
Пример #4
0
def init_logging(name, processes, verbose, debug):
    """Configure module logging.

    Installs a DEBUG-level file handler writing to ``<name>.log`` plus a
    console handler whose level depends on the flags: INFO when verbose,
    DEBUG when debug, WARNING otherwise. Also stores `processes` on the
    global config when provided.
    """
    #### LOGGING CONFIGURATION
    fh = logging.FileHandler(name + '.log')
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    if verbose:
        ch.setLevel(logging.INFO)
        # tracemalloc.start()
    elif debug:
        ch.setLevel(logging.DEBUG)
    else:
        ch.setLevel(logging.WARNING)
    # idiom fix: compare against None with `is not`, not `!=`
    if processes is not None:
        config.processes = processes
    formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s',
                                  '%H:%M:%S')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    # these announce the effective console level in the log itself
    logger.info("Verbose logging on")
    logger.debug("Debug logging on")
Пример #5
0
def _plot_worker(plot_function, c_unit_fig, nplots, ncols, nrows, ax_xsize,
                 ax_ysize, figure_title, transpose, subplot_kw, args):
    "Use with functools.partial() so function only takes args."

    logger.debug("plot worker")

    unit_id, unit_data = args
    # unwrap a single-element list so plot() receives the bare data
    unit_data = unit_data[0] if len(unit_data) == 1 else unit_data
    figure = plot(plot_function,
                  unit_data,
                  nplots,
                  ncols=ncols,
                  nrows=nrows,
                  ax_xsize=ax_xsize,
                  ax_ysize=ax_ysize,
                  figure_title=figure_title,
                  transpose=transpose,
                  subplot_kw=subplot_kw)

    logger.debug("Plot worker successful for {}".format(unit_id))
    # hand the finished figure to the continuation, then free it
    c_unit_fig(unit_id, figure)
    plt.close(figure)
Пример #6
0
def classify_cmd(filename,
                 nsamples,
                 notebook,
                 skip,
                 debug=False,
                 verbose=False,
                 version=2,
                 processes=None,
                 n_draws=30,
                 px_per_deg=10.453):
    """Classify using converted NPZ.

    Resolves `filename` to an .npz file, loads the matching .stim file,
    then dispatches to the appropriate svc classifier based on the
    stimulus protocol name recorded in the metadata.
    """
    # resolve a bare name to the matching .npz file
    if not os.path.isfile(filename):
        filename = glia.match_filename(filename, 'npz')
    try:
        assert filename[-4:] == ".npz"
    except AssertionError:
        print("Please specify a npz file.")
        raise

    data_directory, data_name = os.path.split(filename)
    if data_directory == '':
        data_directory = os.getcwd()

    if not notebook:
        notebook = glia.find_notebook(data_directory)

    lab_notebook = glia.open_lab_notebook(notebook)

    name, extension = os.path.splitext(data_name)
    init_logging(name, processes, verbose, debug)
    stim_name = strip_generated(name)
    stimulus_file = os.path.join(data_directory, stim_name + ".stim")
    metadata, stimulus_list, method = glia.read_stimulus(stimulus_file)
    if not skip:
        assert method == 'analog-flicker'  # if failed, delete .stim

    data = np.load(filename)
    shape = np.shape(data['training_data'])
    logger.debug(f"Data dim: {shape}")

    plots_directory = os.path.join(data_directory, name + "-plots")
    os.makedirs(plots_directory, exist_ok=True)
    plot_directory = os.path.join(plots_directory, "00-all")
    os.makedirs(plot_directory, exist_ok=True)

    # dispatch on the stimulus protocol name from metadata; prefix matches
    # (re.match) are ordered most-specific first
    name = metadata['name']
    if re.match('checkerboard', name):
        svc.checkerboard_svc(data,
                             metadata,
                             stimulus_list,
                             lab_notebook,
                             plot_directory,
                             nsamples,
                             n_draws,
                             px_per_deg=px_per_deg)
    elif re.match('grating-sinusoidal', name):
        svc.grating_svc(data,
                        metadata,
                        stimulus_list,
                        lab_notebook,
                        plot_directory,
                        nsamples,
                        n_draws,
                        sinusoid=True,
                        px_per_deg=px_per_deg)
    elif re.match('grating', name):
        svc.grating_svc(data,
                        metadata,
                        stimulus_list,
                        lab_notebook,
                        plot_directory,
                        nsamples,
                        n_draws,
                        px_per_deg=px_per_deg)
    elif 'faces' in name:
        # class_resolver: dict with entries like '[37, False, True]': 0,
        # i.e. "[person, is_male, is_smiling]" string -> class number
        class_resolver = data['class_resolver'].item()
        nclasses = np.array(list(class_resolver.values())).max() + 1
        id_map = {i: i for i in np.arange(nclasses)}
        num2gender = {}
        num2smiling = {}
        num2person = {}
        for class_str, num in class_resolver.items():
            # parse "[int, bool, bool]" without eval
            temp = class_str[1:-1].split(", ")
            image_class = [int(temp[0]), temp[1] == "True", temp[2] == "True"]
            num2gender[num] = image_class[1]
            num2smiling[num] = image_class[2]
            num2person[num] = image_class[0]
        target_mappers = [id_map, num2gender, num2person, num2smiling]
        mapper_classes = [
            np.arange(nclasses), ["Female", "Male"],
            np.arange(20), ["Not smiling", "Smiling"]
        ]
        mapper_names = ["by_image", "is_male", "by_person", "is_smiling"]
        svc.generic_image_classify(data, metadata, stimulus_list, lab_notebook,
                                   plot_directory, nsamples, target_mappers,
                                   mapper_classes, mapper_names)
    elif name == 'letters-tiled':
        svc.tiled_letter_svc(data,
                             metadata,
                             stimulus_list,
                             lab_notebook,
                             plot_directory,
                             nsamples,
                             px_per_deg=px_per_deg)
    elif name in ('eyechart-saccade', 'letters-saccade'):
        # both saccade protocols use the same image classifier;
        # previously two duplicated branches
        svc.image_svc(data,
                      metadata,
                      stimulus_list,
                      lab_notebook,
                      plot_directory,
                      nsamples,
                      px_per_deg=px_per_deg)
    elif re.match('letter', name):
        svc.letter_svc(data,
                       metadata,
                       stimulus_list,
                       lab_notebook,
                       plot_directory,
                       nsamples,
                       px_per_deg=px_per_deg)
    else:
        raise ValueError(f"unknown name: {name}")
Пример #7
0
def process_cmd(filename, notebook, debug=False, verbose=False):
    """Process analog + frames log to align stim/ephys in .frames file.

    Finds the experiment start in the analog flicker channel, applies a
    gross time offset to the eyecandy frame log, then a piecewise-linear
    per-stimulus correction, and writes the aligned log to a .frames CSV.
    """
    init_logging(filename, processes=None, verbose=verbose, debug=debug)
    #### FILEPATHS
    logger.debug(str(filename) + "   " + str(os.path.curdir))
    if not os.path.isfile(filename):
        try:
            filename = glia.match_filename(filename, "txt")
        except Exception:
            # no .txt recording found; fall back to 3brain .bxr
            filename = glia.match_filename(filename, "bxr")
    data_directory, data_name = os.path.split(filename)
    name, extension = os.path.splitext(data_name)
    analog_file = os.path.join(data_directory, name + '.analog')
    if not os.path.isfile(analog_file):
        # use 3brain analog file
        analog_file = os.path.join(data_directory, name + '.analog.brw')

    stimulus_file = os.path.join(data_directory, name + ".stim")
    if not notebook:
        notebook = glia.find_notebook(data_directory)
    lab_notebook = glia.open_lab_notebook(notebook, convert_types=False)
    experiment_protocol = glia.get_experiment_protocol(lab_notebook, name)

    date_prefix = (data_directory + experiment_protocol['date']).replace(
        ':', '_')
    frame_log_file = date_prefix + "_eyecandy_frames.log"
    video_file = date_prefix + "_eyecandy.mkv"

    # count the frames actually encoded in the eyecandy video
    container = av.open(str(video_file))
    n_video_frames = 0
    for _ in container.decode(video=0):
        n_video_frames += 1

    stimulus_list = glia.read_stimulus(stimulus_file)

    if analog_file[-4:] == ".brw":
        analog = glia.read_3brain_analog(analog_file)
    else:
        analog = glia.read_raw_voltage(analog_file)[:, 1]
    sampling_rate = glia.sampling_rate(analog_file)

    analog_std = 0.5 * analog.std() + analog.min()
    # beginning of experiment
    # TODO: after clustering, should subtract known / estimated latency for better frame time..?
    # for maximum temporal accuracy, frame should begin at start of slope
    approximate_start_idx = np.where(analog > analog_std)[0][0]
    baseline_offset = int(sampling_rate /
                          10)  # rise started before experiment_start_idx
    # we add 3sigma of baseline to baseline.max() to create threshold for end of experiment
    baseline_thresh = np.max(analog[:approximate_start_idx-baseline_offset]) \
                    + np.std(analog[:approximate_start_idx-baseline_offset])*3
    experiment_start_idx = np.where(analog > baseline_thresh)[0][0]

    frame_log = pd.read_csv(frame_log_file)

    nframes_in_log = len(frame_log)
    if np.abs(n_video_frames - nframes_in_log) > 1:
        # fix: Logger.warn is deprecated in favor of Logger.warning
        logger.warning(
            f"found {n_video_frames} video frames, but {nframes_in_log} frames in log"
        )
    assert np.abs(n_video_frames - nframes_in_log) < 2
    # gross adjustment for start time
    frame_log.time = (frame_log.time - frame_log.time[0]
                      ) / 1000 + experiment_start_idx / sampling_rate

    # finer piecewise linear adjustments for each stimulus start frame
    # I've seen this off by 50ms after a 4.4s bar stimulus!
    newStimFrames = np.where(frame_log.stimulusIndex.diff())[0]
    stim_start_diff = np.abs(
        frame_log.iloc[newStimFrames].time.diff()[1:] - np.diff(
            np.array(list(map(lambda s: s["start_time"], stimulus_list[1])))))
    max_time_diff = stim_start_diff.max()
    print("stimulus start sum_time_diff", max_time_diff)
    print("stimulus start mean_time_diff", stim_start_diff.mean())
    assert len(newStimFrames) == len(stimulus_list[1])
    for n, stim in enumerate(stimulus_list[1]):
        flickerTime = stim["start_time"]
        frameNum = newStimFrames[n]
        if n + 1 < len(stimulus_list[1]):
            nextFrameNum = newStimFrames[n + 1]
            # we adjust all frames in a stimulus
            loc = (frame_log.framenum >= frameNum) & (frame_log.framenum <
                                                      nextFrameNum)
        else:
            loc = (frame_log.framenum >= frameNum)
        frame_log_time = frame_log.loc[frameNum].time
        time_delta = flickerTime - frame_log_time
        frame_log.loc[loc, 'time'] += time_delta

    # verify the alignment is now sub-millisecond
    stim_start_diff = np.abs(
        frame_log.iloc[newStimFrames].time.diff()[1:] - np.diff(
            np.array(list(map(lambda s: s["start_time"], stimulus_list[1])))))
    max_time_diff = stim_start_diff.max()
    print("post alignment stimulus start sum_time_diff", max_time_diff)
    assert max_time_diff < 0.001

    name, _ = os.path.splitext(frame_log_file)
    frame_log.to_csv(name + ".frames", index=False)
    print(f"Saved to {name + '.frames'}")
Пример #8
0
def analyze(ctx,
            filename,
            trigger,
            threshold,
            eyecandy,
            ignore_extra=False,
            fix_missing=False,
            output=None,
            notebook=None,
            configuration=None,
            verbose=False,
            debug=False,
            processes=None,
            by_channel=False,
            integrity_filter=0.0,
            analog_idx=1,
            default_channel_map=False,
            dev=False):
    """Analyze data recorded with eyecandy.

    This command/function preprocesses the data & aligns stimuli to ephys
    recording. Populates ctx.obj with stimulus_list, metadata, units,
    frame_log/video_file (None when unavailable), and the c_unit_fig /
    c_retina_fig plotting continuations.
    """
    print("version 0.5.1")
    init_logging(filename, processes, verbose, debug)
    #### FILEPATHS
    logger.debug(str(filename) + "   " + str(os.path.curdir))
    if not os.path.isfile(filename):
        try:
            filename = glia.match_filename(filename, "txt")
        except Exception:
            # no .txt recording found; fall back to 3brain .bxr
            filename = glia.match_filename(filename, "bxr")

    data_directory, data_name = os.path.split(filename)
    name, extension = os.path.splitext(data_name)
    analog_file = os.path.join(data_directory, name + '.analog')
    if not os.path.isfile(analog_file):
        # use 3brain analog file
        analog_file = os.path.join(data_directory, name + '.analog.brw')

    stimulus_file = os.path.join(data_directory, name + ".stim")
    ctx.obj = {"filename": os.path.join(data_directory, name)}
    print(f"Analyzing {name}")

    # an optional YAML configuration file may override several options
    if configuration is not None:
        with open(configuration, 'r') as f:
            user_config = yaml.safe_load(f)
        config.user_config = user_config
        if "analog_calibration" in user_config:
            config.analog_calibration = user_config["analog_calibration"]
        if "notebook" in user_config:
            notebook = user_config["notebook"]
        if "eyecandy" in user_config:
            eyecandy = user_config["eyecandy"]
        if "processes" in user_config:
            processes = user_config["processes"]
        if "integrity_filter" in user_config:
            integrity_filter = user_config["integrity_filter"]
        if "by_channel" in user_config:
            by_channel = user_config["by_channel"]

    if not notebook:
        notebook = glia.find_notebook(data_directory)

    lab_notebook = glia.open_lab_notebook(notebook)
    logger.info(name)
    experiment_protocol = glia.get_experiment_protocol(lab_notebook, name)
    flicker_version = experiment_protocol["flickerVersion"]

    #### LOAD STIMULUS
    try:
        metadata, stimulus_list, method = glia.read_stimulus(stimulus_file)
        ctx.obj["stimulus_list"] = stimulus_list
        ctx.obj["metadata"] = metadata
        # assert method=='analog-flicker'
    except Exception:
        print("No .stim file found. Creating from .analog file.")
        if flicker_version == 0.3:
            metadata, stimulus_list = glia.create_stimuli(
                analog_file, stimulus_file, notebook, name, eyecandy,
                analog_idx, ignore_extra, config.analog_calibration, threshold)
            ctx.obj["stimulus_list"] = stimulus_list
            ctx.obj["metadata"] = metadata
            print('finished creating .stim file')
        elif trigger == "ttl":
            raise ValueError('not implemented')
        else:
            raise ValueError("invalid trigger: {}".format(trigger))

    # look for .frames file
    try:
        lab_notebook_notype = glia.open_lab_notebook(notebook,
                                                     convert_types=False)
        protocol_notype = glia.get_experiment_protocol(lab_notebook_notype,
                                                       name)
        date_prefix = os.path.join(data_directory,
                                   protocol_notype['date'].replace(':', '_'))
        frames_file = date_prefix + "_eyecandy_frames.log"
        video_file = date_prefix + "_eyecandy.mkv"
        frame_log = pd.read_csv(frames_file)
        frame_log = frame_log[:-1]  # last frame is not encoded for some reason
        ctx.obj["frame_log"] = frame_log
        ctx.obj["video_file"] = video_file
    except Exception as e:
        extype, value, tb = sys.exc_info()
        traceback.print_exc()
        print(e)
        ctx.obj["frame_log"] = None
        ctx.obj["video_file"] = None
        print("Attempting to continue without frame log...")

    #### LOAD SPIKES
    # raw string fixes the invalid '\.' escape sequence warning
    spyking_regex = re.compile(r'.*\.result.hdf5$')
    eye = experiment_protocol['eye']
    experiment_n = experiment_protocol['experimentNumber']

    date = experiment_protocol['date'].date().strftime("%y%m%d")

    retina_id = date + '_R' + eye + '_E' + experiment_n
    if extension == ".txt":
        # NOTE(review): channel_map is not defined in this function —
        # presumably a module-level global; confirm
        ctx.obj["units"] = glia.read_plexon_txt_file(filename, retina_id,
                                                     channel_map)
    elif extension == ".bxr":
        if default_channel_map:
            channel_map_3brain = config.channel_map_3brain
        else:
            channel_map_3brain = None
        ctx.obj["units"] = glia.read_3brain_spikes(filename,
                                                   retina_id,
                                                   channel_map_3brain,
                                                   truncate=dev)
    elif re.match(spyking_regex, filename):
        ctx.obj["units"] = glia.read_spyking_results(filename)
    else:
        # fix: the {} placeholder was never filled in previously
        raise ValueError(
            'could not read {}. Is it a plexon or spyking circus file?'.format(
                filename))

    #### DATA MUNGING OPTIONS
    if integrity_filter > 0.0:
        good_units = solid.filter_units_by_accuracy(ctx.obj["units"],
                                                    ctx.obj['stimulus_list'],
                                                    integrity_filter)
        filter_good_units = glia.f_filter(lambda u, v: u in good_units)
        ctx.obj["units"] = filter_good_units(ctx.obj["units"])

    if by_channel:
        ctx.obj["units"] = glia.combine_units_by_channel(ctx.obj["units"])

    # prepare_output
    plot_directory = os.path.join(data_directory, name + "-plots")
    config.plot_directory = plot_directory

    os.makedirs(plot_directory, exist_ok=True)
    os.chmod(plot_directory, 0o777)

    if output == "pdf":
        logger.debug("Outputting pdf")
        ctx.obj["retina_pdf"] = PdfPages(
            glia.plot_pdf_path(plot_directory, "retina"))
        ctx.obj["unit_pdfs"] = glia.open_pdfs(plot_directory,
                                              list(ctx.obj["units"].keys()),
                                              Unit.name_lookup())
        # c connotes 'continuation' for continuation passing style
        ctx.obj["c_unit_fig"] = partial(glia.add_to_unit_pdfs,
                                        unit_pdfs=ctx.obj["unit_pdfs"])
        ctx.obj["c_retina_fig"] = lambda x: ctx.obj["retina_pdf"].savefig(x)

    elif output == "png":
        logger.debug("Outputting png")
        ctx.obj["c_unit_fig"] = glia.save_unit_fig
        ctx.obj["c_retina_fig"] = glia.save_retina_fig
        os.makedirs(os.path.join(plot_directory, "00-all"), exist_ok=True)

        # one subdirectory per unit for the per-unit pngs
        for unit_id in ctx.obj["units"].keys():
            name = unit_id
            os.makedirs(os.path.join(plot_directory, name), exist_ok=True)
Пример #9
0
def generate(ctx, filename, eyecandy, generate_method, notebook, number,
             nunits, stimulus):
    """Generate synthetic spike-train units for testing.

    Creates a number x number channel grid with `nunits` units each,
    either random units or fixed-rate (60 Hz) units, prepares png plot
    output directories, and loads the frame log when available.
    """
    data_directory, data_name = os.path.split(filename)
    if data_directory == '':
        data_directory = os.getcwd()

    if not notebook:
        notebook = glia.find_notebook(data_directory)

    lab_notebook = glia.open_lab_notebook(notebook)
    name, ext = os.path.splitext(filename)

    ctx.obj = {'filename': generate_method + "_" + name}

    stimulus_file = os.path.join(data_directory, name + ".stim")
    try:
        metadata, stimulus_list, method = glia.read_stimulus(stimulus_file)
        print('found .stim file')
    except Exception:
        print('creating .stim file.')
        metadata, stimulus_list = glia.create_stimuli_without_analog(
            stimulus_file, notebook, name, eyecandy)

    ctx.obj["stimulus_list"] = stimulus_list
    ctx.obj["metadata"] = metadata
    # total experiment time: start of the last stimulus plus its lifespan
    last_stim = stimulus_list[-1]
    total_time = last_stim['start_time'] + last_stim['stimulus']['lifespan']
    units = {}
    retina_id = f'{generate_method}_{name}'
    print('generating test data')
    for channel_x in range(number):
        for channel_y in range(number):
            for unit_j in range(nunits):
                if generate_method == 'random':
                    u = glia.random_unit(total_time, retina_id,
                                         (channel_x, channel_y), unit_j)
                elif generate_method == "hz":
                    hz = 60
                    u = glia.hz_unit(total_time, hz, retina_id,
                                     (channel_x, channel_y), unit_j)
                else:
                    raise ValueError(
                        f"Undefined generate_method: {generate_method}")

                units[u.id] = u
    ctx.obj["units"] = units

    # prepare_output
    plot_directory = os.path.join(data_directory, f"{retina_id}-plots")
    config.plot_directory = plot_directory

    os.makedirs(plot_directory, exist_ok=True)
    os.chmod(plot_directory, 0o777)

    logger.debug("Outputting png")
    ctx.obj["c_unit_fig"] = glia.save_unit_fig
    ctx.obj["c_retina_fig"] = glia.save_retina_fig
    os.makedirs(os.path.join(plot_directory, "00-all"), exist_ok=True)

    # one subdirectory per unit for the per-unit pngs
    for unit_id in ctx.obj["units"].keys():
        name = unit_id
        os.makedirs(os.path.join(plot_directory, name), exist_ok=True)

    # look for .frames file
    try:
        lab_notebook_notype = glia.open_lab_notebook(notebook,
                                                     convert_types=False)
        protocol_notype = glia.get_experiment_protocol(lab_notebook_notype,
                                                       name)
        date_prefix = os.path.join(data_directory,
                                   protocol_notype['date'].replace(':', '_'))
        frames_file = date_prefix + "_eyecandy_frames.log"
        video_file = date_prefix + "_eyecandy.mkv"
        frame_log = pd.read_csv(frames_file)
        frame_log = frame_log[:-1]  # last frame is not encoded for some reason
        ctx.obj["frame_log"] = frame_log
        ctx.obj["video_file"] = video_file
    except Exception as e:
        extype, value, tb = sys.exc_info()
        traceback.print_exc()
        print(e)
        # consistency fix: match analyze() so downstream code can rely on
        # these keys existing even when no frame log was found
        ctx.obj["frame_log"] = None
        ctx.obj["video_file"] = None
        print("Attempting to continue without frame log...")
Пример #10
0
def save_retina_fig(filename, fig):
    """Write a retina-level figure to <plot_directory>/00-all/<filename>.png."""
    logger.debug("Saving {} for retina".format(filename))
    target = os.path.join(config.plot_directory, "00-all", filename + ".png")
    fig.savefig(target)
Пример #11
0
def save_unit_fig(filename, unit_id, fig):
    """Write a per-unit figure to <plot_directory>/<unit_id>/<filename>.png."""
    logger.debug("Saving {} for {}".format(filename, unit_id))
    target = os.path.join(config.plot_directory, unit_id, filename + ".png")
    fig.savefig(target)
Пример #12
0
def classify_cmd(filename,
                 nsamples,
                 notebook,
                 skip,
                 debug=False,
                 verbose=False,
                 version=2,
                 processes=None,
                 n_draws=30):
    """Classify using converted NPZ.

    Resolves `filename` to an .npz file, loads the matching .stim file,
    then dispatches to the appropriate svc classifier based on the
    stimulus protocol name recorded in the metadata.
    """
    # resolve a bare name to the matching .npz file
    if not os.path.isfile(filename):
        filename = glia.match_filename(filename, 'npz')
    try:
        assert filename[-4:] == ".npz"
    except AssertionError:
        print("Please specify a npz file.")
        raise

    data_directory, data_name = os.path.split(filename)
    if data_directory == '':
        data_directory = os.getcwd()

    if not notebook:
        notebook = glia.find_notebook(data_directory)

    lab_notebook = glia.open_lab_notebook(notebook)

    name, extension = os.path.splitext(data_name)
    init_logging(name, processes, verbose, debug)
    stim_name = strip_generated(name)
    stimulus_file = os.path.join(data_directory, stim_name + ".stim")
    metadata, stimulus_list, method = glia.read_stimulus(stimulus_file)
    if not skip:
        assert method == 'analog-flicker'  # if failed, delete .stim

    data = np.load(filename)
    shape = np.shape(data['training_data'])
    logger.debug(f"Data dim: {shape}")

    plots_directory = os.path.join(data_directory, name + "-plots")
    os.makedirs(plots_directory, exist_ok=True)
    plot_directory = os.path.join(plots_directory, "00-all")
    os.makedirs(plot_directory, exist_ok=True)

    # dispatch on the stimulus protocol name from metadata; prefix matches
    # (re.match) are ordered most-specific first
    name = metadata['name']
    if re.match('checkerboard', name):
        svc.checkerboard_svc(data, metadata, stimulus_list, lab_notebook,
                             plot_directory, nsamples, n_draws)
    elif re.match('grating-sinusoidal', name):
        svc.grating_svc(data,
                        metadata,
                        stimulus_list,
                        lab_notebook,
                        plot_directory,
                        nsamples,
                        n_draws,
                        sinusoid=True)
    elif re.match('grating', name):
        svc.grating_svc(data, metadata, stimulus_list, lab_notebook,
                        plot_directory, nsamples, n_draws)
    elif name == 'letters-tiled':
        svc.tiled_letter_svc(data, metadata, stimulus_list, lab_notebook,
                             plot_directory, nsamples)
    elif name in ('eyechart-saccade', 'letters-saccade'):
        # both saccade protocols use the same image classifier;
        # previously two duplicated branches
        svc.image_svc(data, metadata, stimulus_list, lab_notebook,
                      plot_directory, nsamples)
    elif re.match('letter', name):
        svc.letter_svc(data, metadata, stimulus_list, lab_notebook,
                       plot_directory, nsamples)
    else:
        raise ValueError(f"unknown name: {name}")
Пример #13
0
def generate(ctx, filename, eyecandy, generate_method, notebook, number,
             nunits, stimulus):
    """Generate synthetic spike-train units for testing.

    Creates a number x number channel grid with `nunits` units each,
    either random units or fixed-rate (60 Hz) units, then prepares png
    plot output directories.
    """
    data_directory, data_name = os.path.split(filename)
    if data_directory == '':
        data_directory = os.getcwd()

    if not notebook:
        notebook = glia.find_notebook(data_directory)

    lab_notebook = glia.open_lab_notebook(notebook)
    name, ext = os.path.splitext(filename)

    ctx.obj = {'filename': generate_method + "_" + name}

    stimulus_file = os.path.join(data_directory, name + ".stim")
    try:
        metadata, stimulus_list, method = glia.read_stimulus(stimulus_file)
        print('found .stim file')
    except Exception:
        print('creating .stim file.')
        metadata, stimulus_list = glia.create_stimuli_without_analog(
            stimulus_file, notebook, name, eyecandy)

    ctx.obj["stimulus_list"] = stimulus_list
    ctx.obj["metadata"] = metadata
    # total experiment time: start of the last stimulus plus its lifespan
    last_stim = stimulus_list[-1]
    total_time = last_stim['start_time'] + last_stim['stimulus']['lifespan']
    units = {}
    retina_id = f'{generate_method}_{name}'
    print('generating test data')
    for channel_x in range(number):
        for channel_y in range(number):
            for unit_j in range(nunits):
                if generate_method == 'random':
                    u = glia.random_unit(total_time, retina_id,
                                         (channel_x, channel_y), unit_j)
                elif generate_method == "hz":
                    hz = 60
                    u = glia.hz_unit(total_time, hz, retina_id,
                                     (channel_x, channel_y), unit_j)
                else:
                    raise ValueError(
                        f"Undefined generate_method: {generate_method}")

                units[u.id] = u
    ctx.obj["units"] = units

    # prepare_output
    plot_directory = os.path.join(data_directory, f"{retina_id}-plots")
    config.plot_directory = plot_directory

    os.makedirs(plot_directory, exist_ok=True)
    os.chmod(plot_directory, 0o777)

    logger.debug("Outputting png")
    ctx.obj["c_unit_fig"] = glia.save_unit_fig
    ctx.obj["c_retina_fig"] = glia.save_retina_fig
    os.makedirs(os.path.join(plot_directory, "00-all"), exist_ok=True)

    # one subdirectory per unit for the per-unit pngs
    for unit_id in ctx.obj["units"].keys():
        name = unit_id
        os.makedirs(os.path.join(plot_directory, name), exist_ok=True)