Example 1
def experiments_to_h5(experiments,
                      data_dset,
                      target_dset,
                      get_class=lambda x: x['metadata']['class'],
                      append=0,
                      progress=False,
                      class_dtype=np.uint16):
    """

    get_class is a function"""

    key_map = {}
    for k in experiments[0]['units'].keys():
        u = Unit.lookup[k]
        (row, column) = u.channel
        unit_num = u.unit_num
        key_map[k] = (row, column, unit_num)
    duration = experiments[0]['lifespan'] + append
    for e in experiments:
        try:
            assert duration == e['lifespan'] + append
        except AssertionError:
            logger.info(f"duration: {duration} != {e['lifespan']}, for {e}")
            raise
    classes = np.array(f_map(get_class)(experiments), dtype=class_dtype)

    print(f"assigning experiments to hdf5 file")
    for i, e in enumerate(tqdm(experiments)):
        spike_train_to_h5(e, key_map, data_dset, index=i)
    target_dset[:] = classes
    return (data_dset, target_dset)
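A minimal usage sketch, assuming the dataset layout mirrors the ndarray variant in Example 2 (one uint8 bin per millisecond per unit); the file and dataset names here are hypothetical:

import h5py
import numpy as np

n_experiments = len(experiments)
ms_bins = int(np.ceil(experiments[0]['lifespan'] * 1000))  # 1ms bins

with h5py.File("experiments.h5", "w") as f:  # hypothetical file name
    data_dset = f.create_dataset(
        "data",
        (n_experiments, ms_bins, Unit.nrow, Unit.ncol, Unit.nunit),
        dtype=np.uint8)
    target_dset = f.create_dataset("target", (n_experiments,),
                                   dtype=np.uint16)
    experiments_to_h5(experiments, data_dset, target_dset)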
Example 2
def experiments_to_ndarrays(experiments,
                            get_class=lambda x: x['metadata']['class'],
                            append=0,
                            progress=False,
                            class_dtype=np.uint16):
    """

    get_class is a function"""
    nE = len(experiments)
    logger.info(f"number of experiments to convert to ndarray: {nE}")
    print("converting to ndarray")

    key_map = {}
    for k in experiments[0]['units'].keys():
        u = Unit.lookup[k]
        (row, column) = u.channel
        unit_num = u.unit_num
        key_map[k] = (row, column, unit_num)
    duration = experiments[0]['lifespan'] + append
    for e in experiments:
        try:
            assert duration == e['lifespan'] + append
        except AssertionError:
            logger.info(f"duration: {duration} != {e['lifespan']}, for {e}")
            raise
    d = int(np.ceil(duration * 1000))  # 1ms bins
    shape = (nE, d, Unit.nrow, Unit.ncol, Unit.nunit)
    # at most 1 spike per 1ms bin in theory, so uint8 (max 255) is plenty
    data = np.zeros(shape, dtype=np.uint8)

    classes = np.array(f_map(get_class)(experiments), dtype=class_dtype)
    assert classes.shape == (nE, )

    to_sparse = partial(spike_train_to_sparse,
                        shape=shape[1:],
                        key_map=key_map)
    arrays = pmap(to_sparse, experiments, progress=True)
    for i, array in enumerate(arrays):
        data[i] = array

    return (data, classes)
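A short usage sketch; the alternate get_class shown is illustrative and the 'group' metadata key is hypothetical:

# Bin spike trains into a dense uint8 array plus a label vector.
data, classes = experiments_to_ndarrays(experiments)
# data.shape == (n_experiments, ms_bins, Unit.nrow, Unit.ncol, Unit.nunit)
# classes.shape == (n_experiments,)

# Label on a different metadata field by supplying get_class
# (hypothetical field name):
data, classes = experiments_to_ndarrays(
    experiments, get_class=lambda x: x['metadata']['group'])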
Example 3
def plot_units(unit_plot_function,
               c_unit_fig,
               *units_data,
               nplots=1,
               ncols=1,
               nrows=None,
               ax_xsize=2,
               ax_ysize=2,
               figure_title=None,
               transpose=False,
               subplot_kw=None):
    """Create a giant figure with one or more plots per unit.
    
    c_unit_fig determines what happens to fig when produced
    transpose==True will yield axes by column 

    Must supply an even number of arguments that alternate function, units. If one pair is provided,
    ncols will determine the number of columns. Otherwise, each unit will get one row."""
    logger.info("plotting units")
    number_of_units = len(units_data[0].keys())

    processes = config.processes

    all_data = zip_dictionaries(*units_data)

    def data_generator():
        for unit_id, data in all_data:
            yield (unit_id, data)

    pool = Pool(processes)
    logger.info("passing tasks to pool")
    plot_worker = partial(_plot_worker, unit_plot_function, c_unit_fig, nplots,
                          ncols, nrows, ax_xsize, ax_ysize, figure_title,
                          transpose, subplot_kw)
    list(
        pool.imap_unordered(plot_worker,
                            tqdm(data_generator(), total=number_of_units)))
    pool.close()
    pool.join()
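A sketch of calling plot_units. The per-unit plot function below is hypothetical (its exact signature depends on _plot_worker, which is assumed to pass the axes followed by one value per units_data dictionary); glia.save_unit_fig is the png-output continuation from Example 5:

# Hypothetical per-unit plot function.
def plot_raster(ax, spike_train):
    ax.eventplot(spike_train)

plot_units(plot_raster,
           glia.save_unit_fig,  # c_unit_fig consumes each finished figure
           units,               # dict keyed by unit id
           ncols=1,
           ax_xsize=8,
           ax_ysize=2)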
Example 4
def init_logging(name, processes, verbose, debug):
    #### LOGGING CONFIGURATION
    fh = logging.FileHandler(name + '.log')
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    if verbose:
        ch.setLevel(logging.INFO)
    elif debug:
        ch.setLevel(logging.DEBUG)
    else:
        ch.setLevel(logging.WARNING)
    if processes is not None:
        config.processes = processes
    formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s',
                                  '%H:%M:%S')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.info("Verbose logging on")
    logger.debug("Debug logging on")
Example 5
def analyze(ctx,
            filename,
            trigger,
            threshold,
            eyecandy,
            ignore_extra=False,
            fix_missing=False,
            output=None,
            notebook=None,
            configuration=None,
            verbose=False,
            debug=False,
            processes=None,
            by_channel=False,
            integrity_filter=0.0,
            analog_idx=1,
            default_channel_map=False,
            dev=False):
    """Analyze data recorded with eyecandy.
    
    This command/function preprocesses the data & aligns stimuli to ephys
    recording.
    """
    print("version 0.5.1")
    init_logging(filename, processes, verbose, debug)
    #### FILEPATHS
    logger.debug(str(filename) + "   " + str(os.path.curdir))
    if not os.path.isfile(filename):
        try:
            filename = glia.match_filename(filename, "txt")
        except Exception:
            filename = glia.match_filename(filename, "bxr")

    data_directory, data_name = os.path.split(filename)
    name, extension = os.path.splitext(data_name)
    analog_file = os.path.join(data_directory, name + '.analog')
    if not os.path.isfile(analog_file):
        # use 3brain analog file
        analog_file = os.path.join(data_directory, name + '.analog.brw')

    stimulus_file = os.path.join(data_directory, name + ".stim")
    ctx.obj = {"filename": os.path.join(data_directory, name)}
    print(f"Analyzing {name}")

    if configuration is not None:
        with open(configuration, 'r') as f:
            user_config = yaml.safe_load(f)
        config.user_config = user_config
        if "analog_calibration" in user_config:
            config.analog_calibration = user_config["analog_calibration"]
        if "notebook" in user_config:
            notebook = user_config["notebook"]
        if "eyecandy" in user_config:
            eyecandy = user_config["eyecandy"]
        if "processes" in user_config:
            processes = user_config["processes"]
        if "integrity_filter" in user_config:
            integrity_filter = user_config["integrity_filter"]
        if "by_channel" in user_config:
            by_channel = user_config["by_channel"]

    if not notebook:
        notebook = glia.find_notebook(data_directory)

    lab_notebook = glia.open_lab_notebook(notebook)
    logger.info(name)
    experiment_protocol = glia.get_experiment_protocol(lab_notebook, name)
    flicker_version = experiment_protocol["flickerVersion"]

    #### LOAD STIMULUS
    try:
        metadata, stimulus_list, method = glia.read_stimulus(stimulus_file)
        ctx.obj["stimulus_list"] = stimulus_list
        ctx.obj["metadata"] = metadata
        # assert method=='analog-flicker'
    except Exception:
        print("No .stim file found. Creating from .analog file.")
        if flicker_version == 0.3:
            metadata, stimulus_list = glia.create_stimuli(
                analog_file, stimulus_file, notebook, name, eyecandy,
                analog_idx, ignore_extra, config.analog_calibration, threshold)
            ctx.obj["stimulus_list"] = stimulus_list
            ctx.obj["metadata"] = metadata
            print('finished creating .stim file')
        elif trigger == "ttl":
            raise ValueError('not implemented')
        else:
            raise ValueError("invalid trigger: {}".format(trigger))

    # look for .frames file
    try:
        lab_notebook_notype = glia.open_lab_notebook(notebook,
                                                     convert_types=False)
        protocol_notype = glia.get_experiment_protocol(lab_notebook_notype,
                                                       name)
        date_prefix = os.path.join(data_directory,
                                   protocol_notype['date'].replace(':', '_'))
        frames_file = date_prefix + "_eyecandy_frames.log"
        video_file = date_prefix + "_eyecandy.mkv"
        frame_log = pd.read_csv(frames_file)
        frame_log = frame_log[:-1]  # last frame is not encoded for some reason
        ctx.obj["frame_log"] = frame_log
        ctx.obj["video_file"] = video_file
    except Exception as e:
        extype, value, tb = sys.exc_info()
        traceback.print_exc()
        print(e)
        ctx.obj["frame_log"] = None
        ctx.obj["video_file"] = None
        print("Attempting to continue without frame log...")

    #### LOAD SPIKES
    spyking_regex = re.compile(r'.*\.result\.hdf5$')
    eye = experiment_protocol['eye']
    experiment_n = experiment_protocol['experimentNumber']

    date = experiment_protocol['date'].date().strftime("%y%m%d")

    retina_id = date + '_R' + eye + '_E' + experiment_n
    if extension == ".txt":
        ctx.obj["units"] = glia.read_plexon_txt_file(filename, retina_id,
                                                     channel_map)
    elif extension == ".bxr":
        if default_channel_map:
            channel_map_3brain = config.channel_map_3brain
        else:
            channel_map_3brain = None
        ctx.obj["units"] = glia.read_3brain_spikes(filename,
                                                   retina_id,
                                                   channel_map_3brain,
                                                   truncate=dev)
    elif re.match(spyking_regex, filename):
        ctx.obj["units"] = glia.read_spyking_results(filename)
    else:
        raise ValueError(
            'could not read {}. Is it a plexon or spyking circus file?'.format(
                filename))

    #### DATA MUNGING OPTIONS
    if integrity_filter > 0.0:
        good_units = solid.filter_units_by_accuracy(ctx.obj["units"],
                                                    ctx.obj['stimulus_list'],
                                                    integrity_filter)
        filter_good_units = glia.f_filter(lambda u, v: u in good_units)
        ctx.obj["units"] = filter_good_units(ctx.obj["units"])

    if by_channel:
        ctx.obj["units"] = glia.combine_units_by_channel(ctx.obj["units"])

    # prepare_output
    plot_directory = os.path.join(data_directory, name + "-plots")
    config.plot_directory = plot_directory

    os.makedirs(plot_directory, exist_ok=True)
    os.chmod(plot_directory, 0o777)

    if output == "pdf":
        logger.debug("Outputting pdf")
        ctx.obj["retina_pdf"] = PdfPages(
            glia.plot_pdf_path(plot_directory, "retina"))
        ctx.obj["unit_pdfs"] = glia.open_pdfs(plot_directory,
                                              list(ctx.obj["units"].keys()),
                                              Unit.name_lookup())
        # c connotes 'continuation' for continuation passing style
        ctx.obj["c_unit_fig"] = partial(glia.add_to_unit_pdfs,
                                        unit_pdfs=ctx.obj["unit_pdfs"])
        ctx.obj["c_retina_fig"] = lambda x: ctx.obj["retina_pdf"].savefig(x)

    elif output == "png":
        logger.debug("Outputting png")
        ctx.obj["c_unit_fig"] = glia.save_unit_fig
        ctx.obj["c_retina_fig"] = glia.save_retina_fig
        os.makedirs(os.path.join(plot_directory, "00-all"), exist_ok=True)

        for unit_id in ctx.obj["units"].keys():
            os.makedirs(os.path.join(plot_directory, unit_id), exist_ok=True)
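analyze stores its shared state on ctx.obj for downstream subcommands. A sketch of a consumer, assuming analyze is wired as a click group (the subcommand name and registration are hypothetical; the ctx.obj keys come from the body above):

import click

@analyze.command("raster")  # hypothetical registration on the analyze group
@click.pass_context
def raster(ctx):
    units = ctx.obj["units"]                  # spike trains keyed by unit id
    stimulus_list = ctx.obj["stimulus_list"]  # stimuli aligned to the recording
    c_unit_fig = ctx.obj["c_unit_fig"]        # figure continuation per --output
    ...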