def readPeriStimulusTifImages(tifPaths, basPaths, nBasCh = 16, ch_camTrig = 'patch4', ch_stim = 'patch3',\
                  tifNameStr = '', time_preStim = 1, time_postStim = 10, thr_stim = 1.5,\
                  thr_camTrig = 1, maxAllowedTimeBetweenStimAndCamTrig = 0.5, n_jobs = 1):
    """
    Given the paths to .tif files stored by ScanImage (Bessel beam image settings) and the full
    paths to the accompanying bas files, returns a dictionary whose values hold peri-stimulus
    image data (Ca activity) in trialized format, along with some other pertinent info.

    Parameters
    ----------
    tifPaths: list of strings
        Paths to the .tif files written by ScanImage. In the current setting, each .tif
        file holds nCh*3000 images, where nCh = number of channels.
    basPaths: list of strings
        Full paths to the bas (BehavAndScan) files accompanying the imaging session.
    nBasCh: scalar
        Number of signal channels in the bas file.
    ch_camTrig: string
        Name of the channel in bas corresponding to the camera trigger signal
    ch_stim: string
        Name of the stimulus signal channel in bas.
    tifNameStr: string
        Only .tif files containing this will be read.
    time_preStim: scalar
        Length of the pre-stimulus period to include when reading images (same time
        units as bas['t'], typically seconds).
    time_postStim: scalar
        Length of the post-stimulus period to include.
    thr_stim: scalar
        Threshold to use for detection of stimuli in the stimulus channel of bas.
    thr_camTrig: scalar
        Threshold to use for detection of camera trigger onsets in the camera trigger channel of bas.
    maxAllowedTimeBetweenStimAndCamTrig: scalar
        If the camera trigger nearest to a stimulus is separated from it by more than this
        time interval, that stimulus trial is ignored.
    n_jobs: int
        Number of parallel jobs; if > 1, images are read in parallel using dask.
    Returns
    -------
    D: dict
        Dictionary containing the following keys:
        'I': array, (nTrials, nTime, nImageChannels, imageWidth, imageHeight)
            Image hyperstack arranged in conveniently-accessible trialized format.
        'tifInfo': dict
            Dictionary holding useful image metadata. Has following keys:
            'filePaths': list of strings
                Paths to .tif files
            'nImagesInFile': array of ints, (nFiles,)
                Number of images in each .tif file after accounting for the number of
                image channels.
            'nChannelsInFile': array of ints, (nFiles,)
                Number of image channels in each .tif file.
        'inds_stim': array of integers, (nStim,)
            Indices in bas coordinates where stimuli occurred.
        'inds_stim_img': array of integers, (nStim,)
            Indices in image coordinates where stimuli occurred
        'inds_camTrig': array of integers, (nCameraTriggers,)
            Indices in bas coordinates corresponding to the onsets of camera triggers.
        'bas': dict
            BehavAndScan data
        'inds_trl_excluded': array of integers
            Indices of trials excluded because the nearest camera trigger was too far
            from the stimulus or the trial was too short.
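
    Example
    -------
    A minimal usage sketch; the directory and file extensions below are hypothetical:

    >>> import glob
    >>> tifPaths = sorted(glob.glob('/path/to/session/*.tif'))
    >>> basPaths = sorted(glob.glob('/path/to/session/*.bas'))
    >>> D = readPeriStimulusTifImages(tifPaths, basPaths, nBasCh=16, n_jobs=4)
    >>> D['I'].shape  # (nTrials, nTime, nImageChannels, imageWidth, imageHeight)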

    """
    import tifffile as tff
    import numpy as np
    import apCode.FileTools as ft
    import apCode.ephys as ephys
    import apCode.SignalProcessingTools as spt
    import apCode.util as util

    def getImgIndsInTifs(tifInfo):
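        # For each tif file, compute the global (across-files) page indices it
        # contains, accounting for interleaved image channels.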
        nImgsInFile_cum = np.cumsum(tifInfo['nImagesInFile'] *
                                    tifInfo['nChannelsInFile'])
        imgIndsInTifs = []
        for i in range(len(nImgsInFile_cum)):
            if i == 0:
                inds_ = np.arange(0, nImgsInFile_cum[i])
            else:
                inds_ = np.arange(nImgsInFile_cum[i - 1], nImgsInFile_cum[i])
            imgIndsInTifs.append(inds_)
        return imgIndsInTifs

    ### Read relevant metadata from tif files in directory
    print('Reading ScanImage metadata from tif files...')

    tifInfo = ft.scanImageTifInfo(tifPaths)
    nCaImgs = np.sum(tifInfo['nImagesInFile'])

    ### Check for consistency in the number of image channels in all files.
    if len(np.unique(tifInfo['nChannelsInFile'])) > 1:
        print('Different number of image channels across files, check files!')
        return None
    nImgCh = tifInfo['nChannelsInFile'][0]
    print(f'{nCaImgs} {nImgCh}-channel images from all tif files')

    ### Get a list of indices corresponding to images in each of the tif files
    inds_imgsInTifs = getImgIndsInTifs(tifInfo)

    ### Read bas file to get stimulus and camera trigger indices required to align images and behavior
    print(
        'Reading and joining bas files, detecting stimuli and camera triggers...'
    )
    basList = [ephys.importCh(bp, nCh=nBasCh) for bp in basPaths]
    bas = concatenateBas(basList)
    inds_stim = spt.levelCrossings(bas[ch_stim], thr=thr_stim)[0]
    if len(inds_stim) == 0:
        print('No stimuli detected, check channel specification or threshold')
        return dict(bas=bas)
    inds_camTrig = spt.levelCrossings(bas[ch_camTrig], thr=thr_camTrig)[0]
    if len(inds_camTrig) == 0:
        print('No camera triggers detected, check channel specification or threshold')
        return dict(bas=bas)
    dt_vec = np.diff(bas['t'][inds_camTrig])
    dt_ca = np.round(np.mean(dt_vec) * 100) / 100
    print('Ca sampling rate = {}'.format(1 / dt_ca))
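    # Drop camera triggers that follow the previous one by less than half a Ca frame
    # period; these are likely spurious double-detections of the same trigger.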
    inds_del = np.where(dt_vec <= (0.5 * dt_ca))[0] + 1
    inds_camTrig = np.delete(inds_camTrig, inds_del)

    ### Deal with possible mismatch in number of camera trigger indices and number of images in tif files
    if nCaImgs < len(inds_camTrig):
        inds_camTrig = inds_camTrig[:nCaImgs]
        nCaImgs_extra = 0
    elif nCaImgs > len(inds_camTrig):
        nCaImgs_extra = nCaImgs - len(inds_camTrig)
    else:
        nCaImgs_extra = 0
    print('{} extra Ca2+ images'.format(nCaImgs_extra))
    print('{} stimuli and {} camera triggers'.format(len(inds_stim),
                                                     len(inds_camTrig)))

    ### Indices of ca images closest to stimulus
    inds_stim_img = spt.nearestMatchingInds(inds_stim, inds_camTrig)

    ### Find trials where the nearest cam trigger is farther than the stimulus by a certain amount
    inds_camTrigNearStim = inds_camTrig[inds_stim_img]
    t_stim = bas['t'][inds_stim]
    t_camTrigNearStim = bas['t'][inds_camTrigNearStim]
    inds_tooFar = np.where(
        np.abs(t_stim -
               t_camTrigNearStim) > maxAllowedTimeBetweenStimAndCamTrig)[0]
    inds_ca_all = np.arange(nCaImgs)
    nPreStim = int(time_preStim / dt_ca)
    nPostStim = int(time_postStim / dt_ca)
    print("{} pre-stim points, and {} post-stim points".format(
        nPreStim, nPostStim))
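    # For each stimulus, collect a window of Ca frame indices covering the pre- and
    # post-stimulus periods (stimulus frame indices offset by any extra Ca images).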
    inds_ca_trl = np.array(
        spt.segmentByEvents(inds_ca_all, inds_stim_img + nCaImgs_extra,
                            nPreStim, nPostStim))
    ### Find trials that are too short to include the pre- or post-stimulus period
    trlLens = np.array([len(trl_) for trl_ in inds_ca_trl])
    inds_tooShort = np.where(trlLens < np.max(trlLens))[0]
    inds_trl_del = np.union1d(inds_tooFar, inds_tooShort)
    inds_trl_keep = np.setdiff1d(np.arange(len(inds_ca_trl)), inds_trl_del)

    ### Exclude the above 2 types of trials from consideration
    if len(inds_trl_del) > 0:
        print('Excluding trials {}'.format(inds_trl_del))
        inds_ca_trl = inds_ca_trl[inds_trl_keep]

    I = []
    print('Reading trial-related images from tif files...')
    nTrls = len(inds_ca_trl)

    def trlImages(inds_ca_trl, inds_imgsInTifs, nImgCh, tifInfo, trl):
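        # Map the trial's Ca frame indices to raw tif page indices (channels are
        # interleaved), locate which tif file holds each page, read those pages,
        # and reshape to (nFrames, nImgCh, height, width).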
        trl_ = np.arange(trl.min() * nImgCh, (trl.max() + 1) * nImgCh)
        loc = util.locateItemsInSetsOfItems(trl_, inds_imgsInTifs)
        I_ = []
        for subInds, supInd in zip(loc['subInds'], loc['supInds']):
            with tff.TiffFile(tifInfo['filePaths'][supInd]) as tif:
                img = tif.asarray(key=subInds)
            I_.extend(img.reshape(-1, nImgCh, *img.shape[1:]))
        I_ = np.array(I_)
        return I_

    if n_jobs < 2:
        chunkSize = max(1, nTrls // 5)
        for iTrl, trl in enumerate(inds_ca_trl):
            if np.mod(iTrl, chunkSize) == 0:
                print('Trl # {}/{}'.format(iTrl + 1, nTrls))
            I_ = trlImages(inds_ca_trl, inds_imgsInTifs, nImgCh, tifInfo, trl)
            I.append(I_)
    else:
        print('Processing with dask')
        import dask
        from dask.diagnostics import ProgressBar
        for trl in inds_ca_trl:
            I_ = dask.delayed(trlImages)(inds_ca_trl, inds_imgsInTifs, nImgCh,
                                         tifInfo, trl)
            I.append(I_)
        with ProgressBar():
            I = dask.compute(*I)

    D = dict(I=np.squeeze(np.array(I)), tifInfo=tifInfo, inds_stim=inds_stim,
             inds_stim_img=inds_stim_img, inds_camTrig=inds_camTrig, bas=bas,
             inds_trl_excluded=inds_trl_del)
    return D


def dataFrameOfMatchedMtrAndCaTrls_singleFish(bas, ca = None, ch_camTrig = 'camTrigger', ch_stim = 'patch3',
                                   ch_switch = 'patch2', thr_camTrig= 4, Fs_bas = 6000, t_pre_bas = 0.2,\
                                   t_post_bas = 1.5, t_pre_ca = 1, t_post_ca = 10, n_jobs = 20):
    """
    Parameters
    ----------
    bas: dict
        Dictionary resulting from reading of a BAS (BehavAndScan) file.
    ca: array, (nRois, nSamples), optional
        Ca2+ signals; nSamples is expected to match the number of camera triggers.
    ch_camTrig, ch_stim, ch_switch: strings
        Names of the camera trigger, stimulus, and switch channels in bas.
    thr_camTrig: scalar
        Threshold for detecting camera trigger onsets.
    Fs_bas: scalar
        Sampling rate of bas (Hz).
    t_pre_bas, t_post_bas: scalars
        Pre- and post-stimulus durations (s) of the motor (bas) segments.
    t_pre_ca, t_post_ca: scalars
        Pre- and post-stimulus durations (s) of the Ca2+ segments.
    n_jobs: int
        Number of parallel jobs used when segmenting signals.
    Returns
    -------
    df: pandas DataFrame
        One row per retained trial, with columns 'mtr_trl' (motor segment),
        'caInds_trl' (Ca frame indices of the trial), 'ca_trl' (Ca segment, only
        present if ca is given), 'trlNum', 'trlNum_actual', 'stimAmp', and 'stimLoc'.
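
    Example
    -------
    A minimal usage sketch (bas and ca as described above; keyword values are illustrative):

    >>> df = dataFrameOfMatchedMtrAndCaTrls_singleFish(bas, ca=ca, ch_camTrig='camTrigger',
    ...                                                thr_camTrig=4, n_jobs=20)
    >>> df[['trlNum', 'stimAmp', 'stimLoc']].head()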
    """
    import numpy as np
    from scipy.stats import mode
    import apCode.SignalProcessingTools as spt
    from apCode import util
    import pandas as pd

    def motorFromBas(bas):
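        # Extract the motor signal from bas: prefer a key containing 'den'
        # (presumably a denoised channel) if present, else fall back to the raw
        # channel pairs (ch3/ch4 or ch1/ch2); return with shape (nSamples, nSignals).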
        keys = list(bas.keys())
        ind_mtr = util.findStrInList('den', keys)
        if len(ind_mtr) > 0:
            ind_mtr = ind_mtr[-1]
            x = np.squeeze(np.array(bas[keys[ind_mtr]]))
        else:
            if 'ch3' in bas:
                x = np.array([bas['ch3'], bas['ch4']])
            else:
                x = np.array([bas['ch1'], bas['ch2']])
        if x.shape[0] < x.shape[1]:
            x = x.T
        return x

    mtr = motorFromBas(bas)
    stimInds, stimAmps, stimHt = getStimInfo(bas, ch_stim=ch_stim,
                                             ch_camTrig=ch_camTrig, ch_switch=ch_switch)
    camTrigInds = getCamTrigInds(bas[ch_camTrig], thr=thr_camTrig)

    #    dt_bas = 1/Fs_bas
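    # Estimate the Ca2+ frame period and sampling rate from camera trigger spacing.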
    dt_ca = np.mean(np.diff(bas['t'][camTrigInds]))
    Fs_ca = int(np.round(1 / dt_ca))
    if ca is None:
        nFrames = len(camTrigInds)
    else:
        nFrames = ca.shape[1]
        d = len(camTrigInds) - ca.shape[1]
        if d > 0:
            print(f'Check threshold, {d} more camera triggers than expected')
        elif d < 0:
            print(f'Check threshold, {-d} fewer camera triggers than expected')
    n_pre_bas = int(np.round(t_pre_bas * Fs_bas))
    n_post_bas = int(np.round(t_post_bas * Fs_bas))
    n_pre_ca = int(np.round(t_pre_ca * Fs_ca))
    n_post_ca = int(np.round(t_post_ca * Fs_ca))

    stimFrames = spt.nearestMatchingInds(stimInds, camTrigInds) - 2
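    # Segment the motor signal around each stimulus onset (in bas samples), and
    # collect matching windows of Ca frame indices around each stimulus frame
    # (nearest Ca frame, shifted back by 2 frames above).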
    mtr_trl = spt.segmentByEvents(mtr,
                                  stimInds,
                                  n_pre_bas,
                                  n_post_bas,
                                  n_jobs=n_jobs)
    indsVec_ca = np.arange(nFrames)
    indsVec_ca_trl = spt.segmentByEvents(indsVec_ca,
                                         stimFrames,
                                         n_pre_ca,
                                         n_post_ca,
                                         n_jobs=n_jobs)
    trlNum_actual = np.arange(len(mtr_trl))
    lens_mtr = np.array([len(mtr_) for mtr_ in mtr_trl])
    lens_ca = np.array([len(inds_) for inds_ in indsVec_ca_trl])
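    # Discard trials whose motor or Ca segments deviate from the modal segment
    # length (e.g., trials truncated at the edges of the recording).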
    inds_del_mtr = np.where(lens_mtr != mode(lens_mtr)[0])[0]
    inds_del_ca = np.where(lens_ca != mode(lens_ca)[0])[0]
    inds_del_trl = np.union1d(inds_del_mtr, inds_del_ca)
    trlNum_actual = np.delete(trlNum_actual, inds_del_trl)
    mtr_trl = list(np.delete(
        mtr_trl, inds_del_trl,
        axis=0))  # Convert to list so it can later be put into a dataframe
    indsVec_ca_trl = list(np.delete(indsVec_ca_trl, inds_del_trl, axis=0))
    dic = dict(mtr_trl=mtr_trl, caInds_trl=indsVec_ca_trl)
    if ca is not None:
        if np.ndim(ca) == 1:
            ca = ca[np.newaxis, ...]
        ca_trl = [ca[:, inds_] for inds_ in indsVec_ca_trl]
        dic['ca_trl'] = ca_trl
    trlNum = np.arange(len(mtr_trl))
    dic['trlNum'] = trlNum
    dic['trlNum_actual'] = trlNum_actual
    dic['stimAmp'] = np.delete(stimAmps, inds_del_trl)
    dic['stimLoc'] = np.delete(stimHt, inds_del_trl)
    return pd.DataFrame(dic, columns=dic.keys())