def read_lick(folderpath):
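    """Read lick-related data for one session folder.

    Expects ``RT_264.mat``, ``lick_traces.mat`` and a ``<parent folder name>.mat``
    trials file inside ``folderpath``. Returns a dict with reaction times, the
    lick time axis, binarized lick traces per trial type, reward times and puff
    times.
    """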
    rez = {}
    
    ################################
    # Process Reaction times file
    ################################
    rt_file = os.path.join(folderpath, "RT_264.mat")
    rt = loadmat(rt_file)
    
    # NOTE: a fixed offset of 3.0 is added to the stored reaction times
    rez['reaction_time'] = 3.0 + rt['reaction_time']
    
    ################################
    # Process lick_traces file
    ################################
    def lick_filter(data, bot_th, top_th):
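        # Binarize a raw lick trace: NaNs count as no-lick, and samples at or
        # below bot_th or at or above top_th are marked as lick events (1)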
        data[np.isnan(data)] = 0
        return np.logical_or(data <= bot_th, data >= top_th).astype(int)
    
    lick_traces_file = os.path.join(folderpath, "lick_traces.mat")
    lick_traces = loadmat(lick_traces_file)
    
    nTimesLick = len(lick_traces['licks_go'])
    freqLick = 100 # Hz
    rez['tLicks'] = np.linspace(0, (nTimesLick-1) / freqLick, nTimesLick)
    
    # The top threshold stored in the file is sometimes wrong; Yaro said to use the exact value instead
    thBot, thTop = lick_traces['bot_thresh'], 2.64
    
    for k in ['licks_go', 'licks_nogo', 'licks_miss', 'licks_FA', 'licks_early']:
        rez[k] = lick_filter(lick_traces[k], thBot, thTop)
        
    ################################
    # Process trials file
    ################################
    TIMESCALE_TRACES = 0.001  # convert ms to seconds
    trials_file = os.path.join(folderpath, os.path.basename(os.path.dirname(folderpath))+".mat")
    #print(trials_file)
    lick_trials = loadmat(trials_file)

    # NOTE: lick_trials['licks']['lick_vector'] is just a repeat from above lick_traces file
#     lick_trials['licks'] = merge_dicts([matstruct2dict(obj) for obj in lick_trials['licks']])
    lick_trials['trials'] = merge_dicts([matstruct2dict(obj) for obj in lick_trials['trials']])
    rez['reward_time'] = np.array(lick_trials['trials']['reward_time'], dtype=float) * TIMESCALE_TRACES
    rez['puff'] = [np.array(puff, dtype=float)*TIMESCALE_TRACES for puff in lick_trials['trials']['puff']]
        
    return rez
def nifty_wrapper(src_name_h5, pwd_h5, pwd_mat):
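    """Compute transfer entropy with the MATLAB NIfTy wrapper script.

    Pipeline: read the ``data`` array from an HDF5 file in ``pwd_h5``, dump it
    to a temporary ``.mat`` file in ``pwd_mat``, run ``nifty_wrapper.m`` on it
    in MATLAB, then convert the resulting ``.mat`` file back to HDF5. Relies on
    a module-level ``pwd_lib`` variable pointing at the library root and on
    ``matlab`` being available on the PATH. Returns the path of the result
    HDF5 file.
    """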
    # 1) Load data from HDF5
    src_path_h5 = os.path.join(pwd_h5, src_name_h5)
    print("Reading source data from", src_path_h5)
    with h5py.File(src_path_h5, "r") as src_file_h5:
        src_data = np.copy(src_file_h5['data'])
    
    print("read data from H5 : ", src_data.shape, type(src_data))
    
    # 2) Save data as temporary .mat
    src_name_mat = 'source_' + src_name_h5.split('.')[0] + '.mat'
    rez_name_mat = 'result_' + src_name_h5.split('.')[0] + '.mat'
    src_path_mat = os.path.join(pwd_mat, src_name_mat)
    rez_path_mat = os.path.join(pwd_mat, rez_name_mat)
    print("Converting data to matlab file", src_path_mat)
    scipy.io.savemat(src_path_mat, {"data" : src_data})

    # 3) Run matlab NIfTy to get TE
    print("Started running matlab")

    action1 = 'nifty_path = "'       + os.path.join(pwd_lib, "nifty_wrapper") + '/";'
    action2 = 'source_file_name = "' + src_path_mat + '";'
    action3 = 'result_file_name = "' + rez_path_mat + '";'
    action4 = 'addpath(char(nifty_path));'
    action5 = 'run("nifty_wrapper.m");'
    action_sum = action1 + action2 + action3 + action4 + action5 + "exit;"
    
    #action1 = 'core_path = "' + path2p + '/";'
    #action2 = 'addpath(char(core_path + "codes/lib/nifty_wrapper/"));'
    #action3 = 'run("nifty_wrapper.m");'
    #action_sum = action1 + action2 + action3 + "exit;"
    
    print("..Action:", action_sum)
    subprocess.run(["matlab", "-nodisplay", "-nosplash", "-nodesktop", "-r", action_sum])
    # '"run(' +"'test_nifty_alyosha.m');exit;" + '"'

    # 4) Load NIfTy TE output from .mat
    print("Loading matlab results file", rez_path_mat)
    rez_data = loadmat(rez_path_mat)
    
    # 5) Save results as HDF5
    rez_path_h5 = os.path.join(pwd_h5, 'result_' + src_name_h5.split('.')[0] + '.h5')
    print("Writing results data to", rez_path_h5)
    
    with h5py.File(rez_path_h5, "w") as rez_file_h5:
        results_grp = rez_file_h5.create_group("results")
        results_grp['data'] = rez_data['results']['data']
        results_grp['TE_table'] = rez_data['results']['TE_table']
        results_grp['p_table'] = rez_data['results']['p_table']
        results_grp['delay_table'] = rez_data['results']['delay_table']
    
    return rez_path_h5
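# Example usage (hypothetical paths; assumes `matlab` is on the PATH and
# `pwd_lib` is defined at module level):
#   rez_h5 = nifty_wrapper("session.h5", "/data/h5", "/data/mat")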
def read_neuro_perf(folderpath, verbose=True):
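    """Read neuronal data and behaviour for one session folder.

    Loads ``data.mat`` (first axis indexes trials) and ``behaviorvar.mat``,
    flattens the MATLAB ``trials`` struct into a dict, and checks that the
    trial counts and indices in the behaviour are consistent with the data.
    Returns ``(data, behavior)``.
    """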
    # Read neuronal data and behaviour MAT files from the session folder
    if verbose:
        print("Reading Yaro data from", folderpath)
    datafilename = os.path.join(folderpath, "data.mat")
    behaviorfilename = os.path.join(folderpath, "behaviorvar.mat")

    data = loadmat(datafilename)['data']
    behavior = loadmat(behaviorfilename)

    # Get rid of useless fields in behaviour
    behavior = {k: v for k, v in behavior.items() if k[0] != '_'}

    # Convert trials structure to a dictionary
    behavior['trials'] = merge_dicts(
        [matstruct2dict(obj) for obj in behavior['trials']])

    # d_trials = matstruct2dict(behavior['trials'][0])
    # for i in range(1, len(behavior['trials'])):
    #     d_trials = merge_dict(d_trials, matstruct2dict(behavior['trials'][i]))
    # behavior['trials'] = d_trials

    # CONSISTENCY TEST:
    behKeys = ['iGO', 'iNOGO', 'iFA', 'iMISS']
    dataNTrials = data.shape[0]
    def behavToArray(b):
        return np.array([b], dtype=int) if isinstance(b, int) else b

    behNTrials = np.sum([len(behavToArray(behavior[k])) for k in behKeys])
    # Note: Matlab indices start from 1
    behMaxIdx = np.max(np.hstack([behavToArray(behavior[k]) for k in behKeys])) - 1
    if dataNTrials < behNTrials:
        # raise ValueError("Behaviour has more trials than data", behNTrials, dataNTrials)
        print("Behaviour has more trials than data", behNTrials, dataNTrials)
    if (behMaxIdx is not None) and (behMaxIdx >= dataNTrials):
        # raise ValueError("Behaviour max index must be less than number of trials", behMaxIdx, dataNTrials)
        print("Behaviour max index must be less than number of trials",
              behMaxIdx, dataNTrials)

    return data, behavior
def read_paw(folderpath, verbose=True):
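    """Read paw-tracking traces from ``trials.mat`` in ``folderpath``.

    The sampling frequency is inferred from the number of timesteps
    (64 -> 7 Hz, more than 250 -> 30 Hz). Returns a dict with the per-trial
    traces, the time axis and the sampling frequency.
    """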
    if verbose:
        print("Processing paw folder", folderpath)

    filepath = os.path.join(folderpath, 'trials.mat')
    rezdict = {'trialsPaw': loadmat(filepath)['trials']}

    nTrialsPaw, nTimesPaw = rezdict['trialsPaw'].shape
    if nTimesPaw == 64:
        freqPaw = 7
    elif nTimesPaw > 250:
        freqPaw = 30
    else:
        raise ValueError("Unexpected number of paw timesteps", nTimePaw)

    rezdict['tPaw'] = np.arange(0, nTimesPaw) / freqPaw
    rezdict['freqPaw'] = freqPaw
    return rezdict
def read_whisk(folderpath, verbose=True):
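    """Read whisking data for one session folder.

    Loads the whisking angle from ``whiskAngle.mat``, infers the sampling
    frequency from the trace length, computes the absolute whisking velocity,
    and reads first-touch times from ``<folder name>.txt`` if that file
    exists. Returns a dict with these quantities.
    """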
    if verbose:
        print("Processing whisk folder", folderpath)

    #############################
    # Read whisking angle
    #############################
    rezdict = {
        'whiskAngle':
        loadmat(os.path.join(folderpath, 'whiskAngle.mat'))['whiskAngle']
    }
    nTimesWhisk, nTrialsWhisk = rezdict['whiskAngle'].shape
    if nTimesWhisk <= 900:
        freqWhisk = 40
    elif nTimesWhisk >= 1600:
        freqWhisk = 200
    else:
        freqWhisk = 40
        # raise ValueError("Unexpected number of whisk timesteps", nTimesWhisk)
        print("Unexpected number of whisk timesteps", nTimesWhisk)

    rezdict['tWhisk'] = np.arange(0, nTimesWhisk) / freqWhisk
    # Absolute whisking velocity (angle units per second); a zero row is
    # appended so the shape matches whiskAngle
    rezdict['whiskAbsVelocity'] = np.vstack((
        np.abs(np.diff(rezdict['whiskAngle'], axis=0)) * freqWhisk,
        np.zeros(nTrialsWhisk)))

    #############################
    # Read first touch
    #############################
    firstTouchFilePath = os.path.join(folderpath,
                                      os.path.basename(folderpath) + '.txt')
    if not os.path.isfile(firstTouchFilePath):
        print("Warning: first touch file does not exist", firstTouchFilePath)
        rezdict['firstTouch'] = None
    else:
        # Skip the header line; the first-touch time is the second tab-separated column
        with open(firstTouchFilePath) as fLog:
            rezdict['firstTouch'] = np.array(
                [line.split('\t')[1] for line in fLog.readlines()[1:]],
                dtype=float)

    return rezdict
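# Example usage (hypothetical session path):
#   data, behavior = read_neuro_perf("/data/yaro/mouse1/session1")
#   whisk = read_whisk("/data/yaro/mouse1/session1", verbose=False)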