Example #1
def drain_model(
        varMdl,
        strRoi,
        strHmsph,
        strPthPrf,
        strPthPrfOt,
        strPthPltOt,  #noqa
        strFlTp,
        varDpi,
        strXlabel,
        strYlabel,
        lstCon,
        lstConLbl,
        varNumIt,
        varCnfLw,
        varCnfUp,
        varNseRndSd,
        varNseSys,
        lstFctr,
        varAcrSubsYmin01,
        varAcrSubsYmax01,
        varAcrSubsYmin02,
        varAcrSubsYmax02,
        tplPadY=(0.4, 0.1),
        varNumLblY=5):
    """Model-based correction of draining effect."""
    # -------------------------------------------------------------------------
    # *** Load depth profile from disk

    print('-Model-based correction of draining effect')

    print('---Loading data')

    # Load array for first condition to get dimensions:
    objNpz = np.load(strPthPrf.format(lstCon[0]))
    aryTmpDpth = objNpz['arySubDpthMns']

    # Number of subjects:
    varNumSub = aryTmpDpth.shape[0]
    # Get number of depth levels from input array:
    varNumDpth = aryTmpDpth.shape[1]
    # Number of conditions:
    varNumCon = len(lstCon)

    # Array for single-subject depth sampling results, of the form
    # aryEmpSnSb[idxSub, idxCondition, idxDpth].
    aryEmpSnSb = np.zeros((varNumSub, varNumCon, varNumDpth))

    # Load single-condition arrays from disk:
    for idxCon in range(varNumCon):

        # ---------------------------------------------------------------------
        # --- Makeshift solution for asymmetrical ROIs ---
        # In the surface experiment, the central ROI needs to be slightly
        # different for the 'Kanizsa' condition than for the 'Kanizsa rotated'
        # condition. In the 'Kanizsa' condition, the central ROI is a square
        # (avoiding the illusory contours). In the 'Kanizsa rotated'
        # condition, the central ROI is a diamond of the same size as the
        # square (rotated by 45 deg). The diamond avoids the rotated Kanizsa
        # inducer (which
        # extends further towards the centre of the field of view, because the
        # 'mouth' of the inducer is oriented away from the centre).

        # Current condition:
        strTmpCon = lstCon[idxCon]

        # If processing the central ROI for the 'Kanizsa rotated' condition,
        # don't use the square ROI mask, but use the diamond ROI instead.
        lgcTmp = (('kanizsa_rotated' in strTmpCon) and ('centre' in strPthPrf))

        if lgcTmp:
            print(('------Using diamond ROI (instead of square ROI) for ' +
                   'Kanizsa rotated condition.'))

            # Use depth profiles from diamond ROI:
            strPthPrfTmp = strPthPrf.replace('centre', 'diamond')

        else:
            # Use square ROI:
            strPthPrfTmp = strPthPrf

        # Load depth profiles from disk:
        objNpz = np.load(strPthPrfTmp.format(strTmpCon))

        # --- End of makeshift solution ---
        # ---------------------------------------------------------------------

        # objNpz = np.load(strPthPrf.format(lstCon[idxCon]))

        # Get array from npz object:
        aryEmpSnSb[:, idxCon, :] = objNpz['arySubDpthMns']

    # Array with number of vertices (for weighted averaging across subjects;
    # since the deconvolution is done separately for each subject, this vector
    # is only loaded to be passed on to the file with the deconvolved depth
    # profiles for weighted averaging in downstream analysis steps), shape:
    # vecNumInc[subjects].
    vecNumInc = objNpz['vecNumInc']

    # Number of subjects:
    # varNumSub = aryEmpSnSb.shape[0]

    # Number of conditions:
    # varNumCon = aryEmpSnSb.shape[1]

    # Number of equi-volume depth levels in the input data:
    # varNumDpth = aryEmpSnSb.shape[2]

    # -------------------------------------------------------------------------
    # *** Subject-by-subject deconvolution

    print('---Subject-by-subject deconvolution')

    # Array for single-subject interpolation result (before deconvolution):
    aryEmp5SnSb = np.zeros((varNumSub, varNumCon, 5))

    if (varMdl != 4) and (varMdl != 5) and (varMdl != 6):
        # Array for single-subject deconvolution result (defined at 5 depth
        # levels):
        aryDecon5 = np.zeros((varNumSub, varNumCon, 5))
        # Array for deconvolution results in equi-volume space:
        aryDecon = np.zeros((varNumSub, varNumCon, varNumDpth))

    elif (varMdl == 4) or (varMdl == 5):
        # The array for single-subject deconvolution result has an additional
        # dimension in case of models 4 & 5 (number of iterations):
        aryDecon5 = np.zeros((varNumSub, varNumIt, varNumCon, 5))
        # Array for deconvolution results in equi-volume space:
        aryDecon = np.zeros((varNumSub, varNumIt, varNumCon, varNumDpth))
        # Generate random noise for model 4:
        aryNseRnd = np.random.randn(varNumIt, varNumCon, varNumDpth)
        # Scale variance:
        aryNseRnd = np.multiply(aryNseRnd, varNseRndSd)
        # Centre at one:
        aryNseRnd = np.add(aryNseRnd, 1.0)
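        # In other words, aryNseRnd contains multiplicative noise factors
        # drawn from a normal distribution with mean 1.0 and standard
        # deviation varNseRndSd; how these factors are applied to the depth
        # profiles is defined in deconv_04 / deconv_05.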

    if varMdl == 5:
        # Additional array for deconvolution results with systematic error,
        # defined at 5 depth levels:
        arySys5 = np.zeros((varNumSub, 2, varNumCon, 5))
        # Array for deconvolution results with systematic error, defined at
        # empirical depth levels:
        arySys = np.zeros((varNumSub, 2, varNumCon, varNumDpth))

    if varMdl == 6:
        # Array for single-subject deconvolution result (defined at 5 depth
        # levels):
        aryDecon5 = np.zeros((varNumSub, len(lstFctr), varNumCon, 5))
        # Array for deconvolution results in equi-volume space:
        aryDecon = np.zeros((varNumSub, len(lstFctr), varNumCon, varNumDpth))

    for idxSub in range(0, varNumSub):

        # ---------------------------------------------------------------------
        # *** Interpolation (downsampling)

        # The empirical depth profiles are defined at more depth levels than
        # the draining model. We downsample the empirical depth profiles to the
        # number of depth levels of the model.

        # The relative thickness of the layers differs between V1 and V2/V3.
        if strRoi == 'v1':
            print('------Interpolation - V1')
            # Relative thickness of the layers (layer VI, 20%; layer V, 10%;
            # layer IV, 40%; layer II/III, 20%; layer I, 10%; Markuerkiaga et
            # al. 2016). lstThck = [0.2, 0.1, 0.4, 0.2, 0.1]
            # From the relative thickness, we derive the relative position of
            # the layers (we set the position of each layer to the sum of all
            # lower layers plus half its own thickness):
            vecPosMdl = np.array([0.1, 0.25, 0.5, 0.8, 0.95])

        elif (strRoi == 'v2') or ((strRoi == 'v3')):
            print('------Interpolation - V2/V3')
            # Relative position of the layers, according to Weber et al., 2008,
            # Figure 5C, p. 2322. Their data is on 'extrastriate cortex', from
            # V2 to V5. We start with the absolute depth:
            vecPosMdl = np.array([160.0, 590.0, 1110.0, 1400.0, 1620.0])
            # Divide by overall thickness (1.7 mm):
            vecPosMdl = np.divide(vecPosMdl, 1700.0)

        # Position of empirical datapoints:
        vecPosEmp = np.linspace(np.min(vecPosMdl),
                                np.max(vecPosMdl),
                                num=varNumDpth,
                                endpoint=True)
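        # For illustration (hypothetical example): with varNumDpth = 11 and
        # the V1 model positions above, vecPosEmp covers the range from 0.1 to
        # 0.95 in 11 equally spaced steps, whereas vecPosMdl marks the centres
        # of the five cortical layers within that range.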

        # Vector for downsampled empirical depth profiles:
        aryEmp5 = np.zeros((varNumCon, 5))

        # Loop through conditions and downsample the depth profiles:
        for idxCon in range(0, varNumCon):
            # Interpolation:
            aryEmp5[idxCon] = griddata(vecPosEmp,
                                       aryEmpSnSb[idxSub, idxCon, :],
                                       vecPosMdl,
                                       method='cubic')

        # Put interpolation result for this subject into the array:
        aryEmp5SnSb[idxSub, :, :] = np.copy(aryEmp5)

        # ---------------------------------------------------------------------
        # *** Subtraction of draining effect

        # (1) Deconvolution based on Markuerkiaga et al. (2016).
        if varMdl == 1:
            aryDecon5[idxSub, :, :] = deconv_01(varNumCon,
                                                aryEmp5SnSb[idxSub, :, :])

        # (2) Deconvolution based on Markuerkiaga et al. (2016) & scaling based
        #     on Markuerkiaga et al. (2016).
        elif varMdl == 2:
            aryDecon5[idxSub, :, :] = deconv_02(varNumCon,
                                                aryEmp5SnSb[idxSub, :, :])

        # (3) Deconvolution based on Markuerkiaga et al. (2016) & scaling based
        #     on Weber et al. (2008).
        elif varMdl == 3:
            aryDecon5[idxSub, :, :] = deconv_03(varNumCon,
                                                aryEmp5SnSb[idxSub, :, :],
                                                strRoi=strRoi)

        # (4) Deconvolution based on Markuerkiaga et al. (2016), with random
        #     error.
        elif varMdl == 4:
            aryDecon5[idxSub, :, :, :] = deconv_04(varNumCon, aryEmp5,
                                                   aryNseRnd)

        # (5) Deconvolution based on Markuerkiaga et al. (2016), with random
        #     and systematic error.
        elif varMdl == 5:
            aryDecon5[idxSub, :, :, :], arySys5[idxSub, :, :, :] = \
                deconv_05(varNumCon, aryEmp5, aryNseRnd, varNseSys)

        # (6) Deconvolution based on Markuerkiaga et al. (2016), with deep GM
        #     signal scaling factor.
        elif varMdl == 6:
            aryDecon5[idxSub, :, :, :] = \
                deconv_06(varNumCon, aryEmp5, lstFctr)

        # (7) Deconvolution based on Markuerkiaga et al. (2016); same as (1),
        #     but using matrix inversion instead of iterative subtraction.
        elif varMdl == 7:
            aryDecon5[idxSub, :, :] = deconv_07(varNumCon,
                                                aryEmp5SnSb[idxSub, :, :])

        # ---------------------------------------------------------------------
        # *** Interpolation

        # The original depth profiles were in 'equi-volume' space, and needed
        # to be downsampled in order to apply the deconvolution (because the
        # deconvolution model is defined at a lower number of depth levels than
        # the equivolume space). Here, the results of the deconvolution are
        # brought back into equivolume space. This is advantageous for the
        # creation of depth plots (equal spacing of data points on x-axis), and
        # for the calculation of peak positions (no additional information
        # about relative position of datapoints needs to be passed on).

        # Sampling points for equi-volume space:
        vecIntpEqui = np.linspace(np.min(vecPosMdl),
                                  np.max(vecPosMdl),
                                  num=varNumDpth,
                                  endpoint=True)

        if (varMdl != 4) and (varMdl != 5) and (varMdl != 6):

            # Loop through conditions:
            for idxCon in range(0, varNumCon):

                # Interpolation back into equi-volume space:
                aryDecon[idxSub, idxCon, :] = griddata(vecPosMdl,
                                                       aryDecon5[idxSub,
                                                                 idxCon, :],
                                                       vecIntpEqui,
                                                       method='cubic')

        elif (varMdl == 4) or (varMdl == 5):

            # Loop through iterations:
            for idxIt in range(0, varNumIt):

                # Loop through conditions:
                for idxCon in range(0, varNumCon):

                    # Interpolation back into equi-volume space:
                    aryDecon[idxSub, idxIt, idxCon, :] = \
                        griddata(vecPosMdl,
                                 aryDecon5[idxSub, idxIt, idxCon, :],
                                 vecIntpEqui,
                                 method='cubic')

        # For model 5, also resample systematic error term:
        if varMdl == 5:

            # Loop through conditions:
            for idxCon in range(0, varNumCon):

                # Interpolation for lower limit of systematic error term:
                arySys[idxSub, 0, idxCon, :] = \
                    griddata(vecPosMdl,
                             arySys5[idxSub, 0, idxCon, :],
                             vecIntpEqui,
                             method='cubic')

                # Interpolation for upper limit of systematic error term:
                arySys[idxSub, 1, idxCon, :] = \
                    griddata(vecPosMdl,
                             arySys5[idxSub, 1, idxCon, :],
                             vecIntpEqui,
                             method='cubic')

        # For model 6, loop through deep-GM-signal-intensity-scaling-factors:
        if varMdl == 6:

            # The array now has the form: aryDecon[idxSub, idxFctr, idxCon,
            # idxDepth], where idxFctr corresponds to the
            # deep-GM-signal-intensity-scaling-factors.

            # Loop through factors:
            for idxFctr in range(len(lstFctr)):

                # Loop through conditions:
                for idxCon in range(varNumCon):

                    # Interpolation back into equi-volume space:
                    aryDecon[idxSub, idxFctr, idxCon, :] = \
                        griddata(vecPosMdl,
                                 aryDecon5[idxSub, idxFctr, idxCon, :],
                                 vecIntpEqui,
                                 method='cubic')

    # -------------------------------------------------------------------------
    # *** Save corrected depth profiles

    if (varMdl != 4) and (varMdl != 5) and (varMdl != 6):

        # Save deconvolved depth profiles to disk. The depth profile for each
        # condition is saved to a separate file (for consistency):

        for idxCon in range(varNumCon):

            # Form of the array that is saved to disk:
            # aryDecon[subject, depth]

            # In addition, a vector with the number of vertices (for that ROI
            # in that subject) is saved, in order to be able to normalise when
            # averaging over subjects. Shape: vecNumInc[subject]

            # Save subject-level depth profiles, and number of vertices per
            # subject:
            np.savez(strPthPrfOt.format(lstCon[idxCon]),
                     arySubDpthMns=aryDecon[:, idxCon, :],
                     vecNumInc=vecNumInc)

    # -------------------------------------------------------------------------
    # *** Peak positions percentile bootstrap

    # Bootstrapping in order to obtain an estimate of across-subjects variance
    # is not performed for models 4, 5 & 6. (These models are used to test
    # the effect of error in the model assumptions.)
    if (varMdl != 4) and (varMdl != 5) and (varMdl != 6):

        print('---Peak positions in depth profiles - percentile bootstrap')

        # We bootstrap the peak finding. Peak finding needs to be performed
        # both before and after deconvolution, separately for all stimulus
        # conditions.

        # Random array with subject indices for bootstrapping of the form
        # aryRnd[varNumIt, varNumSub]. Each row includes the indices of the
        # subjects to be sampled on that iteration.
        aryRnd = np.random.randint(0,
                                   high=varNumSub,
                                   size=(varNumIt, varNumSub))

        # Loop before/after deconvolution:
        for idxDec in range(2):

            if idxDec == 0:
                print('------UNCORRECTED')

            if idxDec == 1:
                print('------CORRECTED')

            # Array for peak positions before & after deconvolution, of the
            # form aryPks01[idxDec, idxCondition, idxIteration]
            aryPks01 = np.zeros((2, varNumCon, varNumIt))

            # Array for actual bootstrap samples:
            aryBoo = np.zeros((varNumIt, varNumDpth))

            # Loop through conditions:
            for idxCon in range(0, varNumCon):

                # Create array with bootstrap samples:
                for idxIt in range(0, varNumIt):

                    # Before deconvolution:
                    if idxDec == 0:
                        # Take mean across subjects in bootstrap samples:
                        aryBoo[idxIt, :] = np.mean(aryEmpSnSb[aryRnd[idxIt, :],
                                                              idxCon, :],
                                                   axis=0)

                    # After deconvolution:
                    if idxDec == 1:
                        # Take mean across subjects in bootstrap samples:
                        aryBoo[idxIt, :] = np.mean(aryDecon[aryRnd[idxIt, :],
                                                            idxCon, :],
                                                   axis=0)

                # Find peaks:
                aryPks01[idxDec, idxCon, :] = find_peak(aryBoo,
                                                        varNumIntp=100,
                                                        varSd=0.05,
                                                        lgcStat=False)

                # Median peak position:
                varTmpMed = np.median(aryPks01[idxDec, idxCon, :])

                # Confidence interval (percentile bootstrap):
                varTmpCnfLw = np.percentile(aryPks01[idxDec, idxCon, :],
                                            varCnfLw)
                varTmpCnfUp = np.percentile(aryPks01[idxDec, idxCon, :],
                                            varCnfUp)

                # Print result:
                strTmp = ('---------Median peak position: ' +
                          str(np.around(varTmpMed, decimals=2)))
                print(strTmp)
                strTmp = ('---------Percentile bootstrap ' +
                          str(np.around(varCnfLw, decimals=1)) + '%: ' +
                          str(np.around(varTmpCnfLw, decimals=2)))
                print(strTmp)
                strTmp = ('---------Percentile bootstrap ' +
                          str(np.around(varCnfUp, decimals=1)) + '%: ' +
                          str(np.around(varTmpCnfUp, decimals=2)))
                print(strTmp)

    # -------------------------------------------------------------------------
    # *** Plot results

    print('---Plot results')

    if (varMdl != 4) and (varMdl != 5) and (varMdl != 6):

        # Plot across-subjects mean before deconvolution:
        strTmpTtl = '{} before deconvolution'.format(strRoi.upper())
        strTmpPth = (strPthPltOt + 'before_')
        plt_dpth_prfl_acr_subs(aryEmpSnSb,
                               varNumSub,
                               varNumDpth,
                               varNumCon,
                               varDpi,
                               varAcrSubsYmin01,
                               varAcrSubsYmax01,
                               lstConLbl,
                               strXlabel,
                               strYlabel,
                               strTmpTtl,
                               strTmpPth,
                               strFlTp,
                               strErr='sem',
                               vecX=vecPosEmp,
                               vecWghts=vecNumInc,
                               varNumLblY=varNumLblY,
                               tplPadY=tplPadY)

        # Across-subjects mean after deconvolution:
        strTmpTtl = '{} after deconvolution'.format(strRoi.upper())
        strTmpPth = (strPthPltOt + 'after_')
        plt_dpth_prfl_acr_subs(aryDecon,
                               varNumSub,
                               varNumDpth,
                               varNumCon,
                               varDpi,
                               varAcrSubsYmin02,
                               varAcrSubsYmax02,
                               lstConLbl,
                               strXlabel,
                               strYlabel,
                               strTmpTtl,
                               strTmpPth,
                               strFlTp,
                               strErr='sem',
                               vecX=vecIntpEqui,
                               vecWghts=vecNumInc,
                               varNumLblY=varNumLblY,
                               tplPadY=tplPadY)

    elif varMdl == 4:

        # For 'model 4', i.e. the random noise model, we are interested in the
        # variance across random-noise iterations. We are *not* interested in
        # the variance across subjects in this case. Because we used the same
        # random noise across subjects, we can average over subjects.
        aryDecon = np.mean(aryDecon, axis=0)

        # Across-subjects mean after deconvolution:
        strTmpTtl = '{} after deconvolution'.format(strRoi.upper())
        strTmpPth = (strPthPltOt + 'after_')
        plt_dpth_prfl_acr_subs(aryDecon,
                               varNumSub,
                               varNumDpth,
                               varNumCon,
                               varDpi,
                               varAcrSubsYmin02,
                               varAcrSubsYmax02,
                               lstConLbl,
                               strXlabel,
                               strYlabel,
                               strTmpTtl,
                               strTmpPth,
                               strFlTp,
                               strErr='prct95',
                               vecX=vecIntpEqui)

    elif varMdl == 5:

        # For 'model 5', i.e. the random & systematic noise model, we are
        # interested in the variance across random-noise iterations. We are
        # *not* interested in the variance across subjects in this case.
        # Because we used the same random noise across subjects, we can average
        # over subjects.
        aryDecon = np.mean(aryDecon, axis=0)

        # Random noise - mean across iterations:
        aryRndMne = np.mean(aryDecon, axis=0)
        # Random noise - lower percentile:
        aryRndConfLw = np.percentile(aryDecon, varCnfLw, axis=0)
        # Random noise - upper percentile:
        aryRndConfUp = np.percentile(aryDecon, varCnfUp, axis=0)

        # For model 5, we only plot one stimulus condition (condition 4):
        varTmpCon = 3
        aryRndMne = aryRndMne[varTmpCon, :]
        aryRndConfLw = aryRndConfLw[varTmpCon, :]
        aryRndConfUp = aryRndConfUp[varTmpCon, :]

        # Systematic error term - mean across subjects:
        arySysMne = np.mean(arySys, axis=0)

        # Patching together systematic and random error terms:
        aryComb = np.array([
            aryRndMne, arySysMne[0, varTmpCon, :], arySysMne[1, varTmpCon, :]
        ])

        # Patching together array for error shading (no shading for systematic
        # error term):
        aryErrLw = np.array([aryRndConfLw,
                             arySysMne[0, varTmpCon, :],
                             arySysMne[1, varTmpCon, :]])
        aryErrUp = np.array([aryRndConfUp,
                             arySysMne[0, varTmpCon, :],
                             arySysMne[1, varTmpCon, :]])

        # *** Plot response at half maximum contrast across depth

        strTmpTtl = '{}'.format(strRoi.upper())
        strTmpPth = (strPthPltOt + 'after_')

        # Labels for model 5:
        lstLblMdl5 = ['Random error', 'Systematic error', 'Systematic error']

        # Colour for systematic error plot:
        aryClr = np.array(([22.0, 41.0, 248.0],
                           [230.0, 56.0, 60.0],
                           [230.0, 56.0, 60.0]))
        aryClr = np.divide(aryClr, 255.0)

        plt_dpth_prfl(
            aryComb,  # aryData[Condition, Depth]
            0,  # aryError[Con., Depth]
            varNumDpth,  # Number of depth levels (on the x-axis)
            3,  # Number of conditions (separate lines)
            varDpi,  # Resolution of the output figure
            0.0,  # Minimum of Y axis
            2.0,  # Maximum of Y axis
            False,  # Bool.: whether to convert y axis to %
            lstLblMdl5,  # Labels for conditions (separate lines)
            strXlabel,  # Label on x axis
            strYlabel,  # Label on y axis
            strTmpTtl,  # Figure title
            True,  # Boolean: whether to plot a legend
            (strPthPltOt + 'after' + strFlTp),
            varSizeX=2000.0,
            varSizeY=1400.0,
            aryCnfLw=aryErrLw,
            aryCnfUp=aryErrUp,
            aryClr=aryClr,
            vecX=vecIntpEqui)

    elif varMdl == 6:

        # The array now has the form: aryDecon[idxSub, idxFctr, idxCon,
        # idxDepth], where idxFctr corresponds to the
        # deep-GM-signal-intensity-scaling-factors.

        # For 'model 6', i.e. the deep-GM signal underestimation model, we are
        # *not* interested in the variance across subjects, but in the effect
        # of the deep-GM signal scaling factor. Because we used the same
        # deep-GM scaling factor across subjects, we can average over subjects.
        aryDecon = np.mean(aryDecon, axis=0)

        # The array now has the form: aryDecon[idxFctr, idxCon, idxDepth],
        # where idxFctr corresponds to the
        # deep-GM-signal-intensity-scaling-factors.

        # Reduce further; only one stimulus condition is plotted. The
        # deep-GM-signal-intensity-scaling-factors are treated as conditions
        # for the plot.
        aryDecon = aryDecon[:, 3, :]

        # The array now has the form: aryDecon[idxFctr, idxDepth], where
        # idxFctr corresponds to the deep-GM-signal-intensity-scaling-factors.

        # Dummy error array (no error will be plotted):
        aryErr = np.zeros(aryDecon.shape)

        strTmpTtl = '{}'.format(strRoi.upper())
        strTmpPth = (strPthPltOt + 'after_')

        # Labels for model 6 (deep-GM-signal-intensity-scaling-factors):
        lstLblMdl5 = [(str(int(np.around(x * 100.0))) + ' %') for x in lstFctr]

        plt_dpth_prfl(
            aryDecon,  # aryData[Condition, Depth]
            aryErr,  # aryError[Con., Depth]
            varNumDpth,  # Number of depth levels (on x-axis)
            aryDecon.shape[0],  # Number conditions (separate lines)
            varDpi,  # Resolution of the output figure
            0.0,  # Minimum of Y axis
            2.0,  # Maximum of Y axis
            False,  # Bool: convert y axis to % ?
            lstLblMdl5,  # Condition labels (separate lines)
            strXlabel,  # Label on x axis
            strYlabel,  # Label on y axis
            strTmpTtl,  # Figure title
            True,  # Boolean: whether to plot a legend
            (strPthPltOt + 'after' + strFlTp),
            varSizeX=2000.0,
            varSizeY=1400.0,
            vecX=vecIntpEqui)

    # -------------------------------------------------------------------------
    print('-Done.')
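
# Minimal, self-contained sketch (not part of the pipeline) of the resampling
# round trip used in drain_model above: empirical depth profiles are
# downsampled to the five layer positions of the draining model with cubic
# interpolation, and the (corrected) profiles are interpolated back onto the
# equi-volume grid. All names and values below are made up for illustration.
import numpy as np
from scipy.interpolate import griddata

# Model layer positions (V1) and a hypothetical equi-volume sampling grid:
vecPosMdl = np.array([0.1, 0.25, 0.5, 0.8, 0.95])
vecPosEmp = np.linspace(0.1, 0.95, num=11, endpoint=True)

# Hypothetical empirical depth profile (11 equi-volume depth levels):
vecPrfEmp = np.linspace(1.0, 2.0, num=11)

# Downsample the empirical profile to the five model depth levels:
vecPrf5 = griddata(vecPosEmp, vecPrfEmp, vecPosMdl, method='cubic')

# (The deconvolution, e.g. deconv_01, would act on vecPrf5 at this point.)

# Interpolate the (corrected) profile back onto the equi-volume grid:
vecPrfEqui = griddata(vecPosMdl, vecPrf5, vecPosEmp, method='cubic')
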
                    # (2) Calculate difference between conditions (within
                    #     subjects).
                    aryDiff = np.subtract(aryDpth01, aryDpth02)

                    # (3) Mean across subjects.
                    aryMneA[idxDiff, :] = np.mean(aryDiff, axis=0)

                    # Standard deviation:
                    aryStdA[idxDiff, :] = np.std(aryDiff, axis=0)

                # Plot results:
                plt_dpth_prfl(
                    aryMneA, aryStdA, varNumDpth, varNumDiff, varDpi, varYmin,
                    varYmax, False, lstConLbl,
                    'Cortical depth level (equivolume)',
                    'fMRI signal change (arbitrary units)',
                    (lstRoi[idxRoi].upper() + ' ' +
                     lstHmsph[idxHmsph].upper()), True,
                    (strPthPltOt.format(lstMetaCon[idxMtaCn], lstRoi[idxRoi],
                                        lstHmsph[idxHmsph], lstMdl[idxMdl]) +
                     'approach_A' + strFlTp))
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# *** Approach B

# Approach B:
# (1) Calculate difference between conditions (within subject).
# (2) Apply deconvolution (on differences, separately for each subject).
# (3) Take mean across subjects.

# Loop through models, ROIs, hemispheres, and conditions to create plots:
Example #3
def diff_sem(
        objDpth,
        strPath,
        lstCon,
        lstConLbl,
        strTtl='',
        varYmin=0.0,  #noqa
        varYmax=2.0,
        tplPadY=(0.0, 0.0),
        varNumLblY=5,
        varDpi=80.0,
        strXlabel='Cortical depth level (equivolume)',
        strYlabel='fMRI signal change [arbitrary units]',
        lgcLgnd=False,
        lstDiff=None,
        vecNumInc=None,
        strParam='mean'):
    """
    Plot across-subject cortical depth profiles with SEM.

    Parameters
    ----------
    objDpth : np.array or str
        Array with single-subject cortical depth profiles, of the form:
        aryDpth[idxSub, idxCondition, idxDpth]. Either a numpy array or a
        string with the path to an npz file containing the array.
    strPath : str
        Output path for plot.
    lstCon : list
        Abbreviated condition levels used to complete file names (e.g. 'Pd').
    lstConLbl : list
        List containing condition labels (strings). Number of condition labels
        has to be the same as number of conditions in `lstCon`.
    strTtl : str
        Plot title.
    varYmin : float
        Minimum of Y axis.
    varYmax : float
        Maximum of Y axis.
    tplPadY : tuple
        Padding around labelled values on y.
    varNumLblY : int
        Number of labels on y axis.
    varDpi : float
        Resolution of the output figure.
    strXlabel : str
        Label for x axis.
    strYlabel : str
        Label for y axis.
    lgcLgnd : bool
        Whether to show a legend.
    lstDiff : list or None
        If None, the depth profiles are plotted separately for each condition.
        If a list of tuples of condition indices is provided, the difference
        between the two conditions is calculated and plotted. The second
        condition from the tuple is subtracted from the first (e.g. if
        lstDiff = [(0, 1)], then condition 1 is subtracted from condition 0).
    vecNumInc : np.array
        1D array with weights for weighted averaging over subjects (e.g. number
        of vertices per subject). If the array is loaded from disk (i.e. if
        `objDpth` is the path to an `*.npz` file stored on disk), `vecNumInc`
        has to be in the `*.npz` file. If `objDpth` is a numpy array containing
        the data, `vecNumInc` should also be provided as an input argument.
        Otherwise, weights are set to be equal across subjects.
    strParam : string
        Which parameter to plot; 'mean' or 'median'. If `strParam = 'median'`,
        an R function is imported for calculating the weighted median.
        Dependency (in python): `rpy2`, dependency (in R): `spatstat`.

    Returns
    -------
    None : None
        This function has no return value.

    Notes
    -----
    Plot across-subject mean or median cortical depth profiles with SEM.

    Function of the depth sampling pipeline.
    """
    # -------------------------------------------------------------------------
    # *** Prepare bootstrapping

    # Test whether the input is a numpy array or a string (with the path to a
    # numpy array):
    lgcAry = (type(objDpth) == np.ndarray)
    lgcStr = (type(objDpth) == str)

    # If the input is already an array, use it directly. If it is a string,
    # the depth profiles are loaded from the corresponding npz files below:
    if lgcAry:
        aryDpth = objDpth
        # If weights are not provided, set equal weight of one for each
        # subject:
        if vecNumInc is None:
            vecNumInc = np.ones((aryDpth.shape[0]))

    elif lgcStr:
        # Load array for first condition to get dimensions:
        objNpz = np.load(objDpth.format(lstCon[0]))
        aryTmpDpth = objNpz['arySubDpthMns']
        # Number of subjects:
        varNumSub = aryTmpDpth.shape[0]
        # Get number of depth levels from input array:
        varNumDpth = aryTmpDpth.shape[1]
        # Number of conditions:
        varNumCon = len(lstCon)
        # Array for depth profiles of form aryDpth[subject, condition, depth]:
        aryDpth = np.zeros((varNumSub, varNumCon, varNumDpth))
        # Load single-condition arrays from disk:
        for idxCon in range(varNumCon):
            objNpz = np.load(objDpth.format(lstCon[idxCon]))
            aryDpth[:, idxCon, :] = objNpz['arySubDpthMns']
        # Array with number of vertices (for weighted averaging across
        # subjects), shape: vecNumInc[subjects].
        vecNumInc = objNpz['vecNumInc']

    else:
        print(('---Error in diff_sem: input needs to be numpy array or path ' +
               'to numpy array.'))

    # Get number of subjects from input array:
    varNumSub = aryDpth.shape[0]
    # Get number of conditions from input array:
    varNumCon = aryDpth.shape[1]
    # Get number of depth levels from input array:
    varNumDpth = aryDpth.shape[2]

    # Define R function for calculation of weighted median:
    strFuncR = """
     funcR <- function(lstData, lstWght){
     library(spatstat)
     varWm <- weighted.median(lstData, lstWght)
     return(varWm)
     }
    """
    objFuncR = robjects.r(strFuncR)

    if lstDiff is not None:
        # Set number of comparisons:
        varNumCon = len(lstDiff)

    # Array for SEM:
    arySem = np.zeros((varNumCon, varNumDpth))

    # ------------------------------------------------------------------------
    # *** Calculate parameters

    if lstDiff is None:

        if strParam == 'mean':

            # Sum of weights over subjects (i.e. total number of vertices
            # across subjects; for scaling).
            varSum = np.sum(vecNumInc)

            # Multiply depth profiles by weights (weights are broadcasted over
            # conditions and depth levels):
            aryTmp = np.multiply(aryDpth, vecNumInc[:, None, None])

            # Sum over subjects, and scale by total number of vertices:
            aryEmpMne = np.divide(np.sum(aryTmp, axis=0), varSum)

        elif strParam == 'median':

            # Array for the across-subjects weighted median:
            aryEmpMne = np.zeros((varNumCon, varNumDpth))

            # Calculate weighted median in R (yes this is slow):
            for idxCon in range(varNumCon):
                for idxDpth in range(varNumDpth):
                    aryEmpMne[idxCon, idxDpth] = \
                        objFuncR(list(aryDpth[:, idxCon, idxDpth]),
                                 list(vecNumInc))[0]

    else:

        # Empirical mean difference between conditions:
        aryEmpMne = np.zeros((varNumCon, varNumDpth))

        for idxDiff in range(varNumCon):

            if strParam == 'mean':

                # Sum of weights over subjects (i.e. total number of vertices
                # across subjects; for scaling).
                varSum = np.sum(vecNumInc)

                # Difference in cortical depth profiles between conditions:
                aryDiff = np.subtract(aryDpth[:, lstDiff[idxDiff][0], :],
                                      aryDpth[:, lstDiff[idxDiff][1], :])

                # Multiply depth profiles by weights (weights are broadcasted
                # over depth levels):
                aryDiff = np.multiply(aryDiff, vecNumInc[:, None])

                # Sum over subjects, and scale by total number of vertices:
                aryEmpMne[idxDiff, :] = np.divide(np.sum(aryDiff, axis=0),
                                                  varSum)

                # Formula for SEM according to Franz & Loftus (2012). Standard
                # errors and confidence intervals in within-subjects designs:
                # generalizing Loftus and Masson (1994) and avoiding the biases
                # of alternative accounts. Psychonomic Bulletin & Review,
                # 19(3), p. 398. In plain form:
                # SEM = sqrt(sum((d - mean(d)) ** 2) / (N * (N - 1))),
                # where d is the within-subject condition difference and N is
                # the number of subjects.

                # Within-subject condition difference, demeaned across
                # subjects:
                aryDiffDmn = np.subtract(
                    np.subtract(aryDpth[:, lstDiff[idxDiff][0], :],
                                aryDpth[:, lstDiff[idxDiff][1], :]),
                    np.mean(np.subtract(aryDpth[:, lstDiff[idxDiff][0], :],
                                        aryDpth[:, lstDiff[idxDiff][1], :]),
                            axis=0))

                arySem[idxDiff, :] = np.sqrt(
                    np.divide(np.sum(np.power(aryDiffDmn, 2.0), axis=0),
                              float(varNumSub) * (float(varNumSub) - 1.0)))

            elif strParam == 'median':

                # Calculate weighted median difference between conditions in R
                # (yes this is slow):

                # Difference in cortical depth profiles between conditions:
                aryDiff = np.subtract(aryDpth[:, lstDiff[idxDiff][0], :],
                                      aryDpth[:, lstDiff[idxDiff][1], :])

                for idxDpth in range(varNumDpth):
                    aryEmpMne[idxDiff, idxDpth] = \
                        objFuncR(list(aryDiff[:, idxDpth]),
                                 list(vecNumInc))[0]

                # TODO: SEM for median

        # Create condition labels for differences:
        lstDiffLbl = [None] * varNumCon
        for idxDiff in range(varNumCon):
            lstDiffLbl[idxDiff] = ((lstConLbl[lstDiff[idxDiff][0]]) + ' - ' +
                                   (lstConLbl[lstDiff[idxDiff][1]]))
        lstConLbl = lstDiffLbl

        # Remove '_sst_pe' substring for better layout fit:
        lstConLbl = [s.replace('_sst_pe', '') for s in lstConLbl]

    # ------------------------------------------------------------------------
    # *** Plot results

    # For the plots of condition differences we use a different colour scheme
    # than for the plots of individual condition depth profiles.

    # Prepare colour map:
    objClrNorm = colors.Normalize(vmin=0, vmax=9)
    objCmap = plt.cm.tab10
    aryClr = np.zeros((varNumCon, 3))

    # Use custom colour scheme for PacMan data (three differences):
    if varNumCon == 3:
        aryClr[0, :] = objCmap(objClrNorm(9))[0:3]
        aryClr[1, :] = objCmap(objClrNorm(6))[0:3]
        aryClr[2, :] = objCmap(objClrNorm(8))[0:3]

    # Use tab10 colour map (but leave out first items, as those are used for
    # single condition plots).
    else:
        for idxCon in range(varNumCon):
            aryClr[idxCon, :] = \
                objCmap(objClrNorm(varNumCon + 2 - idxCon))[0:3]

    plt_dpth_prfl(aryEmpMne,
                  arySem,
                  varNumDpth,
                  varNumCon,
                  varDpi,
                  varYmin,
                  varYmax,
                  False,
                  lstConLbl,
                  strXlabel,
                  strYlabel,
                  strTtl,
                  lgcLgnd,
                  strPath,
                  varSizeX=1800.0,
                  varSizeY=1600.0,
                  varNumLblY=varNumLblY,
                  tplPadY=tplPadY,
                  aryClr=aryClr)
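
# Hypothetical usage sketch for diff_sem (the paths, condition names, and axis
# limits below are made up; the npz files are expected to contain
# 'arySubDpthMns' and 'vecNumInc', as described in the docstring above):
diff_sem('/home/user/depth_profiles/v1_{}.npz',
         '/home/user/plots/v1_condition_difference.svg',
         ['kanizsa', 'kanizsa_rotated'],
         ['Kanizsa', 'Kanizsa rotated'],
         strTtl='V1',
         varYmin=-0.5,
         varYmax=0.5,
         lgcLgnd=True,
         lstDiff=[(0, 1)],
         strParam='mean')
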
def boot_plot(
        objDpth,
        strPath,
        lstCon,
        lstConLbl,
        varNumIt=10000,  #noqa
        varConLw=2.5,
        varConUp=97.5,
        strTtl='',
        varYmin=0.0,
        varYmax=2.0,
        tplPadY=(0.0, 0.0),
        strXlabel='Cortical depth level (equivolume)',
        strYlabel='fMRI signal change [arbitrary units]',
        lgcLgnd=False,
        lstDiff=None,
        vecNumInc=None,
        strParam='mean'):
    """
    Plot across-subject cortical depth profiles with confidence intervals.

    Parameters
    ----------
    objDpth : np.array or str
        Array with single-subject cortical depth profiles, of the form:
        aryDpth[idxSub, idxCondition, idxDpth]. Either a numpy array or a
        string with the path to an npz file containing the array.
    strPath : str
        Output path for plot.
    lstCon : list
        Abbreviated condition levels used to complete file names (e.g. 'Pd').
    lstConLbl : list
        List containing condition labels (strings). Number of condition labels
        has to be the same as number of conditions in `lstCon`.
    varNumIt : int
        Number of bootstrap iterations.
    varConLw : float
        Lower bound of the percentile bootstrap confidence interval in
        percent (i.e. in range of [0, 100]).
    varConUp : float
        Upper bound of the percentile bootstrap confidence interval in
        percent (i.e. in range of [0, 100]).
    strTtl : str
        Plot title.
    varYmin : float
        Minimum of Y axis.
    varYmax : float
        Maximum of Y axis.
    tplPadY : tuple
        Padding around labelled values on y.
    strXlabel : str
        Label for x axis.
    strYlabel : str
        Label for y axis.
    lgcLgnd : bool
        Whether to show a legend.
    lstDiff : list or None
        If None, the depth profiles are plotted separately for each condition.
        If a list of tuples of condition indices is provided, on each
        bootstrapping iteration the difference between the two conditions is
        calculated, and is plotted. The second condition from the tuple is
        subtracted from the first (e.g. if lstDiff = [(0, 1)], then condition 1
        is subtracted from condition 0).
    vecNumInc : np.array
        1D array with weights for weighted averaging over subjects (e.g. number
        of vertices per subject). If the array is loaded from disk (i.e. if
        `objDpth` is the path to an `*.npz` file stored on disk), `vecNumInc`
        has to be in the `*.npz` file. If `objDpth` is a numpy array containing
        the data, `vecNumInc` should also be provided as an input argument.
        Otherwise, weights are set to be equal across subjects.
    strParam : string
        Which parameter to plot; 'mean' or 'median'. If `strParam = 'median'`,
        an R function is imported for calculating the weighted median.
        Dependency (in python): `rpy2`, dependency (in R): `spatstat`.

    Returns
    -------
    None : None
        This function has no return value.

    Notes
    -----
    Plot across-subject mean or median cortical depth profiles with percentile
    bootstrap confidence intervals. This function bootstraps (i.e. resamples
    with replacement) from an array of single-subject depth profiles,
    calculates a confidence interval of the mean/median across bootstrap
    iterations and plots the empirical mean/median & bootstrap confidence
    intervals along the cortical depth.

    Function of the depth sampling pipeline.
    """
    # -------------------------------------------------------------------------
    # *** Prepare bootstrapping

    # Test whether the input is a numpy array or a string (with the path to a
    # numpy array):
    lgcAry = (type(objDpth) == np.ndarray)
    lgcStr = (type(objDpth) == str)

    # If the input is already an array, use it directly. If it is a string,
    # the depth profiles are loaded from the corresponding npz files below:
    if lgcAry:
        aryDpth = objDpth
        # If weights are not provided, set equal weight of one for each
        # subject:
        if vecNumInc is None:
            vecNumInc = np.ones((aryDpth.shape[0]))

    elif lgcStr:
        # Load array for first condition to get dimensions:
        objNpz = np.load(objDpth.format(lstCon[0]))
        aryTmpDpth = objNpz['arySubDpthMns']
        # Number of subjects:
        varNumSub = aryTmpDpth.shape[0]
        # Get number of depth levels from input array:
        varNumDpth = aryTmpDpth.shape[1]
        # Number of conditions:
        varNumCon = len(lstCon)
        # Array for depth profiles of form aryDpth[subject, condition, depth]:
        aryDpth = np.zeros((varNumSub, varNumCon, varNumDpth))
        # Load single-condition arrays from disk:
        for idxCon in range(varNumCon):
            objNpz = np.load(objDpth.format(lstCon[idxCon]))
            aryDpth[:, idxCon, :] = objNpz['arySubDpthMns']
        # Array with number of vertices (for weighted averaging across
        # subjects), shape: vecNumInc[subjects].
        vecNumInc = objNpz['vecNumInc']

    else:
        print(('---Error in bootPlot: input needs to be numpy array or path ' +
               'to numpy array.'))

    # Get number of subjects from input array:
    varNumSub = aryDpth.shape[0]
    # Get number of conditions from input array:
    varNumCon = aryDpth.shape[1]
    # Get number of depth levels from input array:
    varNumDpth = aryDpth.shape[2]

    # We will sample subjects with replacement. How many subjects to sample on
    # each iteration:
    varNumSmp = varNumSub

    # Random array with subject indices for bootstrapping of the form
    # aryRnd[varNumIt, varNumSmp]. Each row includes the indices of the
    # subjects to be sampled on that iteration.
    aryRnd = np.random.randint(0, high=varNumSub, size=(varNumIt, varNumSmp))

    if lstDiff is None:
        # Array for bootstrap samples, of the form
        # aryBoo[idxIteration, idxSubject, idxCondition, idxDpth]):
        aryBoo = np.zeros((varNumIt, varNumSub, varNumCon, varNumDpth))
    else:
        # Set number of comparisons:
        varNumCon = len(lstDiff)
        # Array for bootstrap samples:
        aryBoo = np.zeros((varNumIt, varNumSub, varNumCon, varNumDpth))

    # Array with number of vertices per subject for each bootstrapping sample
    # (needed for weighted averaging), shape: aryWght[iterations, subjects]
    aryWght = np.zeros((varNumIt, varNumSub))

    # ------------------------------------------------------------------------
    # *** Bootstrap

    # Loop through bootstrap iterations:
    for idxIt in range(varNumIt):
        # Indices of current bootstrap sample:
        vecRnd = aryRnd[idxIt, :]
        if lstDiff is None:
            # Put current bootstrap sample into array:
            aryBoo[idxIt, :, :, :] = aryDpth[vecRnd, :, :]
        else:
            # Calculate difference between conditions:
            for idxDiff in range(varNumCon):
                aryBoo[idxIt, :, idxDiff, :] = \
                    np.subtract(aryDpth[vecRnd, lstDiff[idxDiff][0], :],
                                aryDpth[vecRnd, lstDiff[idxDiff][1], :])

        # Put number of vertices per subject into respective array (for
        # weighted averaging):
        aryWght[idxIt, :] = vecNumInc[vecRnd]

    if strParam == 'mean':

        # Mean for each bootstrap sample (across subjects within the bootstrap
        # sample):

        # Sum of weights over subjects (i.e. total number of vertices across
        # subjects, one value per iteration; for scaling).
        vecSum = np.sum(aryWght, axis=1)

        # Multiply depth profiles by weights (weights are broadcasted over
        # conditions and depth levels):
        aryTmp = np.multiply(aryBoo, aryWght[:, :, None, None])

        # Sum over subjects, and scale by number of vertices (sum of vertices
        # is broadcasted over conditions and depth levels):
        aryBooMne = np.divide(np.sum(aryTmp, axis=1), vecSum[:, None, None])

    elif strParam == 'median':

        # Define R function for calculation of weighted median:
        strFuncR = """
         funcR <- function(lstData, lstWght){
         library(spatstat)
         varWm <- weighted.median(lstData, lstWght)
         return(varWm)
         }
        """
        objFuncR = robjects.r(strFuncR)

        # Array for the weighted median of each bootstrap sample:
        aryBooMne = np.zeros((varNumIt, varNumCon, varNumDpth))

        # Calculate the weighted median for each bootstrap sample in R (yes,
        # this is slow):
        for idxIt in range(varNumIt):
            for idxCon in range(varNumCon):
                for idxDpth in range(varNumDpth):
                    aryBooMne[idxIt, idxCon, idxDpth] = \
                        objFuncR(list(aryBoo[idxIt, :, idxCon, idxDpth]),
                                 list(aryWght[idxIt, :]))[0]

    # Delete large bootstrap array:
    del aryBoo

    # Percentile bootstrap for mean:
    aryPrct = np.percentile(aryBooMne, (varConLw, varConUp), axis=0)
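    # aryPrct has shape (2, varNumCon, varNumDpth): the first axis holds the
    # lower and upper bound of the confidence interval, respectively.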

    # ------------------------------------------------------------------------
    # *** Plot result

    if lstDiff is None:

        if strParam == 'mean':

            # Sum of weights over subjects (i.e. total number of vertices
            # across subjects; for scaling).
            varSum = np.sum(vecNumInc)

            # Multiply depth profiles by weights (weights are broadcasted over
            # conditions and depth levels):
            aryTmp = np.multiply(aryDpth, vecNumInc[:, None, None])

            # Sum over subjects, and scale by total number of vertices:
            aryEmpMne = np.divide(np.sum(aryTmp, axis=0), varSum)

        elif strParam == 'median':

            # Array for the across-subjects weighted median:
            aryEmpMne = np.zeros((varNumCon, varNumDpth))

            # Calculate weighted median in R (yes this is slow):
            for idxCon in range(varNumCon):
                for idxDpth in range(varNumDpth):
                    aryEmpMne[idxCon, idxDpth] = \
                        objFuncR(list(aryDpth[:, idxCon, idxDpth]),
                                 list(vecNumInc))[0]

    else:

        # Empirical mean difference between conditions:
        aryEmpMne = np.zeros((varNumCon, varNumDpth))

        for idxDiff in range(varNumCon):

            if strParam == 'mean':

                # Sum of weights over subjects (i.e. total number of vertices
                # across subjects; for scaling).
                varSum = np.sum(vecNumInc)

                # Difference in cortical depth profiles between conditions:
                aryDiff = np.subtract(aryDpth[:, lstDiff[idxDiff][0], :],
                                      aryDpth[:, lstDiff[idxDiff][1], :])

                # Un-comment this for SEM (overwrites bootstrapping results),
                # for comparison:
                # aryPrct[0, idxDiff, :] = np.divide(np.std(aryDiff, axis=0),
                #                                    np.sqrt(varNumSub)) * -1
                # aryPrct[1, idxDiff, :] = np.divide(np.std(aryDiff, axis=0),
                #                                    np.sqrt(varNumSub)) * 1

                # Multiply depth profiles by weights (weights are broadcasted
                # over depth levels):
                aryDiff = np.multiply(aryDiff, vecNumInc[:, None])

                # Sum over subjects, and scale by total number of vertices:
                aryEmpMne[idxDiff, :] = np.divide(np.sum(aryDiff, axis=0),
                                                  varSum)

                # Un-comment this for SEM (overwrites bootstrapping results),
                # for comparison:
                # aryPrct[0, idxDiff, :] = np.add(aryPrct[0, idxDiff, :],
                #                                 aryEmpMne[idxDiff, :])
                # aryPrct[1, idxDiff, :] = np.add(aryPrct[1, idxDiff, :],
                #                                 aryEmpMne[idxDiff, :])

            elif strParam == 'median':

                # Calculate weighted median difference between conditions in R
                # (yes this is slow):

                # Difference in cortical depth profiles between conditions:
                aryDiff = np.subtract(aryDpth[:, lstDiff[idxDiff][0], :],
                                      aryDpth[:, lstDiff[idxDiff][1], :])

                for idxDpth in range(varNumDpth):
                    aryEmpMne[idxDiff, idxDpth] = \
                        objFuncR(list(aryDiff[:, idxDpth]),
                                 list(vecNumInc))[0]

        # Create condition labels for differences:
        lstDiffLbl = [None] * varNumCon
        for idxDiff in range(varNumCon):
            lstDiffLbl[idxDiff] = ((lstConLbl[lstDiff[idxDiff][0]]) +
                                   ' minus ' +
                                   (lstConLbl[lstDiff[idxDiff][1]]))
        lstConLbl = lstDiffLbl

    plt_dpth_prfl(aryEmpMne,
                  None,
                  varNumDpth,
                  varNumCon,
                  80.0,
                  varYmin,
                  varYmax,
                  False,
                  lstConLbl,
                  strXlabel,
                  strYlabel,
                  strTtl,
                  lgcLgnd,
                  strPath,
                  varSizeX=1800.0,
                  varSizeY=1600.0,
                  varNumLblY=6,
                  tplPadY=tplPadY,
                  aryCnfLw=aryPrct[0, :, :],
                  aryCnfUp=aryPrct[1, :, :])
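
# Minimal standalone sketch (illustration only, not part of the pipeline) of
# the percentile bootstrap over subjects used in boot_plot: subjects are
# resampled with replacement, a weighted mean is computed per iteration, and
# percentiles are taken across iterations. Array shapes and values below are
# made up.
import numpy as np

varNumSub, varNumDpth, varNumIt = 8, 11, 1000
aryDpth = np.random.rand(varNumSub, varNumDpth)     # hypothetical profiles
vecNumInc = np.random.randint(50, 500, varNumSub)   # hypothetical weights

aryRnd = np.random.randint(0, high=varNumSub, size=(varNumIt, varNumSub))
aryBooMne = np.zeros((varNumIt, varNumDpth))
for idxIt in range(varNumIt):
    vecRnd = aryRnd[idxIt, :]
    vecWght = vecNumInc[vecRnd].astype(np.float64)
    aryTmp = np.multiply(aryDpth[vecRnd, :], vecWght[:, None])
    aryBooMne[idxIt, :] = np.divide(np.sum(aryTmp, axis=0), np.sum(vecWght))

# Percentile bootstrap confidence interval (2.5th / 97.5th percentile):
aryPrct = np.percentile(aryBooMne, (2.5, 97.5), axis=0)
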
def ert_onset_depth(lstPthPic, strPthPlt, lstConLbl, varTr, varBse,
                    strTtl='Response onset time difference', strFleTpe='.svg'):
    """
    Plot response onset times over cortical depth.

    Parameters
    ----------
    lstPthPic : list
        List of strings with path(s) of pickle file containing event related
        time courses. The pickles can be created using `py_depthsampling.ert`.
        They contain one list per subject. Each list contains a numpy array
        with the event related time courses (separately for conditions and
        depth levels), and an integer - the number of vertices that contribute
        to the subject's time course. By passing in several strings, more than
        one region of interest can be plotted/analysed.
    strPthPlt : str
        Path & file name for plot.
    lstConLbl : list
        List of strings containing condition labels (one per input pickle).
    varTr : float
        Volume TR of the measurement, in seconds.
    varBse : int
        Time point of first volume after stimulus onset (index in event related
        time course). In other words, the index of the first volume on which
        the stimulus was on.
    strTtl : str
        Title for plot.
    strFleTpe : str
        File extension.

    Returns
    -------
    None : None
        This function has no return value.

    Notes
    -----
    Plot response onset time by cortical depth. Event-related time courses
    are upsampled in order to estimate the onset time, separately for each
    subject.

    """
    # *************************************************************************
    # *** Preparations

    # Number of ROIs:
    varNumRoi = len(lstPthPic)

    # Number of bootstrap iterations:
    varNumIt = 1000

    # z-threshold for response onset. Onset is identified if signal is
    # above/below varThr times mean baseline signal.
    varThr = 2.5

    # Temporal upsampling factor:
    varUp = 100

    # Upper and lower bound for percentile bootstrap confidence interval:
    varConLw = 5.0
    varConUp = 95.0

    # *************************************************************************
    # *** Loop through ROIs

    # Note that here, 'ROI' may refer to stimulus & edge ROIs within e.g. V1.

    for idxRoi in range(varNumRoi):

        # Path of current input pickle file:
        strPthPic = lstPthPic[idxRoi]

        # Load previously prepared event-related timecourses from pickle:
        with open(strPthPic, 'rb') as objPcl:
            dicAllSubsRoiErt = pickle.load(objPcl)

        # Get number of subjects, conditions, cortical depth levels, time
        # points (volumes):
        varNumSub = len(dicAllSubsRoiErt)

        tplShpe = list(dicAllSubsRoiErt.values())[0][0].shape
        varNumCon = tplShpe[0]
        varNumDpth = tplShpe[1]
        varNumVol = tplShpe[2]

        # On first iteration, initialise arrays:
        if idxRoi == 0:

            # Array for indices of response onset:
            aryFirst = np.zeros((varNumRoi, varNumDpth), dtype=np.int32)

            # Array with information on whether there is a detectable response
            # at all (`True` if a response onset was detected).
            aryLgc = np.zeros((varNumRoi, varNumDpth), dtype=bool)

            # Array for bootstrap onset times (needed for calculation of
            # bootstrap confidence intervals of response onset difference).
            aryFirstBoo = np.zeros((varNumRoi, varNumIt, varNumDpth),
                                   dtype=np.float32)

            # Array for percentile bootstrap of response onset (for confidence
            # interval of response onset). Shape: aryFirstPrc[ROI, depth,
            # upper/lower bound].
            aryFirstPrc = np.zeros((varNumRoi, varNumDpth, 2), dtype=np.int32)

            # Array with information on whether there is a detectable response
            # at all for each bootstrap iteration (`True` if a response onset
            # was detected).
            aryLgcBoo = np.zeros((varNumRoi, varNumIt, varNumDpth),
                                 dtype=bool)

            # Bootstrap preparations. We will sample subjects with replacement.
            # How many subjects to sample on each iteration:
            varNumSmp = varNumSub

            # Random array with subject indices for bootstrapping of the form
            # aryRnd[varNumIt, varNumSmp]. Each row includes the indices of
            # the subjects to be sampled on that iteration.
            aryRnd = np.random.randint(0,
                                       high=varNumSub,
                                       size=(varNumIt, varNumSmp))

        # *********************************************************************
        # *** Subtract baseline mean

        # The input to this function consists of timecourses that have been
        # normalised to the pre-stimulus baseline. The datapoints are signal
        # intensity relative to the pre-stimulus baseline, and the
        # pre-stimulus baseline has a mean of one. We subtract one, so that
        # the datapoints represent percent signal change relative to baseline.
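        # For instance (hypothetical value, for illustration only): a
        # normalised datapoint of 1.02 (i.e. 2 percent above baseline)
        # becomes 0.02 after the subtraction.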
        for strSubID, lstItem in dicAllSubsRoiErt.items():
            # Get event related time courses from list (second entry in list is
            # the number of vertices contained in this ROI).
            aryRoiErt = lstItem[0]
            # Subtract baseline mean:
            aryRoiErt = np.subtract(aryRoiErt, 1.0)
            # Write the updated array back into the dictionary (np.subtract
            # returns a new array, so reassignment is necessary):
            dicAllSubsRoiErt[strSubID] = [aryRoiErt, lstItem[1]]

        # *********************************************************************
        # *** Create group level ERT

        # Create across-subjects data array of the form:
        # aryAllSubsRoiErt[varNumSub, varNumCon, varNumDpth, varNumVol]
        aryAllSubsRoiErt = np.zeros((varNumSub, varNumCon, varNumDpth,
                                     varNumVol))

        # Vector for number of vertices per subject (used for weighted
        # averaging):
        vecNumVrtcs = np.zeros((varNumSub), dtype=np.float32)

        idxSub = 0

        for lstItem in dicAllSubsRoiErt.values():

            # Get event related time courses from list.
            aryRoiErt = lstItem[0]

            # Get number of vertices for this subject:
            vecNumVrtcs[idxSub] = int(lstItem[1])

            aryAllSubsRoiErt[idxSub, :, :, :] = aryRoiErt

            idxSub += 1

        # The numbers of vertices are going to be used as weights, so we make
        # sure they are of float type:
        vecNumVrtcs = vecNumVrtcs.astype(np.float32)

        # *********************************************************************
        # *** Upsample timecourses

        # New number of volumes:
        varNumVolUp = varNumVol * varUp

        # Current shape:
        # aryAllSubsRoiErt[varNumSub, varNumCon, varNumDpth, varNumVol]

        # Within-subject mean (across conditions):
        aryMneWthn = np.mean(aryAllSubsRoiErt, axis=1)

        # New shape:
        # aryMneWthn[varNumSub, varNumDpth, varNumVol]

        # Position of original datapoints in time:
        vecPosEmp = np.arange(0.0, float(varNumVol))

        # Position of upsampled datapoints in time:
        vecPosUp = np.linspace(0.0, float(varNumVol - 1), num=varNumVolUp,
                               endpoint=True)

        # Array for upsampled timecourses:
        aryErtUp = np.zeros((varNumSub, varNumDpth, varNumVolUp),
                            dtype=np.float32)

        # Loop through subjects and depth levels (upsampling in 1D):
        for idxSub in range(varNumSub):
            for idxDpth in range(varNumDpth):

                # Interpolation:
                aryErtUp[idxSub, idxDpth, :] = griddata(
                        vecPosEmp, aryMneWthn[idxSub, idxDpth, :], vecPosUp,
                        method='cubic').astype(np.float32)
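        # (Note: since the sample points are one-dimensional, a dedicated 1-D
        # interpolator such as scipy.interpolate.interp1d with kind='cubic'
        # could presumably be used instead of griddata; this is an
        # observation, not part of the original pipeline.)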

        # *********************************************************************
        # *** Compute onset time

        # Weighted mean across subjects:
        aryErtUpMne = np.average(aryErtUp, weights=vecNumVrtcs, axis=0)

        # Add array dimension, new shape: aryErtUpMne[1, depth, volumes]
        aryErtUpMne = aryErtUpMne[None, :, :].astype(np.float32)

        # Scale baseline interval:
        varBseUp = (varUp * varBse) - 1

        # Calculate onset times:
        aryTmp01, aryTmp02 = onset(aryErtUpMne, varBseUp, varThr)
        aryFirst[idxRoi, :] = aryTmp01[0, :]
        aryLgc[idxRoi, :] = aryTmp02
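        # Note: the `onset` helper is defined elsewhere in py_depthsampling
        # and is not shown in this example. As a rough sketch of the kind of
        # logic involved (an assumption, not the actual implementation), the
        # onset index could be found as the first post-baseline sample at
        # which the signal deviates from baseline by more than varThr
        # baseline standard deviations:
        #     vecBseSd = np.std(aryErtUpMne[0, :, :varBseUp], axis=-1)
        #     aryDev = np.greater(np.absolute(aryErtUpMne[0, :, varBseUp:]),
        #                         varThr * vecBseSd[:, None])
        #     vecFirst = varBseUp + np.argmax(aryDev, axis=-1)
        #     vecLgc = np.any(aryDev, axis=-1)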

        # *********************************************************************
        # *** Bootstrap onset time

        # Array for bootstrap samples:
        aryBoo = np.zeros((varNumIt, varNumSub, varNumDpth, varNumVolUp),
                          dtype=np.float32)

        # Array with number of vertices per subject for each bootstrapping
        # sample (needed for weighted averaging), shape: aryWght[iterations,
        # subjects]
        aryWght = np.zeros((varNumIt, varNumSub), dtype=np.float32)

        # Loop through bootstrap iterations:
        for idxIt in range(varNumIt):
            # Indices of current bootstrap sample:
            vecRnd = aryRnd[idxIt, :]
            # Put current bootstrap sample into array:
            aryBoo[idxIt, :, :, :] = aryErtUp[vecRnd, :, :]
            # Put number of vertices per subject into respective array (for
            # weighted averaging):
            aryWght[idxIt, :] = vecNumVrtcs[vecRnd]

        # Weighted mean for each bootstrap sample (across subjects within the
        # bootstrap sample):

        # Sum of weights over subjects (i.e. total number of vertices across
        # subjects, one value per iteration; for scaling).
        vecSum = np.sum(aryWght, axis=1)

        # Multiply depth profiles by weights (weights are broadcasted over
        # depth levels and volumes):
        aryTmp = np.multiply(aryBoo, aryWght[:, :, None, None])

        # Sum over subjects, and scale by number of vertices (sum of vertices
        # is broadcasted over conditions and depth levels):
        aryBooMne = np.divide(
                              np.sum(aryTmp, axis=1),
                              vecSum[:, None, None]
                              )
        # Resulting shape: aryBooMne[iterations, depth, volumes].
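        # Design note: np.average is not used here because it expects weights
        # that are either 1-D along the averaging axis or of the same shape
        # as the data, whereas here the weights differ between bootstrap
        # iterations. An equivalent call (a sketch) would first broadcast the
        # weights to the full array shape:
        #     aryBooMne = np.average(
        #         aryBoo, axis=1,
        #         weights=np.broadcast_to(aryWght[:, :, None, None],
        #                                 aryBoo.shape))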

        # Delete large bootstrap array:
        del aryBoo

        # Calculate onset times, return value has shape [iterations, depth].
        aryFirstBoo[idxRoi, :, :], aryLgcBoo[idxRoi, :, :] = onset(
            aryBooMne, varBseUp, varThr)

    # *************************************************************************
    # *** Percentile bootstrap - onset time

    # Was an onset detected in both ROIs? Shape: aryLgcBoo[iterations, depth].
    aryLgcBoo = np.min(aryLgcBoo, axis=0)

    # In order to exclude iterations in which no onset was detected, we loop
    # through ROIs and depth levels (the number of valid iterations can differ
    # between depth levels, so the percentile cannot be computed on the full
    # array at once).
    for idxRoi in range(varNumRoi):

        for idxDpth in range(varNumDpth):

            # Temporary vector with onset times, excluding iterations on
            # which no response was detected.
            vecTmp = aryFirstBoo[idxRoi, :, idxDpth]
            vecTmp = vecTmp[aryLgcBoo[:, idxDpth]]

            vecPrct = np.percentile(vecTmp,
                                    (varConLw, varConUp),
                                    axis=0)

            aryFirstPrc[idxRoi, idxDpth, 0] = vecPrct[0]
            aryFirstPrc[idxRoi, idxDpth, 1] = vecPrct[1]

    # *************************************************************************
    # *** Percentile bootstrap - onset time difference

    # Was a response detected in both ROIs? Shape: vecLgc[depth].
    # vecLgc = np.min(aryLgc, axis=0)

    # Group level onset time difference, shape: vec[depth].
    vecDiff = np.subtract(aryFirst[0, :], aryFirst[1, :])

    # Scale result to seconds & new shape (for plot function):
    aryDiff = np.divide(
                        np.multiply(vecDiff,
                                    varTr),
                        float(varUp)).reshape(1, varNumDpth)

    # Onset difference for bootstrap samples:
    aryDiffBoo = np.subtract(aryFirstBoo[0, :, :],
                             aryFirstBoo[1, :, :])
    # Shape: aryDiffBoo[varNumIt, varNumDpth]

    # Scale result to seconds:
    aryDiffBoo = np.divide(
                           np.multiply(aryDiffBoo,
                                       varTr),
                           float(varUp))

    # Array for percentile bootstrap CI of onset difference.
    aryOnsetPrct = np.zeros((2, varNumDpth))
    # Shape: aryOnsetPrct[lower/upper bound, depth]

    # Was an onset detected in both ROIs? Shape: aryLgcBoo[iterations, depth].
    # aryLgcBoo = np.min(aryLgcBoo, axis=0)

    # In order to exclude iterations in which no onset was detected, we loop
    # through depth levels (the number of valid iterations can differ between
    # depth levels, so the percentile cannot be computed on the full array at
    # once).
    for idxDpth in range(varNumDpth):

        # Temporary vector with onset time differences, excluding iterations
        # on which no response was detected.
        vecTmp = aryDiffBoo[:, idxDpth]
        vecTmp = vecTmp[aryLgcBoo[:, idxDpth]]

        # Percentile bootstrap:
        aryOnsetPrct[:, idxDpth] = np.percentile(vecTmp,
                                                 (varConLw, varConUp),
                                                 axis=0)

    # *************************************************************************
    # *** Plot onset time

    # Scale result to seconds:
    aryFirst = np.divide(
                         np.multiply(aryFirst.astype(np.float64),
                                     varTr),
                         float(varUp))
    aryFirstPrc = np.divide(
                            np.multiply(aryFirstPrc.astype(np.float64),
                                        varTr),
                            float(varUp))

    # Subtract pre-stimulus baseline:
    aryFirst = np.subtract(aryFirst, (float(varBse) * varTr))
    aryFirstPrc = np.subtract(aryFirstPrc, (float(varBse) * varTr))

    # Output file path:
    strPthOut = strPthPlt + 'onset_by_depth' + strFleTpe

    # Plot parameters:
    varYmin = 0.0
    varYmax = 4.0
    varNumLblY = 3
    tplPadY = (0.0, 1.5)
    lgcLgnd = True
    strXlabel = 'Cortical depth'
    strYlabel = 'Onset time [s]'
    aryClr = np.array([[49.0, 128.0, 182.0],
                       [253.0, 134.0, 47.0]])
    aryClr = np.divide(aryClr, 255.0)

    plt_dpth_prfl(aryFirst, None, varNumDpth, 2, 90.0, varYmin,
                  varYmax, False, lstConLbl, strXlabel, strYlabel, strTtl,
                  lgcLgnd, strPthOut, varSizeX=1440.0, varSizeY=1200.0,
                  varNumLblY=varNumLblY, tplPadY=tplPadY, aryClr=aryClr,
                  aryCnfLw=aryFirstPrc[:, :, 0], aryCnfUp=aryFirstPrc[:, :, 1])

    # *************************************************************************
    # *** Plot onset time difference

    # Plot parameters:
    varYmin = 0.0
    varYmax = 3.0
    varNumLblY = 4
    tplPadY = (0.0, 0.3)
    lgcLgnd = False
    strXlabel = 'Cortical depth'
    strYlabel = 'Time difference [s]'

    # Output file path:
    strPthOut = strPthPlt + 'onsetdiff_by_depth' + strFleTpe

    plt_dpth_prfl(aryDiff, None, varNumDpth, 1, 90.0, varYmin,
                  varYmax, False, lstConLbl, strXlabel, strYlabel, strTtl,
                  lgcLgnd, strPthOut, varSizeX=1440.0, varSizeY=1200.0,
                  varNumLblY=varNumLblY, tplPadY=tplPadY,
                  aryCnfLw=aryOnsetPrct[0, :][None, :],
                  aryCnfUp=aryOnsetPrct[1, :][None, :])
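
# Example usage (a sketch with hypothetical file paths and parameter values;
# the pickle files would first have to be created with `py_depthsampling.ert`):
# ert_onset_depth(['/home/user/ert_v1_centre.pickle',
#                  '/home/user/ert_v1_edge.pickle'],
#                 '/home/user/plots/v1_',
#                 ['Centre', 'Edge'],
#                 2.0,
#                 5)
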
Beispiel #6
0
        varMaxY = 0.3
        varNumLblY = 4
    elif idxScn == 2:
        varMinY = -0.3
        varMaxY = 0.3
        varNumLblY = 3

    # Plot components without draining vein effect:
    plt_dpth_prfl(lstScnCpy[idxScn],
                  aryError,
                  varNumDpth,
                  varNumCon, (varDpi * 1.8),
                  varMinY,
                  varMaxY,
                  False, ['1', '2'],
                  strXlabel,
                  strYlabel,
                  '',
                  False, (strPthOt + 'scenario_' + str(idxScn).zfill(2) +
                          '_components' + strFleTyp),
                  tplPadY=(0.01, 0.01),
                  varNumLblY=varNumLblY,
                  varRound=1)

    # Layout, depending on scenario:
    if (idxScn == 0) or (idxScn == 1):
        varMinY = 0.0
        varMaxY = 4.0
        varNumLblY = 5
    elif idxScn == 2:
        varMinY = -4.0
Beispiel #7
0
                        # Plot empirical condition difference and permutation
                        # null distribution:
                        plt_dpth_prfl(aryPlot01,
                                      None,
                                      varNumDpt,
                                      2,
                                      varDpi,
                                      varYmin,
                                      varYmax,
                                      False,
                                      ['Empirical condition difference',
                                       'Permutation null distribution'],
                                      'Cortical depth level',
                                      'fMRI signal change [%]',
                                      (lstRoi[idxRoi].upper()
                                       + ' '
                                       + lstHmsph[idxHmsph].upper()
                                       + ' '
                                       + strTtle),
                                      True,
                                      (strPthPltOt.format(lstMetaCon[idxMtaCn],
                                                          lstRoi[idxRoi],
                                                          lstHmsph[idxHmsph],
                                                          strPthCon,
                                                          lstMdl[idxMdl])
                                       + strFlTp),
                                      aryCnfLw=aryPlotErrLw,
                                      aryCnfUp=aryPlotErrUp)

                        # Reshape p-values for plot:
                        vecP = vecP.reshape((1, varNumDpt))
Beispiel #8
0
def acr_subs_get_data(idxPrc,              # Process ID  #noqa
                      strSubId,            # Data struc - Subject ID
                      lstVtkDpth01,        # Data struc - Pth vtk I
                      varNumDpth,          # Data struc - Num. depth levels
                      strPrcdData,         # Data struc - Str prcd VTK data
                      varNumLne,           # Data struc - Lns prcd data VTK
                      lgcSlct01,           # Criterion 1 - Yes or no?
                      strCsvRoi,           # Criterion 1 - CSV path
                      varNumHdrRoi,        # Criterion 1 - Header lines
                      lgcSlct02,           # Criterion 2 - Yes or no?
                      strVtkSlct02,        # Criterion 2 - VTK path
                      varThrSlct02,        # Criterion 2 - Threshold
                      lgcSlct03,           # Criterion 3 - Yes or no?
                      strVtkSlct03,        # Criterion 3 - VTK path
                      varThrSlct03,        # Criterion 3 - Threshold
                      lgcSlct04,           # Criterion 4 - Yes or no?
                      strVtkSlct04,        # Criterion 4 - VTK path
                      tplThrSlct04,        # Criterion 4 - Threshold
                      lgcNormDiv,          # Normalisation - Yes or no?
                      varNormIdx,          # Normalisation - Reference
                      varDpi,              # Plot - Dots per inch
                      varYmin,             # Plot - Minimum of Y axis
                      varYmax,             # Plot - Maximum of Y axis
                      lstConLbl,           # Plot - Condition labels
                      strXlabel,           # Plot - X axis label
                      strYlabel,           # Plot - Y axis label
                      strTitle,            # Plot - Title
                      strPltOtPre,         # Plot - Output file path prefix
                      strPltOtSuf,         # Plot - Output file path suffix
                      strMetaCon,          # Metacondition (stim/periphery)
                      queOut):             # Queue for output list
    """
    Obtaining & plotting single subject data for across subject analysis.

    This function loads the data for each subject for a multi-subject analysis
    and passes the data to the parent function for visualisation.
    """
    # Only print status messages if this is the first of several parallel
    # processes:
    if idxPrc == 0:
        print('------Loading single subject data: ' + strSubId)

    # *************************************************************************
    # *** Import data

    # Import CSV file with ROI definition
    if lgcSlct01:
        if idxPrc == 0:
            print('---------Importing CSV file with ROI definition (first '
                  + 'criterion)')
        aryRoiVrtx = load_csv_roi(strCsvRoi, varNumHdrRoi)
    # Otherwise, create dummy vector (for function I/O)
    else:
        aryRoiVrtx = 0

    # Import second criterion vtk file (all depth levels)
    if lgcSlct02:
        if idxPrc == 0:
            print('---------Importing second criterion vtk file (all depth '
                  + 'levels).')
        arySlct02 = load_vtk_multi(strVtkSlct02,
                                   strPrcdData,
                                   varNumLne,
                                   varNumDpth)
    # Otherwise, create dummy vector (for function I/O)
    else:
        arySlct02 = 0

    # Import third criterion vtk file (all depth levels)
    if lgcSlct03:
        if idxPrc == 0:
            print('---------Importing third criterion vtk file (all depth '
                  + 'levels).')
        arySlct03 = load_vtk_multi(strVtkSlct03,
                                   strPrcdData,
                                   varNumLne,
                                   varNumDpth)
    # Otherwise, create dummy array (for function I/O)
    else:
        arySlct03 = 0

    # Import fourth criterion vtk file (one depth level)
    if lgcSlct04:
        if idxPrc == 0:
            print('---------Importing fourth criterion vtk file (one depth '
                  + 'level).')
        arySlct04 = load_vtk_multi(strVtkSlct04,
                                   strPrcdData,
                                   varNumLne,
                                   varNumDpth)
    # Otherwise, create dummy array (for function I/O):
    else:
        arySlct04 = 0

    # Import depth data vtk files
    if idxPrc == 0:
        print('---------Importing depth data vtk files.')
    # Number of input files (i.e. number of conditions):
    varNumCon = len(lstVtkDpth01)
    # List for input data:
    lstDpthData01 = [None] * varNumCon
    # Loop through input data files:
    for idxIn in range(0, varNumCon):
        # Import data from file:
        lstDpthData01[idxIn] = load_vtk_multi(lstVtkDpth01[idxIn],
                                              strPrcdData,
                                              varNumLne,
                                              varNumDpth)
        if idxPrc == 0:
            print('------------File ' + str(idxIn + 1) + ' out of '
                  + str(varNumCon))
    # *************************************************************************

    # *************************************************************************
    # *** Convert cope to percent signal change

    # According to the FSL documentation
    # (https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT/UserGuide), the PEs can be
    # scaled to signal change with respect to the mean (over time within
    # voxel): "This is achieved by scaling the PE or COPE values by (100*) the
    # peak-peak height of the regressor (or effective regressor in the case of
    # COPEs) and then by dividing by mean_func (the mean over time of
    # filtered_func_data)." However, this PSC would be with respect to the
    # temporal mean, but we are interested in the PSC with respect to
    # pre-stimulus baseline. Thus, we extract the difference (a scaling
    # factor) between these two (i.e. temporal mean vs. pre-stimulus baseline)
    # from the respective FSL design matrix (`design.mat` in the FEAT
    # directory). The scaling factor is approximately 1.4 (slightly different
    # values for sustained and transient predictors, but close enough not to
    # matter). This scaling factor needs to be applied after the procedure
    # described in the FSL documentation. Thus, the final PSC is calculated as
    # follows: `(PE * (100 * peak-peak height) / tmean) * 1.4`. The pp-height
    # is obtained from `design.mat`.
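    # For illustration (hypothetical numbers, not taken from the actual
    # data): with a PE of 5.0, a peak-to-peak regressor height of 1.268, and
    # a temporal mean of 8000, the PSC would be
    # (5.0 * (100 * 1.268) / 8000) * 1.4 = 0.11 percent (approximately).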

    # Only perform scaling if the data is from an FSL cope or PE file:
    if (('cope' in lstVtkDpth01[0]) or ('_pe' in lstVtkDpth01[0])):
        if idxPrc == 0:
            print('---------Convert cope to percent signal change.')

        # The peak-peak height depends on the predictor (i.e. condition).
        if 'sst' in lstVtkDpth01[0]:
            varPpheight = 1.268049
        elif 'trn' in lstVtkDpth01[0]:
            varPpheight = 0.2269044
        else:
            if idxPrc == 0:
                print(('------------WARNING: Cannot determine condition from '
                       + 'file name, peak-peak height of the regressor is set '
                       + 'to 1.'))
            varPpheight = 1.0

        # Loop through input data files:
        for idxIn in range(0, varNumCon):

            # Get PEs:
            aryTmp = lstDpthData01[idxIn].astype(np.float64)

            # In order to avoid division by zero, exclude zero-valued voxels
            # from the scaling:

            # Apply PSC scaling, as described above:
            aryTmp[lgcTmp] = np.multiply(
                                         np.divide(
                                                   np.multiply(aryTmp[lgcTmp],
                                                               (100.0
                                                                * varPpheight)
                                                               ),
                                                   arySlct03[lgcTmp]),
                                         1.0  # 1.4
                                         )

            # Put scaled PEs back into list (now PSC with respect to
            # pre-stimulus baseline):
            lstDpthData01[idxIn] = aryTmp
    # *************************************************************************

    # *************************************************************************
    # *** Select vertices

    lstDpthData01, varNumInc, vecInc = \
        slct_vrtcs(varNumCon,           # Number of conditions
                   lstDpthData01,       # List with depth-sampled data I
                   lgcSlct01,           # Criterion 1 - Yes or no?
                   aryRoiVrtx,          # Criterion 1 - Data (ROI)
                   lgcSlct02,           # Criterion 2 - Yes or no?
                   arySlct02,           # Criterion 2 - Data
                   varThrSlct02,        # Criterion 2 - Threshold
                   lgcSlct03,           # Criterion 3 - Yes or no?
                   arySlct03,           # Criterion 3 - Data
                   varThrSlct03,        # Criterion 3 - Threshold
                   lgcSlct04,           # Criterion 4 - Yes or no?
                   arySlct04,           # Criterion 4 - Data
                   tplThrSlct04,        # Criterion 4 - Threshold
                   idxPrc)              # Process ID
    # *************************************************************************

    # *************************************************************************
    # *** Create VTK mesh mask

    if idxPrc == 0:
        print('---------Creating VTK mesh mask.')

    # We would like to be able to visualise the selected vertices on the
    # cortical surface, i.e. on a vtk mesh.
    vtk_msk(strSubId,         # Data struc - Subject ID
            lstVtkDpth01[0],  # Data struc - Path first data vtk file
            strPrcdData,      # Data struc - Str. prcd. VTK data
            varNumLne,        # Data struc - Lns. prcd. data VTK
            strCsvRoi,        # Data struc - ROI CSV fle (outpt. naming)
            vecInc,           # Vector with included vertices
            strMetaCon)       # Metacondition (stimulus or periphery)
    # *************************************************************************

    # *************************************************************************
    # *** Calculate mean & confidence interval

    if idxPrc == 0:
        print('---------Plot results - mean over vertices.')

    # Prepare arrays for results (mean & confidence interval):
    aryDpthMean = np.zeros((varNumCon, varNumDpth))
    aryDpthConf = np.zeros((varNumCon, varNumDpth))

    # Fill array with data - loop through input files:
    for idxIn in range(0, varNumCon):

        # Loop through depth levels:
        for idxDpth in range(0, varNumDpth):

            # Avoid warning in case of empty array (i.e. no vertices included
            # in ROI for current ROI/subject/hemisphere):
            if np.greater(np.sum(vecInc), 0):

                # Retrieve all vertex data for current input file & current
                # depth level:
                aryTmp = lstDpthData01[idxIn][:, idxDpth]

                # Calculate mean over vertices:
                varTmp = np.mean(aryTmp)

                # Calculate 95% confidence interval for the mean, obtained by
                # multiplying the standard error of the mean (SEM) by 1.96.
                # We obtain the SEM by dividing the standard deviation by the
                # square root of the sample size n. We get n by taking 1/8 of
                # the number of vertices, which corresponds to the number of
                # voxels in native resolution.
                varCnf = np.multiply(np.divide(np.std(aryTmp),
                                               np.sqrt(aryTmp.size * 0.125)),
                                     1.96)

            else:

                # No vertices in ROI:
                varTmp = 0.0
                varCnf = 0.0

            # Place mean in array:
            aryDpthMean[idxIn, idxDpth] = varTmp

            # Place confidence interval in array:
            aryDpthConf[idxIn, idxDpth] = varCnf

            # Calculate standard error of the mean.
            # varTmp = np.divide(np.std(aryTmp),
            #                    np.sqrt(aryTmp.size * 0.125))
            # Place SEM in array:
            # aryDpthConf[idxIn, idxDpth] = varTmp

            # Calculate standard deviation over vertices:
            # varTmp = np.std(aryTmp)

            # Place standard deviation in array:
            # aryDpthConf[idxIn, idxDpth] = varTmp
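            # Worked example (hypothetical numbers, for illustration only):
            # with 800 included vertices, n = 800 / 8 = 100; given a standard
            # deviation of 2.0 across vertices, SEM = 2.0 / sqrt(100) = 0.2,
            # and the 95% confidence interval is 1.96 * 0.2 = 0.392.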

    # Normalise by division:
    if lgcNormDiv:

        if idxPrc == 0:
            print('---------Normalisation by division.')

        # Vector for subtraction:
        # vecSub = np.array(aryDpthMean[varNormIdx, :], ndmin=2)
        # Divide all rows by reference row:
        # aryDpthMean = np.divide(aryDpthMean, vecSub)

        # Calculate 'grand mean', i.e. the mean PE across depth levels and
        # conditions:
        varGrndMean = np.mean(aryDpthMean)
        # varGrndMean = np.median(aryDpthMean)

        # Divide all values by the grand mean:
        aryDpthMean = np.divide(np.absolute(aryDpthMean), varGrndMean)
        aryDpthConf = np.divide(np.absolute(aryDpthConf), varGrndMean)
    # *************************************************************************

    # *************************************************************************
    # *** Create plot

    if False:

        # File name for figure:
        strPltOt = strPltOtPre + strSubId + strPltOtSuf

        # Title, including information about number of vertices:
        strTitleTmp = (strTitle
                       + ', '
                       + str(varNumInc)
                       + ' vertices')

        plt_dpth_prfl(aryDpthMean,  # Data: aryData[Condition, Depth]
                      aryDpthConf,  # Error shading: aryError[Condition, Depth]
                      varNumDpth,   # Number of depth levels (on the x-axis)
                      varNumCon,    # Number of conditions (separate lines)
                      varDpi,       # Resolution of the output figure
                      varYmin,      # Minimum of Y axis
                      varYmax,      # Maximum of Y axis
                      False,        # Boolean: whether to convert y axis to %
                      lstConLbl,    # Labels for conditions (separate lines)
                      strXlabel,    # Label on x axis
                      strYlabel,    # Label on y axis
                      strTitleTmp,  # Figure title
                      True,         # Boolean: whether to plot a legend
                      strPltOt)
    # *************************************************************************

    # *************************************************************************
    # *** Return

    # Output list:
    lstOut = [idxPrc,
              aryDpthMean,
              varNumInc]

    queOut.put(lstOut)
# *****************************************************************************

# *****************************************************************************
# *** Plot ratio across depth levels

print('---Plotting ratio')

# Calculate mean ratio (mean across subjects):
aryRatioMean = np.mean(aryRatio, axis=0)

# Calculate standard error of the mean (for error bar):
aryRatioSem = np.divide(np.std(aryRatio, axis=0), np.sqrt(varNumSub))

# Plot ratio across cortical depth:
plt_dpth_prfl(aryRatioMean, aryRatioSem, varNumDpth, varNumCon, varDpi,
              varYmin03, varYmax03, True, lstConLbl, strXlabel03, strYlabel03,
              strTtl03, lgcLgnd03, strPathOut03)
# *****************************************************************************

# *****************************************************************************
# *** Subtract baseline mean

# The input to this function consists of timecourses that have been normalised
# to the pre-stimulus baseline. The datapoints are signal intensity relative to
# the pre-stimulus baseline, and the pre-stimulus baseline has a mean of one.
# We subtract one, so that the datapoints represent percent signal change
# relative to baseline.
for strSubID, aryRoiErt in dicAllSubsRoiErt.items():
    aryRoiErt = np.subtract(aryRoiErt, 1.0)
    # Write the updated array back into the dictionary (np.subtract returns a
    # new array, so reassignment is necessary):
    dicAllSubsRoiErt[strSubID] = aryRoiErt
def boot_plot(lstCon,
              objDpth,
              strPath,
              varNumIt=10000,
              varConLw=2.5,
              varConUp=97.5,
              strTtl='',
              strXlabel='Cortical depth level (equivolume)',
              strYlabel='fMRI signal change [arbitrary units]',
              lgcLgnd=False):
    """
    Plot across-subject cortical depth profiles with confidence intervals.

    Parameters
    ----------
    lstCon : list
        Abbreviated condition levels used to complete file names (e.g. 'Pd').
    objDpth : np.array or str
        Array with single-subject cortical depth profiles, of the form:
        aryDpth[idxSub, idxCondition, idxDpth]. Either a numpy array or a
        string with the path to an npy file containing the array.
    strPath : str
        Output path for plot.
    varNumIt : int
        Number of bootstrap iterations.
    varConLw : float
        Lower bound of the percentile bootstrap confidence interval in
        percent (i.e. in range of [0, 100]).
    varConUp : float
        Upper bound of the percentile bootstrap confidence interval in
        percent (i.e. in range of [0, 100]).
    strTtl : str
        Plot title.
    strXlabel : str
        Label for x axis.
    strYlabel : str
        Label for y axis.
    lgcLgnd : bool
        Whether to show a legend.

    Returns
    -------
    None : None
        This function has no return value.

    Notes
    -----
    Plot across-subject median cortical depth profiles with percentile
    bootstrap confidence intervals. This function bootstraps (i.e. resamples
    with replacement) from an array of single-subject depth profiles,
    calculates a confidence interval of the median across bootstrap iterations
    and plots the empirical median & bootstrap confidence intervals along the
    cortical depth.

    Function of the depth sampling pipeline.
    """
    # ------------------------------------------------------------------------
    # *** Prepare bootstrapping

    # Check whether the input is a numpy array or a string (i.e. the path to
    # a numpy array):
    lgcAry = isinstance(objDpth, np.ndarray)
    lgcStr = isinstance(objDpth, str)

    # If the input is an array, use it directly; if it is a string, load the
    # array from file:
    if lgcAry:
        aryDpth = objDpth
    elif lgcStr:
        # Load array for first condition to get dimensions:
        aryTmpDpth = np.load(objDpth.format(lstCon[0]))
        # Number of subjects:
        varNumSub = aryTmpDpth.shape[0]
        # Get number of depth levels from input array:
        varNumDpth = aryTmpDpth.shape[1]
        # Number of conditions:
        varNumCon = len(lstCon)
        # Array for depth profiles of form aryDpth[subject, condition, depth]:
        aryDpth = np.zeros((varNumSub, varNumCon, varNumDpth))
        # Load single-condition arrays from disk:
        for idxCon in range(varNumCon):
            aryDpth[:, idxCon, :] = np.load(objDpth.format(lstCon[idxCon]))
    else:
        raise ValueError('Input to boot_plot needs to be a numpy array or '
                         + 'the path to a numpy array.')

    # Get number of subjects from input array:
    varNumSub = aryDpth.shape[0]
    # Get number of conditions from input array:
    varNumCon = aryDpth.shape[1]
    # Get number of depth levels from input array:
    varNumDpth = aryDpth.shape[2]

    # We will sample subjects with replacement. How many subjects to sample on
    # each iteration:
    varNumSmp = varNumSub

    # Random array with subject indices for bootstrapping of the form
    # aryRnd[varNumIt, varNumSmp]. Each row includes the indices of the
    # subjects to be sampled on that iteration.
    aryRnd = np.random.randint(0, high=varNumSub, size=(varNumIt, varNumSmp))

    # Array for bootstrap samples, of the form
    # aryBoo[idxIteration, idxSubject, idxCondition, idxDpth]):
    aryBoo = np.zeros((varNumIt, varNumSub, varNumCon, varNumDpth))

    # ------------------------------------------------------------------------
    # *** Bootstrap

    # Loop through bootstrap iterations:
    for idxIt in range(varNumIt):
        # Indices of current bootstrap sample:
        vecRnd = aryRnd[idxIt, :]
        # Put current bootstrap sample into array:
        aryBoo[idxIt, :, :, :] = aryDpth[vecRnd, :, :]

    # Median for each bootstrap sample (across subjects within the bootstrap
    # sample):
    aryBooMed = np.median(aryBoo, axis=1)

    # Delete large bootstrap array:
    del aryBoo

    # Percentile bootstrap for median:
    aryPrct = np.percentile(aryBooMed, (varConLw, varConUp), axis=0)

    # ------------------------------------------------------------------------
    # *** Plot result

    # Condition labels:
    lstConLbl = ['2.5%', '6.1%', '16.3%', '72.0%']

    # Labels for axes:
    strXlabel = 'Cortical depth level (equivolume)'
    strYlabel = 'fMRI signal change [arbitrary units]'

    # Empirical median:
    aryEmpMed = np.median(aryDpth, axis=0)

    plt_dpth_prfl(aryEmpMed,
                  None,
                  varNumDpth,
                  varNumCon,
                  80.0,
                  0.0,
                  2.0,
                  False,
                  lstConLbl,
                  strXlabel,
                  strYlabel,
                  strTtl,
                  lgcLgnd,
                  strPath,
                  varSizeX=1800.0,
                  varSizeY=1600.0,
                  varNumLblY=5,
                  tplPadY=(0.1, 0.1),
                  aryCnfLw=aryPrct[0, :, :],
                  aryCnfUp=aryPrct[1, :, :])
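
# Example usage (a sketch with hypothetical condition abbreviations and file
# paths; `{}` in the input path is filled with each entry of lstCon):
# boot_plot(['Pd', 'Cd', 'Ps', 'Cz'],
#           '/home/user/depth_profiles_{}.npy',
#           '/home/user/plots/boot_plot.svg',
#           varNumIt=10000,
#           strTtl='V1',
#           lgcLgnd=True)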