Example #1
import numpy as np
from scipy.stats import t


def TwoSamples_tTest(x, y, SignificanceLevel=0.05):

    # Analyze data
    n = len(x)
    m = len(y)
    s_x = np.std(x,ddof=1)
    s_y = np.std(y,ddof=1)
    x_bar = np.mean(x)
    y_bar = np.mean(y)

    # Compute test statistic with pooled standard deviation
    DOFs = n+m-2
    S_pool = np.sqrt(1/DOFs * ( (n-1)*s_x**2 + (m-1)*s_y**2 ))
    T = (x_bar - y_bar) / (S_pool * np.sqrt(1/n + 1/m))

    # Compute two-sided p value
    if T >= 0:
        p = 2 * (1-t.cdf(T,DOFs))
    else:
        p = 2 * t.cdf(T, DOFs)

    # Compute t interval and corresponding rejection region
    T_Interval = np.array(t.interval(1-SignificanceLevel,DOFs))
    RejectionRange = np.array([[-np.inf,T_Interval[0]],[T_Interval[1],np.inf]])

    # Compute CI for difference in means
    MeansInterval =  (x_bar-y_bar) + T_Interval * S_pool * np.sqrt(1/n + 1/m)

    return T, p, RejectionRange, MeansInterval
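
A minimal usage sketch for the test above; the two samples are made-up numbers, purely for illustration.

import numpy as np

x = np.array([4.2, 5.1, 4.8, 5.5, 4.9, 5.0])
y = np.array([3.9, 4.4, 4.1, 4.6, 4.3])
T, p, RejectionRange, MeansInterval = TwoSamples_tTest(x, y, SignificanceLevel=0.05)
print('T = ' + str(round(T, 3)) + ', p = ' + str(round(p, 3)))
print('CI for the difference in means: ' + str(MeansInterval.round(3)))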
Example #2
def PlotRegressionResults(Model, Data, Alpha=0.95, Random=True):

    ## Get data from the model
    Y_Obs = Model.model.endog
    Y_Fit = Model.fittedvalues
    if not Random:
        Y_Fit = Model.predict()
    N = int(Model.nobs)
    C = np.matrix(Model.cov_params())
    X = np.matrix(Model.model.exog)

    if not C.shape[0] == X.shape[1]:
        C = C[:-1,:-1]


    ## Compute R2 and standard error of the estimate
    E = Y_Obs - Y_Fit
    RSS = np.sum(E ** 2)
    SE = np.sqrt(RSS / Model.df_resid)
    TSS = np.sum((Model.model.endog - Model.model.endog.mean()) ** 2)
    RegSS = TSS - RSS
    R2 = RegSS / TSS

    ## Compute adjusted R2
    R2adj = 1 - RSS/TSS * (N-1)/(N-X.shape[1]-1)

    Line = Data['BVTV'] * Model.params['BVTV'] + Model.params['Intercept']

    B_0 = np.sqrt(np.diag(np.abs(X * C * X.T)))
    t_Alpha = t.interval(Alpha, N - X.shape[1] - 1)
    CI_Line_u = Line + t_Alpha[0] * SE * B_0
    CI_Line_o = Line + t_Alpha[1] * SE * B_0


    ## Plots
    DPI = 500
    Figure, Axes = plt.subplots(1, 1, figsize=(5.5, 4.5), dpi=DPI)
    Axes.plot(Data['BVTV'], Data['tBMD'],
              linestyle='none', marker='o', color=(0, 0, 0), fillstyle='none', label='Data')
    Axes.plot(np.sort(Data['BVTV']), Line[Data['BVTV'].sort_values().index], linestyle='--', color=(1, 0, 0), label='Fit')
    Axes.plot(np.sort(Data['BVTV']), CI_Line_u[Data['BVTV'].sort_values().index], color=(0, 0, 1), linestyle='--', label= str(int(100*Alpha)) + '% CI')
    Axes.plot(np.sort(Data['BVTV']), CI_Line_o[Data['BVTV'].sort_values().index], color=(0, 0, 1), linestyle='--')
    Axes.set_xlabel('BV/TV (-)')
    Axes.set_ylabel('Tissue BMD (mgHA/cm$^3$)')
    plt.legend(loc='upper center', ncol=3, bbox_to_anchor=(0.5, 1.15))
    plt.subplots_adjust(left=0.175)
    plt.show()
    plt.close(Figure)

    return R2adj, SE
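
A possible way to produce the Model and Data arguments expected above, assuming a pandas DataFrame with 'BVTV' and 'tBMD' columns (the file name is hypothetical; the statsmodels formula API yields the 'Intercept' and 'BVTV' parameter names the function looks up).

import pandas as pd
import statsmodels.formula.api as smf

Data = pd.read_csv('Data.csv')                      # hypothetical file with BVTV and tBMD columns
Model = smf.ols('tBMD ~ BVTV', data=Data).fit()
R2adj, SE = PlotRegressionResults(Model, Data, Alpha=0.95)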
Example #3
def ComputeOriginalModelConstants(Model, Alpha=0.95, k=0, l=0):

    ## Print summary
    print('\n\n Summary and constants for original model\n')
    print(Model.summary())

    ## Get values, covariance matrix and CI
    B = Model.params
    C = Model.cov_params()
    B_CI = Model.conf_int()

    ## Set t value for given confidence level
    t_Alpha = t.interval(Alpha, Model.nobs - 12 - 1)

    ## Compute stiffness constants
    Mu0 = np.exp(B['Sjj']) / 2
    Lambda0p = np.exp(B['Sij'])
    Lambda0 = np.exp(B['Sii']) - 2 * Mu0

    ## Compute CI for lambda 0
    C_add = np.abs(C.loc['Sii', 'Sii']) + np.abs(C.loc['Sjj', 'Sjj'])
    SE_L0 = np.sqrt(C_add + 2 * np.abs(C.loc['Sii', 'Sjj']))
    L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

    print('\n\nFit constants with ' + str(Alpha * 100) + '% CI :')
    print('Lambda0: ' + str(int(round(Lambda0, 0))) + ' [' +
          str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
          str(int(round(np.exp(L0_CI[1]), 0))) + ']')
    print('Lambda0p: ' + str(int(round(Lambda0p, 0))) + ' [' +
          str(int(round(np.exp(B_CI.loc['Sij', 0]), 0))) + ' - ' +
          str(int(round(np.exp(B_CI.loc['Sij', 1]), 0))) + ']')
    print('Mu0: ' + str(int(round(Mu0, 0))) + ' [' +
          str(int(round(np.exp(B_CI.loc['Sjj', 0]) / 2, 0))) + ' - ' +
          str(int(round(np.exp(B_CI.loc['Sjj', 1]) / 2, 0))) + ']')

    ## Build data frame
    Table = pd.DataFrame({
        'Lambda0': Lambda0,
        'Lambda0 CI': np.exp(L0_CI),
        'Lambda0p': Lambda0p,
        'Lambda0p CI': np.exp(B_CI.loc['Sij', :]),
        'Mu0': Mu0,
        'Mu0 CI': np.exp(B_CI.loc['Sjj', :]) / 2
    })

    ## Get exponent values
    if 'LogBVTV' in B and 'Logmxy' in B:
        l = B['Logmxy']
        k = B['LogBVTV']

        print('k: ' + str(round(k, 3)) + ' [' +
              str(round(B_CI.loc['LogBVTV', 0], 3)) + ' - ' +
              str(round(B_CI.loc['LogBVTV', 1], 3)) + ']')
        print('l: ' + str(round(l, 3)) + ' [' +
              str(round(B_CI.loc['Logmxy', 0], 3)) + ' - ' +
              str(round(B_CI.loc['Logmxy', 1], 3)) + ']' + '\n\n')

        Table[['k', 'k CI', 'l',
               'l CI']] = k, B_CI.loc['LogBVTV', :], l, B_CI.loc['Logmxy', :]

    else:
        print('k: ' + str(round(k, 3)))
        print('l: ' + str(round(l, 3)) + '\n\n')

        Table[['k', 'l']] = k, l

    ## Compute R2 and standard error of the estimate
    E = Model.resid.values
    RSS = np.sum(E**2)
    SE = np.sqrt(RSS / Model.df_resid)
    TSS = np.sum((Model.model.endog - Model.model.endog.mean())**2)
    RegSS = TSS - RSS
    R2 = RegSS / TSS

    Table[['R2', 'SE']] = R2, SE

    ## Partial R2
    for Parameter in B.index:
        if 'Var' in Parameter:
            Y_Obs = np.exp(Model.model.endog)
            Y_Predict = np.exp(Model.predict())
            E_Predict = np.log(Y_Obs) - np.log(Y_Predict)
            RSS_Predict = np.sum(E_Predict**2)
            SE_Predict = np.sqrt(RSS_Predict / Model.df_resid)
            RegSS_Predict = TSS - RSS_Predict
            R2_Predict = RegSS_Predict / TSS

            Table[['Partial R2', 'Partial SE']] = R2_Predict, SE_Predict

    return Table
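
A usage sketch, assuming the model was fitted with statsmodels on a DataFrame Data holding the log-transformed stiffness data, with regressor names matching what the function expects ('Sii', 'Sij', 'Sjj', and optionally 'LogBVTV' and 'Logmxy').

import statsmodels.formula.api as smf

Model = smf.ols('LogSxy ~ Sii + Sij + Sjj + LogBVTV + Logmxy - 1', data=Data).fit()
ConstantsTable = ComputeOriginalModelConstants(Model, Alpha=0.95)
print(ConstantsTable)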
Example #4
def PlotFilteredRegression(Model,
                           CompleteData,
                           PlotTypes=['BV/TV', 'DA', 'CV', 'Constants'],
                           Alpha=0.95):

    ## Get data from the model
    Y_Obs = np.exp(Model.model.endog)
    Y_Fit = np.exp(Model.fittedvalues)
    N = int(Model.nobs)
    C = np.matrix(Model.cov_params())
    X = np.matrix(Model.model.exog)

    if not C.shape[0] == X.shape[1]:
        C = C[:-1, :-1]

    ## Compute R2 and standard error of the estimate
    E = Model.resid.values
    RSS = np.sum(E**2)
    SE = np.sqrt(RSS / Model.df_resid)
    TSS = np.sum((Model.model.endog - Model.model.endog.mean())**2)
    RegSS = TSS - RSS
    R2 = RegSS / TSS

    Line = np.linspace(min(Y_Obs.min(), Y_Fit.min()),
                       max(Y_Obs.max(), Y_Fit.max()), N)

    B_0 = np.sqrt(np.diag(np.abs(X * C * X.T)))
    t_Alpha = t.interval(Alpha, N - X.shape[1] - 1)
    CI_Line_u = Line + t_Alpha[0] * np.exp(SE) * np.exp(B_0)
    CI_Line_o = Line + t_Alpha[1] * np.exp(SE) * np.exp(B_0)

    Sii = Y_Fit * np.array(X[:, 0].T)[0]
    Sij = Y_Fit * np.array(X[:, 1].T)[0]
    Sjj = Y_Fit * np.array(X[:, 2].T)[0]

    ## Predict values for excluded data
    FilteredIndex = Model.model.data.orig_exog.index
    ExcludedData = pd.DataFrame()
    for Index in CompleteData.index:
        if Index not in FilteredIndex:
            ExcludedData = pd.concat([ExcludedData, CompleteData.loc[[Index]]])

    Y_Predict = np.exp(Model.predict(ExcludedData))

    ## Plots (axis limits use HealthyData, OIData and Y_Elements from the enclosing scope)
    DPI = 100
    SMax = max(HealthyData[Y_Elements].max()) * 5
    SMin = min(OIData[Y_Elements].min()) / 5
    if 'BV/TV' in PlotTypes:
        Color = np.exp(CompleteData.loc[FilteredIndex, 'LogBVTV'].values)
        VMin = HealthyData['BV/TV'].min()
        VMax = HealthyData['BV/TV'].max()
        Figure, Axes = plt.subplots(1,
                                    1,
                                    figsize=(6.5, 4.5),
                                    dpi=DPI,
                                    sharey=True,
                                    sharex=True)
        Scatter = Axes.scatter(Y_Obs,
                               np.array(Y_Fit),
                               c=Color,
                               vmin=VMin,
                               vmax=VMax,
                               cmap='jet',
                               marker='o')
        Axes.plot(np.exp(ExcludedData['LogSxy']),
                  Y_Predict,
                  linestyle='none',
                  color=(0, 0, 0),
                  marker='o',
                  alpha=0.2,
                  label='Excluded data')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_u), color=(0.4, 0.4, 0.4), linestyle='--')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_o), color=(0.4, 0.4, 0.4), linestyle='--')
        Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
        Axes.annotate(r'$N$  : ' + str(len(Y_Obs)),
                      xy=(0.7, 0.175),
                      xycoords='axes fraction')
        Axes.annotate(r'$R^2$: ' + str(round(R2, 4)),
                      xy=(0.7, 0.1),
                      xycoords='axes fraction')
        Axes.annotate(r'$SE$: ' + str(round(SE, 4)),
                      xy=(0.7, 0.025),
                      xycoords='axes fraction')
        Axes.set_xlabel('Observed $\mathbb{S}_{xy}$ (MPa)')
        Axes.set_ylabel('Fitted $\mathbb{S}_{xy}$ (MPa)')
        Axes.set_xlim([SMin, SMax])
        Axes.set_ylim([SMin, SMax])
        plt.xscale('log')
        plt.yscale('log')
        ColorBar = plt.colorbar(Scatter)
        ColorBar.ax.set_ylabel('BV/TV (-)')
        plt.legend(loc='upper left')
        plt.subplots_adjust(left=0.15, bottom=0.15)
        plt.show()

    if 'DA' in PlotTypes:
        Color = np.exp(CompleteData.loc[FilteredIndex, 'Logmxy'].values)
        Figure, Axes = plt.subplots(1,
                                    1,
                                    figsize=(6.5, 4.5),
                                    dpi=DPI,
                                    sharey=True,
                                    sharex=True)
        Scatter = Axes.scatter(Y_Obs,
                               np.array(Y_Fit),
                               c=Color,
                               cmap='jet',
                               marker='o')
        Axes.plot(np.exp(ExcludedData['LogSxy']),
                  Y_Predict,
                  linestyle='none',
                  color=(0, 0, 0),
                  marker='o',
                  alpha=0.2,
                  label='Excluded data')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_u), color=(0.4, 0.4, 0.4), linestyle='--')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_o), color=(0.4, 0.4, 0.4), linestyle='--')
        Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
        Axes.annotate(r'$N$  : ' + str(len(Y_Obs)),
                      xy=(0.7, 0.175),
                      xycoords='axes fraction')
        Axes.annotate(r'$R^2$: ' + str(round(R2, 4)),
                      xy=(0.7, 0.1),
                      xycoords='axes fraction')
        Axes.annotate(r'$SE$: ' + str(round(SE, 4)),
                      xy=(0.7, 0.025),
                      xycoords='axes fraction')
        Axes.set_xlabel('Observed $\mathbb{S}_{xy}$ (MPa)')
        Axes.set_ylabel('Fitted $\mathbb{S}_{xy}$ (MPa)')
        Axes.set_xlim([SMin, SMax])
        Axes.set_ylim([SMin, SMax])
        plt.xscale('log')
        plt.yscale('log')
        ColorBar = plt.colorbar(Scatter)
        ColorBar.ax.set_ylabel('Degree of Anisotropy (-)')
        plt.legend(loc='upper left')
        plt.subplots_adjust(left=0.15, bottom=0.15)
        plt.show()

    if 'CV' in PlotTypes:
        Color = np.exp(CompleteData.loc[FilteredIndex, 'LogCV'].values)
        VMin = HealthyData['Variation Coefficient'].min()
        VMax = HealthyData['Variation Coefficient'].max()
        Figure, Axes = plt.subplots(1,
                                    1,
                                    figsize=(6.5, 4.5),
                                    dpi=DPI,
                                    sharey=True,
                                    sharex=True)
        Scatter = Axes.scatter(Y_Obs,
                               np.array(Y_Fit),
                               c=Color,
                               vmin=VMin,
                               vmax=VMax,
                               cmap='jet',
                               marker='o')
        Axes.plot(np.exp(ExcludedData['LogSxy']),
                  Y_Predict,
                  linestyle='none',
                  color=(0, 0, 0),
                  marker='o',
                  alpha=0.2,
                  label='Excluded data')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_u), color=(0.4, 0.4, 0.4), linestyle='--')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_o), color=(0.4, 0.4, 0.4), linestyle='--')
        Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
        Axes.annotate(r'$N$  : ' + str(len(Y_Obs)),
                      xy=(0.7, 0.175),
                      xycoords='axes fraction')
        Axes.annotate(r'$R^2$: ' + str(round(R2, 4)),
                      xy=(0.7, 0.1),
                      xycoords='axes fraction')
        Axes.annotate(r'$SE$: ' + str(round(SE, 4)),
                      xy=(0.7, 0.025),
                      xycoords='axes fraction')
        Axes.set_xlabel('Observed $\mathbb{S}_{xy}$ (MPa)')
        Axes.set_ylabel('Fitted $\mathbb{S}_{xy}$ (MPa)')
        Axes.set_xlim([SMin, SMax])
        Axes.set_ylim([SMin, SMax])
        plt.xscale('log')
        plt.yscale('log')
        ColorBar = plt.colorbar(Scatter)
        ColorBar.ax.set_ylabel('Coefficient of Variation (-)')
        plt.legend(loc='upper left')
        plt.subplots_adjust(left=0.15, bottom=0.15)
        plt.show()

    if 'Constants' in PlotTypes:
        Figure, Axes = plt.subplots(1,
                                    1,
                                    figsize=(5.5, 4.5),
                                    dpi=DPI,
                                    sharey=True,
                                    sharex=True)
        Axes.plot(Y_Obs,
                  Sii,
                  color=(0, 0, 1),
                  linestyle='none',
                  marker='o',
                  label=r'$\lambda_{ii}$')
        Axes.plot(Y_Obs,
                  Sij,
                  color=(0, 1, 0),
                  linestyle='none',
                  marker='o',
                  label=r'$\lambda_{ij}$')
        Axes.plot(Y_Obs,
                  Sjj,
                  color=(1, 0, 0),
                  linestyle='none',
                  marker='o',
                  label=r'$\mu_{ij}$')
        Axes.plot(np.exp(ExcludedData['LogSxy']),
                  Y_Predict,
                  linestyle='none',
                  color=(0, 0, 0),
                  marker='o',
                  alpha=0.2,
                  label='Excluded data')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_u), color=(0.4, 0.4, 0.4), linestyle='--')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_o), color=(0.4, 0.4, 0.4), linestyle='--')
        Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
        Axes.annotate(r'$N$  : ' + str(len(Y_Obs)),
                      xy=(0.7, 0.175),
                      xycoords='axes fraction')
        Axes.annotate(r'$R^2$: ' + str(round(R2, 4)),
                      xy=(0.7, 0.1),
                      xycoords='axes fraction')
        Axes.annotate(r'$SE$: ' + str(round(SE, 4)),
                      xy=(0.7, 0.025),
                      xycoords='axes fraction')
        Axes.set_xlabel('Observed $\mathbb{S}_{xy}$')
        Axes.set_ylabel('Fitted $\mathbb{S}_{xy}$')
        Axes.set_xlim([SMin, SMax])
        Axes.set_ylim([SMin, SMax])
        plt.xscale('log')
        plt.yscale('log')
        plt.legend(loc='upper left')
        plt.subplots_adjust(left=0.15, bottom=0.15)
        plt.show()

    return
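
A usage sketch, assuming the model was fitted on a filtered subset of CompleteData (ExcludedIndices is a hypothetical list of excluded rows) so the excluded data can be predicted and overlaid; HealthyData, OIData and Y_Elements must already exist in the enclosing scope.

import statsmodels.formula.api as smf

FilteredData = CompleteData.drop(ExcludedIndices)   # ExcludedIndices: hypothetical excluded ROIs
Model = smf.ols('LogSxy ~ Sii + Sij + Sjj + LogBVTV + Logmxy - 1', data=FilteredData).fit()
PlotFilteredRegression(Model, CompleteData, PlotTypes=['BV/TV'])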
Example #5
                          bins=20,
                          edgecolor=(0, 0, 1),
                          color=(1, 1, 1),
                          label='Histogram')
    Axes.plot(D, KernelEstimator, color=(1, 0, 0), label='Kernel Density')
    Axes.plot(D,
              TheoreticalDistribution,
              linestyle='--',
              color=(0, 0, 0),
              label='Normal Distribution')
    Axes.plot([D_bar - 1.96 * S_D, D_bar + 1.96 * S_D], [0.4, 0.4],
              color=(0, 1, 0))
    plt.xlabel('D values')
    plt.ylabel('Density (-)')
    plt.legend(loc='upper center',
               ncol=3,
               bbox_to_anchor=(0.5, 1.15),
               prop={'size': 10})
    plt.show()
    plt.close(Figure)

    ## Compute the correction contributed by the CV term
    B_CVEffect = CVEffect_Models[i].params
    Exponent = B_CVEffect['LogCV']
    CVMin, CVMax = np.exp(D_bar + S_D * np.array(t.interval(0.95, N_D)))

    print('\n\n' + Datasets[i] + ' Data set')
    print('CV low: ' + str(round(CVMin**Exponent, 3)))
    print('CV high: ' + str(round(CVMax**Exponent, 3)))
    print('Error factor: ' + str(round((CVMax / CVMin)**Exponent, 3)))
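
The snippet above starts in the middle of a histogram call; below is a minimal, self-contained sketch of the kind of setup it assumes, with D taken to be an array of residuals (placeholder values here), the kernel density from scipy, and the dashed curve the matching normal distribution.

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm, gaussian_kde

D = np.sort(np.random.default_rng(0).normal(0, 0.1, 100))   # placeholder residuals
N_D = len(D) - 1
D_bar, S_D = np.mean(D), np.std(D, ddof=1)
KernelEstimator = gaussian_kde(D)(D)
TheoreticalDistribution = norm.pdf(D, D_bar, S_D)

Figure, Axes = plt.subplots(1, 1, figsize=(5.5, 4.5), dpi=100)
# ... followed by the Axes.hist / Axes.plot calls shown in the snippet above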
Example #6
def PlotRegressionResults(Model, Alpha=0.95):

    print(Model.summary())

    ## Plot results
    Y_Obs = Model.model.endog
    Y_Fit = Model.fittedvalues
    N = int(Model.nobs)
    C = np.matrix(Model.cov_params())
    X = np.matrix(Model.model.exog)
    X_Obs = np.sort(np.array(X[:, 1]).reshape(len(X)))

    ## Compute R2 and standard error of the estimate
    E = Y_Obs - Y_Fit
    RSS = np.sum(E**2)
    SE = np.sqrt(RSS / Model.df_resid)
    TSS = np.sum((Model.model.endog - Model.model.endog.mean())**2)
    RegSS = TSS - RSS
    R2 = RegSS / TSS
    R2adj = 1 - RSS / TSS * (N - 1) / (N - X.shape[1] + 1 - 1)

    ## Compute CI lines
    B_0 = np.sqrt(np.diag(np.abs(X * C * X.T)))
    t_Alpha = t.interval(Alpha, N - X.shape[1] - 1)
    CI_Line_u = Y_Fit + t_Alpha[0] * SE * B_0
    CI_Line_o = Y_Fit + t_Alpha[1] * SE * B_0

    t_Alpha2 = t.interval(0.9, N - X.shape[1] - 1)
    CI_Line_u2 = Y_Fit + t_Alpha2[0] * SE * B_0
    CI_Line_o2 = Y_Fit + t_Alpha2[1] * SE * B_0

    ## Plots
    DPI = 100
    Figure, Axes = plt.subplots(1,
                                1,
                                figsize=(5.5, 4.5),
                                dpi=DPI,
                                sharey=True,
                                sharex=True)
    Axes.plot(X[:, 1], Y_Fit, color=(1, 0, 0))
    Axes.fill_between(X_Obs,
                      np.sort(CI_Line_o2),
                      np.sort(CI_Line_u2),
                      color=(0, 0, 0),
                      alpha=0.1)
    Axes.plot(X_Obs, np.sort(CI_Line_u), color=(0, 0, 1), linestyle='--')
    Axes.plot(X_Obs, np.sort(CI_Line_o), color=(0, 0, 1), linestyle='--')
    Axes.annotate(r'$N$  : ' + str(N), xy=(0.7, 0.2), xycoords='axes fraction')
    Axes.annotate(r'$R^2$ : ' + format(round(R2, 5), '.5f'),
                  xy=(0.7, 0.125),
                  xycoords='axes fraction')
    Axes.annotate(r'$SE$ : ' + format(round(SE, 2), '.2f'),
                  xy=(0.7, 0.05),
                  xycoords='axes fraction')
    Axes.plot(X[:, 1],
              Y_Obs,
              linestyle='none',
              marker='o',
              color=(0, 0, 0),
              fillstyle='none',
              label='Data')
    Axes.plot([], color=(1, 0, 0), label='Fit')
    Axes.fill_between([], [], color=(0, 0, 0), alpha=0.1, label='90% CI')
    Axes.plot([], color=(0, 0, 1), linestyle='--', label='95% CI')
    Axes.set_xlabel('Phantom gray values (-)')
    Axes.set_ylabel('Cylinder reference densities (mg HA/ccm)')
    plt.subplots_adjust(left=0.15, bottom=0.15)
    plt.legend(loc='upper left')
    plt.show()
    plt.close(Figure)
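
A possible calibration call, assuming a DataFrame PhantomData holding the phantom gray values and the cylinder reference densities under hypothetical column names 'GrayValue' and 'Density'.

import statsmodels.formula.api as smf

Calibration = smf.ols('Density ~ GrayValue', data=PhantomData).fit()
PlotRegressionResults(Calibration, Alpha=0.95)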
Example #7
def PlotRegressionResults(Model,
                          Data,
                          PlotTypes=['BV/TV', 'DA', 'Constants'],
                          Alpha=0.95,
                          Random=True,
                          Colors=[(0, 0, 1), (0, 1, 0), (1, 0, 0)]):

    ## Get data from the model
    Y_Obs = np.exp(Model.model.endog)
    Y_Fit = np.exp(Model.fittedvalues)
    if not Random:
        Y_Fit = np.exp(Model.predict())
    N = int(Model.nobs)
    C = np.matrix(Model.cov_params())
    X = np.matrix(Model.model.exog)
    NROIs = int(N / 12)

    if not C.shape[0] == X.shape[1]:
        C = C[:-1, :-1]

    ## Compute R2 and standard error of the estimate
    E = np.log(Y_Obs) - np.log(Y_Fit)
    RSS = np.sum(E**2)
    SE = np.sqrt(RSS / Model.df_resid)
    TSS = np.sum((Model.model.endog - Model.model.endog.mean())**2)
    RegSS = TSS - RSS
    R2 = RegSS / TSS

    ## Compute R2 adj and NE
    R2adj = 1 - RSS / TSS * (12 * N - 1) / (12 * N - X.shape[1] - 1)

    NE = np.array([])
    for i in range(0, N, 12):
        ObservedTensor = Y_Obs[i:i + 12]
        PredictedTensor = Y_Fit[i:i + 12]

        Numerator = np.sum((ObservedTensor - PredictedTensor)**2)
        Denominator = np.sum(ObservedTensor**2)

        NE = np.append(NE, np.sqrt(Numerator / Denominator))

    Line = np.linspace(min(Y_Obs.min(), Y_Fit.min()),
                       max(Y_Obs.max(), Y_Fit.max()), N)

    B_0 = np.sqrt(np.diag(np.abs(X * C * X.T)))
    t_Alpha = t.interval(Alpha, N - X.shape[1] - 1)
    CI_Line_u = Line + t_Alpha[0] * np.exp(SE) * np.exp(B_0)
    CI_Line_o = Line + t_Alpha[1] * np.exp(SE) * np.exp(B_0)

    Sii = Y_Fit * np.array(X[:, 0].T)[0]
    Sij = Y_Fit * np.array(X[:, 1].T)[0]
    Sjj = Y_Fit * np.array(X[:, 2].T)[0]

    ## Plots (axis limits use HealthyData, OIData and Y_Elements from the enclosing scope)
    DPI = 500
    SMax = max(HealthyData[Y_Elements].max()) * 5
    SMin = min(OIData[Y_Elements].min()) / 5
    if 'BV/TV' in PlotTypes:
        Color = np.exp(Data['LogBVTV'].values)
        VMin = HealthyData['BV/TV'].min()
        VMax = HealthyData['BV/TV'].max()
        Figure, Axes = plt.subplots(1,
                                    1,
                                    figsize=(6.5, 4.5),
                                    dpi=DPI,
                                    sharey=True,
                                    sharex=True)
        Scatter = Axes.scatter(Y_Obs,
                               np.array(Y_Fit),
                               c=Color,
                               vmin=VMin,
                               vmax=VMax,
                               cmap='jet',
                               marker='o')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_u), color=(0.4, 0.4, 0.4), linestyle='--')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_o), color=(0.4, 0.4, 0.4), linestyle='--')
        Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
        Axes.annotate(r'N ROIs   : ' + str(NROIs),
                      xy=(0.3, 0.1),
                      xycoords='axes fraction')
        Axes.annotate(r'N Points : ' + str(len(Y_Obs)),
                      xy=(0.3, 0.025),
                      xycoords='axes fraction')
        Axes.annotate(r'$R^2_{adj}$: ' + format(round(R2adj, 3), '.3f'),
                      xy=(0.65, 0.1),
                      xycoords='axes fraction')
        Axes.annotate(r'$NE$ : ' + format(round(NE.mean(), 2), '.2f') +
                      '$\pm$' + format(round(NE.std(), 2), '.2f'),
                      xy=(0.65, 0.025),
                      xycoords='axes fraction')
        Axes.set_xlabel('Observed $\mathrm{\mathbb{S}}$ (MPa)')
        Axes.set_ylabel('Fitted $\mathrm{\mathbb{S}}$ (MPa)')
        Axes.set_xlim([SMin, SMax])
        Axes.set_ylim([SMin, SMax])
        plt.xscale('log')
        plt.yscale('log')
        ColorBar = plt.colorbar(Scatter)
        ColorBar.ax.set_ylabel('BV/TV (-)')
        plt.subplots_adjust(left=0.15, bottom=0.15)
        plt.show()

    if 'DA' in PlotTypes:
        Color = np.exp(Data['Logmxy'].values)
        Figure, Axes = plt.subplots(1,
                                    1,
                                    figsize=(6.5, 4.5),
                                    dpi=DPI,
                                    sharey=True,
                                    sharex=True)
        Scatter = Axes.scatter(Y_Obs,
                               np.array(Y_Fit),
                               c=Color,
                               cmap='jet',
                               marker='o')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_u), color=(0.4, 0.4, 0.4), linestyle='--')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_o), color=(0.4, 0.4, 0.4), linestyle='--')
        Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
        Axes.annotate(r'N ROIs   : ' + str(NROIs),
                      xy=(0.3, 0.1),
                      xycoords='axes fraction')
        Axes.annotate(r'N Points : ' + str(len(Y_Obs)),
                      xy=(0.3, 0.025),
                      xycoords='axes fraction')
        Axes.annotate(r'$R^2_{adj}$: ' + format(round(R2adj, 3), '.3f'),
                      xy=(0.65, 0.1),
                      xycoords='axes fraction')
        Axes.annotate(r'$NE$ : ' + format(round(NE.mean(), 2), '.2f') +
                      '$\pm$' + format(round(NE.std(), 2), '.2f'),
                      xy=(0.65, 0.025),
                      xycoords='axes fraction')
        Axes.set_xlabel('Observed $\mathrm{\mathbb{S}}$ (MPa)')
        Axes.set_ylabel('Fitted $\mathrm{\mathbb{S}}$ (MPa)')
        Axes.set_xlim([SMin, SMax])
        Axes.set_ylim([SMin, SMax])
        plt.xscale('log')
        plt.yscale('log')
        ColorBar = plt.colorbar(Scatter)
        ColorBar.ax.set_ylabel('Degree of Anisotropy (-)')
        plt.subplots_adjust(left=0.15, bottom=0.15)
        plt.show()

    if 'Constants' in PlotTypes:
        Figure, Axes = plt.subplots(1,
                                    1,
                                    figsize=(5.5, 4.5),
                                    dpi=DPI,
                                    sharey=True,
                                    sharex=True)
        Axes.plot(Y_Obs,
                  Sii,
                  color=Colors[0],
                  linestyle='none',
                  marker='s',
                  label=r'$\lambda_{ii}$')
        Axes.plot(Y_Obs,
                  Sij,
                  color=Colors[1],
                  linestyle='none',
                  marker='o',
                  label=r'$\lambda_{ij}$')
        Axes.plot(Y_Obs,
                  Sjj,
                  color=Colors[2],
                  linestyle='none',
                  marker='^',
                  label=r'$\mu_{ij}$')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_u), color=(0.4, 0.4, 0.4), linestyle='--')
        # Axes.plot(np.sort(Line), np.sort(CI_Line_o), color=(0.4, 0.4, 0.4), linestyle='--')
        Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
        Axes.annotate(r'N ROIs   : ' + str(NROIs),
                      xy=(0.3, 0.1),
                      xycoords='axes fraction')
        Axes.annotate(r'N Points : ' + str(len(Y_Obs)),
                      xy=(0.3, 0.025),
                      xycoords='axes fraction')
        Axes.annotate(r'$R^2_{adj}$: ' + format(round(R2adj, 3), '.3f'),
                      xy=(0.65, 0.1),
                      xycoords='axes fraction')
        Axes.annotate(r'NE : ' + format(round(NE.mean(), 2), '.2f') + '$\pm$' +
                      format(round(NE.std(), 2), '.2f'),
                      xy=(0.65, 0.025),
                      xycoords='axes fraction')
        Axes.set_xlabel('Observed $\mathrm{\mathbb{S}}$ (MPa)')
        Axes.set_ylabel('Fitted $\mathrm{\mathbb{S}}$ (MPa)')
        Axes.set_xlim([SMin, SMax])
        Axes.set_ylim([SMin, SMax])
        plt.xscale('log')
        plt.yscale('log')
        plt.legend(loc='upper left')
        plt.subplots_adjust(left=0.15, bottom=0.15)
        plt.show()

    return R2adj, NE
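
Usage sketch, assuming Data holds one row per stiffness-tensor component (12 per ROI) with the regressor columns used above; HealthyData, OIData and Y_Elements must exist in the enclosing scope for the axis limits.

import statsmodels.formula.api as smf

Model = smf.ols('LogSxy ~ Sii + Sij + Sjj + LogBVTV + Logmxy - 1', data=Data).fit()
R2adj, NE = PlotRegressionResults(Model, Data, PlotTypes=['BV/TV', 'Constants'], Alpha=0.95)
print('Adjusted R2: ' + str(round(R2adj, 3)))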
Example #8
def ComputeLMConstants(Model, Alpha=0.95, k=0, l=0):

    ## Get general model parameters
    N = int(Model.nobs)
    p = len(Model.params)

    ## Get values, covariance matrix and CI
    B = Model.params
    C = Model.cov_params()
    B_CI = Model.conf_int()

    ## Set t value for given confidence level
    t_Alpha = t.interval(Alpha, Model.nobs - 12 - 1)

    ## Compute stiffness constants
    Mu0 = np.exp(B['Sjj']) / 2
    Lambda0p = np.exp(B['Sij'])
    Lambda0 = np.exp(B['Sii']) - 2 * Mu0

    ## Compute CI for lambda 0
    C_add = np.abs(C.loc['Sii', 'Sii']) + np.abs(C.loc['Sjj', 'Sjj'])
    SE_L0 = np.sqrt(C_add + 2 * np.abs(C.loc['Sii', 'Sjj']))
    L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

    print('\n\nFit constants with ' + str(Alpha * 100) + '% CI :')
    print('Lambda0: ' + str(int(round(Lambda0, 0))) + ' [' +
          str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
          str(int(round(np.exp(L0_CI[1]), 0))) + ']')
    print('Lambda0p: ' + str(int(round(Lambda0p, 0))) + ' [' +
          str(int(round(np.exp(B_CI.loc['Sij', 0]), 0))) + ' - ' +
          str(int(round(np.exp(B_CI.loc['Sij', 1]), 0))) + ']')
    print('Mu0: ' + str(int(round(Mu0, 0))) + ' [' +
          str(int(round(np.exp(B_CI.loc['Sjj', 0]) / 2, 0))) + ' - ' +
          str(int(round(np.exp(B_CI.loc['Sjj', 1]) / 2, 0))) + ']')

    ## Build data frame
    Table = pd.DataFrame({
        'Lambda0': Lambda0,
        'Lambda0 CI': np.exp(L0_CI),
        'Lambda0p': Lambda0p,
        'Lambda0p CI': np.exp(B_CI.loc['Sij', :]),
        'Mu0': Mu0,
        'Mu0 CI': np.exp(B_CI.loc['Sjj', :]) / 2
    })

    ## Get exponent values
    if 'LogBVTV' in B and 'Logmxy' in B:
        l = B['Logmxy']
        k = B['LogBVTV']

        print('k: ' + str(round(k, 3)) + ' [' +
              str(round(B_CI.loc['LogBVTV', 0], 3)) + ' - ' +
              str(round(B_CI.loc['LogBVTV', 1], 3)) + ']')
        print('l: ' + str(round(l, 3)) + ' [' +
              str(round(B_CI.loc['Logmxy', 0], 3)) + ' - ' +
              str(round(B_CI.loc['Logmxy', 1], 3)) + ']' + '\n\n')

        Table[['k', 'k CI', 'l',
               'l CI']] = k, B_CI.loc['LogBVTV', :], l, B_CI.loc['Logmxy', :]

    else:
        print('k: ' + str(round(k, 3)))
        print('l: ' + str(round(l, 3)) + '\n\n')

        Table[['k', 'l']] = k, l

    ## Compute R2 and standard error of the estimate
    E = Model.resid.values
    RSS = np.sum(E**2)
    SE = np.sqrt(RSS / Model.df_resid)
    TSS = np.sum((Model.model.endog - Model.model.endog.mean())**2)
    RegSS = TSS - RSS
    R2 = RegSS / TSS

    ## Compute R2 adj and NE
    R2adj = 1 - RSS / TSS * (12 * N - 1) / (12 * N - p - 1)

    NE = np.array([])
    Y_Obs = np.exp(Model.model.endog)
    Y_Fit = np.exp(Model.fittedvalues)
    for i in range(0, N, 12):
        ObservedTensor = Y_Obs[i:i + 12]
        PredictedTensor = Y_Fit[i:i + 12]

        Numerator = np.sum((ObservedTensor - PredictedTensor)**2)
        Denominator = np.sum(ObservedTensor**2)

        NE = np.append(NE, np.sqrt(Numerator / Denominator))

    Table[['R2', 'SE', 'R2 adj', 'NE',
           'NE std']] = R2, SE, R2adj, np.mean(NE), np.std(NE)

    ## Partial R2
    for Parameter in B.index:
        if 'Var' in Parameter:
            Y_Predict = np.exp(Model.predict())
            E_Predict = np.log(Y_Obs) - np.log(Y_Predict)
            RSS_Predict = np.sum(E_Predict**2)
            SE_Predict = np.sqrt(RSS_Predict / Model.df_resid)
            RegSS_Predict = TSS - RSS_Predict
            R2_Predict = RegSS_Predict / TSS

            ## Compute R2 adj and NE
            R2adj_Predict = 1 - RSS_Predict / TSS * (12 * N - 1) / (12 * N -
                                                                    p - 1)

            NE_Predict = np.array([])
            for i in range(0, N, 12):
                ObservedTensor = Y_Obs[i:i + 12]
                PredictedTensor = Y_Predict[i:i + 12]

                Numerator = np.sum((ObservedTensor - PredictedTensor)**2)
                Denominator = np.sum(ObservedTensor**2)

                NE_Predict = np.append(NE_Predict,
                                       np.sqrt(Numerator / Denominator))

            Table[['R2 Partial', 'SE Partial', 'R2 adj Partial', 'NE Partial', 'NE std Partial']] =\
                R2_Predict, SE_Predict, R2adj_Predict, np.mean(NE_Predict), np.std(NE_Predict)

    return Table
Example #9
def PlotRegressionResults(Data, Y_Obs, Y_Fit, SE, R2, X, C_x, Alpha=0.95):

    N = len(Y_Obs)

    Line = np.linspace(min(Y_Obs.min(), Y_Fit.min()),
                       max(Y_Obs.max(), Y_Fit.max()), N)

    B_0 = np.sqrt(np.diag(np.abs(X * C_x * X.T)))
    t_Alpha = t.interval(Alpha, N - X.shape[1] - 1)
    CI_Line_u = Line + t_Alpha[0] * np.exp(SE) * np.exp(B_0)
    CI_Line_o = Line + t_Alpha[1] * np.exp(SE) * np.exp(B_0)

    Sii = Y_Fit * np.array(X[:, 0].T)[0]
    Sij = Y_Fit * np.array(X[:, 1].T)[0]
    Sjj = Y_Fit * np.array(X[:, 2].T)[0]

    ## Plots
    DPI = 100
    Figure, Axes = plt.subplots(1,
                                1,
                                figsize=(5.5, 4.5),
                                dpi=DPI,
                                sharey=True,
                                sharex=True)
    Axes.plot(Y_Obs,
              Sii,
              color=(0, 0, 1),
              linestyle='none',
              marker='o',
              label=r'$\lambda_{ii}$')
    Axes.plot(Y_Obs,
              Sij,
              color=(0, 1, 0),
              linestyle='none',
              marker='o',
              label=r'$\lambda_{ij}$')
    Axes.plot(Y_Obs,
              Sjj,
              color=(1, 0, 0),
              linestyle='none',
              marker='o',
              label=r'$\mu_{ij}$')
    Axes.plot(np.sort(Line),
              np.sort(CI_Line_u),
              color=(0.4, 0.4, 0.4),
              linestyle='--')
    Axes.plot(np.sort(Line),
              np.sort(CI_Line_o),
              color=(0.4, 0.4, 0.4),
              linestyle='--')
    Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
    # Axes.annotate(r'N: ' + str(len(Y_Obs)), (10 ** 3, 20 ** 1))
    # Axes.annotate(r'$R^2$: ' + str(round(R2, 4)), (10 ** 3, 10 ** 1))
    Axes.annotate(r'N: ' + str(len(Y_Obs)), (3 * 10**2, 3 * 10**-1))
    Axes.annotate(r'$R^2$: ' + str(round(R2, 4)), (3 * 10**2, 1.5 * 10**-1))
    Axes.set_xlabel('Observed $\mathbb{S}_{xy}$')
    Axes.set_ylabel(r'Fitted $\mathbb{S}_{xy}$')
    plt.xscale('log')
    plt.yscale('log')
    plt.ylim([10**-1, 6 * 10**3])
    plt.legend(loc='upper left')
    plt.show()

    Indices = Data[Data['Group'] == 'Control'].index
    Figure, Axes = plt.subplots(1,
                                1,
                                figsize=(5.5, 4.5),
                                dpi=DPI,
                                sharey=True,
                                sharex=True)
    Axes.plot(Y_Obs,
              Y_Fit,
              alpha=0.1,
              color=(0, 0, 0),
              linestyle='none',
              marker='o')
    Axes.plot(Y_Obs[Indices],
              Sii[:len(Indices)],
              color=(0, 0, 1),
              linestyle='none',
              marker='o',
              label=r'$\lambda_{ii}$')
    Axes.plot(Y_Obs[Indices],
              Sij[:len(Indices)],
              color=(0, 1, 0),
              linestyle='none',
              marker='o',
              label=r'$\lambda_{ij}$')
    Axes.plot(Y_Obs[Indices],
              Sjj[:len(Indices)],
              color=(1, 0, 0),
              linestyle='none',
              marker='o',
              label=r'$\mu_{ij}$')
    Axes.plot(np.sort(Line),
              np.sort(CI_Line_u),
              color=(0.4, 0.4, 0.4),
              linestyle='--')
    Axes.plot(np.sort(Line),
              np.sort(CI_Line_o),
              color=(0.4, 0.4, 0.4),
              linestyle='--')
    Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
    Axes.set_xlabel('Observed $\mathbb{S}_{xy}$')
    Axes.set_ylabel(r'Fitted $\mathbb{S}_{xy}$')
    plt.xscale('log')
    plt.yscale('log')
    plt.ylim([10**-1, 6 * 10**3])
    plt.legend(loc='upper left')
    plt.show()

    Indices = Data[Data['Group'] == 'Test'].index
    Figure, Axes = plt.subplots(1,
                                1,
                                figsize=(5.5, 4.5),
                                dpi=DPI,
                                sharey=True,
                                sharex=True)
    Axes.plot(Y_Obs,
              Y_Fit,
              alpha=0.1,
              color=(0, 0, 0),
              linestyle='none',
              marker='o')
    Axes.plot(Y_Obs[Indices],
              Sii[-len(Indices):],
              color=(0, 0, 1),
              linestyle='none',
              marker='o',
              label=r'$\lambda_{ii}$')
    Axes.plot(Y_Obs[Indices],
              Sij[-len(Indices):],
              color=(0, 1, 0),
              linestyle='none',
              marker='o',
              label=r'$\lambda_{ij}$')
    Axes.plot(Y_Obs[Indices],
              Sjj[-len(Indices):],
              color=(1, 0, 0),
              linestyle='none',
              marker='o',
              label=r'$\mu_{ij}$')
    Axes.plot(np.sort(Line),
              np.sort(CI_Line_u),
              color=(0.4, 0.4, 0.4),
              linestyle='--')
    Axes.plot(np.sort(Line),
              np.sort(CI_Line_o),
              color=(0.4, 0.4, 0.4),
              linestyle='--')
    Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
    Axes.set_xlabel('Observed $\mathbb{S}_{xy}$')
    Axes.set_ylabel(r'Fitted $\mathbb{S}_{xy}$')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend(loc='upper left')
    plt.show()

    return
Example #10
def ComputeCoefficients(B, B_CI, C, N, GroupMeans, Alpha=0.95):

    l = B[4]
    k = B[3]
    Mu0 = np.exp(B[2])
    Lambda0p = np.exp(B[1])
    Lambda0 = np.exp(B[0]) - 2 * Mu0

    # Compute CI for lambda 0
    t_Alpha = t.interval(Alpha, N - 12 - 1)

    RSS = np.sum(Model_Fit.resid.values**2)   # Model_Fit is the fitted model taken from the enclosing scope
    SE = np.sqrt(RSS / (N - 12))

    C_add = np.abs(C[0, 0]) + np.abs(C[2, 2])
    SE_L0 = SE * np.sqrt(C_add)
    L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

    print('\n Mean constants:')
    print('Lambda0: ' + str(int(round(Lambda0, 0))) + ' [' +
          str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
          str(int(round(np.exp(L0_CI[1]), 0))) + ']')
    print('Lambda0p: ' + str(int(round(Lambda0p, 0))) + ' [' +
          str(int(round(np.exp(B_CI[1, 0]), 0))) + ' - ' +
          str(int(round(np.exp(B_CI[1, 1]), 0))) + ']')
    print('Mu0: ' + str(int(round(Mu0, 0))) + ' [' +
          str(int(round(np.exp(B_CI[2, 0]), 0))) + ' - ' +
          str(int(round(np.exp(B_CI[2, 1]), 0))) + ']')
    print('k: ' + str(round(k, 3)) + ' [' + str(round(B_CI[3, 0], 3)) + ' - ' +
          str(round(B_CI[3, 1], 3)) + ']')
    print('l: ' + str(round(l, 3)) + ' [' + str(round(B_CI[4, 0], 3)) + ' - ' +
          str(round(B_CI[4, 1], 3)) + ']')

    # Compute CI for combination of parameters
    Combinations = [[1, 6], [2, 7]]
    SE_B_Comb = np.zeros(len(Combinations))
    B_CI_Comb = np.zeros((len(Combinations), 2))

    for i in range(len(Combinations)):
        Combination = Combinations[i]
        C_add = 0
        for j in Combination:
            C_add += C[j, j]
        SE_B_Comb[i] += SE * np.sqrt(C_add)
        B_CI_Comb[i] += np.array(B[Combination[0]] +
                                 GroupMeans[2] * B[Combination[1]] +
                                 SE_B_Comb[i] * np.array(t_Alpha))

    Mu0 = np.exp(B[2] + GroupMeans[2] * B[7])
    Lambda0p = np.exp(B[1] + GroupMeans[2] * B[6])
    Lambda0 = np.exp(B[0] + GroupMeans[2] * B[5]) - 2 * Mu0

    # Compute CI for lambda 0
    C_add = np.abs(C[0, 0]) + np.abs(C[2, 2]) + np.abs(C[5, 5]) + np.abs(C[7,
                                                                           7])
    SE_L0 = SE * np.sqrt(C_add)
    L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

    print('\n Mean constants:')
    print('Lambda0: ' + str(int(round(Lambda0, 0))) + ' [' +
          str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
          str(int(round(np.exp(L0_CI[1]), 0))) + ']')
    print('Lambda0p: ' + str(int(round(Lambda0p, 0))) + ' [' +
          str(int(round(np.exp(B_CI_Comb[0, 0]), 0))) + ' - ' +
          str(int(round(np.exp(B_CI_Comb[0, 1]), 0))) + ']')
    print('Mu0: ' + str(int(round(Mu0, 0))) + ' [' +
          str(int(round(np.exp(B_CI_Comb[1, 0]), 0))) + ' - ' +
          str(int(round(np.exp(B_CI_Comb[1, 1]), 0))) + ']')
    print('k: ' + str(round(k, 3)) + ' [' + str(round(B_CI[3, 0], 3)) + ' - ' +
          str(round(B_CI[3, 1], 3)) + ']')
    print('l: ' + str(round(l, 3)) + ' [' + str(round(B_CI[4, 0], 3)) + ' - ' +
          str(round(B_CI[4, 1], 3)) + ']')

    # Compute CI for combination of parameters
    Combinations = [[1, 6], [2, 7]]
    SE_B_Comb = np.zeros(len(Combinations))
    B_CI_Comb = np.zeros((len(Combinations), 2))

    for i in range(len(Combinations)):
        Combination = Combinations[i]
        C_add = 0
        for j in Combination:
            C_add += C[j, j]
        SE_B_Comb[i] += SE * np.sqrt(C_add)
        B_CI_Comb[i] += np.array(B[Combination[0]] +
                                 GroupMeans[0] * B[Combination[1]] +
                                 SE_B_Comb[i] * np.array(t_Alpha))

    Mu0 = np.exp(B[2] + GroupMeans[0] * B[7])
    Lambda0p = np.exp(B[1] + GroupMeans[0] * B[6])
    Lambda0 = np.exp(B[0] + GroupMeans[0] * B[5]) - 2 * Mu0

    # Compute CI for lambda 0
    C_add = np.abs(C[0, 0]) + np.abs(C[2, 2]) + np.abs(C[5, 5]) + np.abs(C[7,
                                                                           7])
    SE_L0 = SE * np.sqrt(C_add)
    L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

    print('\n Healthy constants:')
    print('Lambda0: ' + str(int(round(Lambda0, 0))) + ' [' +
          str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
          str(int(round(np.exp(L0_CI[1]), 0))) + ']')
    print('Lambda0p: ' + str(int(round(Lambda0p, 0))) + ' [' +
          str(int(round(np.exp(B_CI_Comb[0, 0]), 0))) + ' - ' +
          str(int(round(np.exp(B_CI_Comb[0, 1]), 0))) + ']')
    print('Mu0: ' + str(int(round(Mu0, 0))) + ' [' +
          str(int(round(np.exp(B_CI_Comb[1, 0]), 0))) + ' - ' +
          str(int(round(np.exp(B_CI_Comb[1, 1]), 0))) + ']')
    print('k: ' + str(round(k, 3)) + ' [' + str(round(B_CI[3, 0], 3)) + ' - ' +
          str(round(B_CI[3, 1], 3)) + ']')
    print('l: ' + str(round(l, 3)) + ' [' + str(round(B_CI[4, 0], 3)) + ' - ' +
          str(round(B_CI[4, 1], 3)) + ']')

    # Compute CI for combination of parameters
    Combinations = [[1, 6], [2, 7]]
    SE_B_Comb = np.zeros(len(Combinations))
    B_CI_Comb = np.zeros((len(Combinations), 2))

    for i in range(len(Combinations)):
        Combination = Combinations[i]
        C_add = 0
        for j in Combination:
            C_add += C[j, j]
        SE_B_Comb[i] += SE * np.sqrt(C_add)
        B_CI_Comb[i] += np.array(B[Combination[0]] +
                                 GroupMeans[1] * B[Combination[1]] +
                                 SE_B_Comb[i] * np.array(t_Alpha))

    Mu0 = np.exp(B[2] + GroupMeans[1] * B[7])
    Lambda0p = np.exp(B[1] + GroupMeans[1] * B[6])
    Lambda0 = np.exp(B[0] + GroupMeans[1] * B[5]) - 2 * Mu0

    # Compute CI for lambda 0
    C_add = np.abs(C[0, 0]) + np.abs(C[2, 2]) + np.abs(C[5, 5]) + np.abs(C[7,
                                                                           7])
    SE_L0 = SE * np.sqrt(C_add)
    L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

    print('\n OI constants:')
    print('Lambda0: ' + str(int(round(Lambda0, 0))) + ' [' +
          str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
          str(int(round(np.exp(L0_CI[1]), 0))) + ']')
    print('Lambda0p: ' + str(int(round(Lambda0p, 0))) + ' [' +
          str(int(round(np.exp(B_CI_Comb[0, 0]), 0))) + ' - ' +
          str(int(round(np.exp(B_CI_Comb[0, 1]), 0))) + ']')
    print('Mu0: ' + str(int(round(Mu0, 0))) + ' [' +
          str(int(round(np.exp(B_CI_Comb[1, 0]), 0))) + ' - ' +
          str(int(round(np.exp(B_CI_Comb[1, 1]), 0))) + ']')
    print('k: ' + str(round(k, 3)) + ' [' + str(round(B_CI[3, 0], 3)) + ' - ' +
          str(round(B_CI[3, 1], 3)) + ']')
    print('l: ' + str(round(l, 3)) + ' [' + str(round(B_CI[4, 0], 3)) + ' - ' +
          str(round(B_CI[4, 1], 3)) + ']')

    return
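
A sketch of how the inputs might be assembled from a previously fitted model (Model_Fit, which the function above also reads directly for the residuals); the GroupMeans values are placeholders, with indices 0, 1 and 2 used for the healthy, OI and mean printouts respectively.

import numpy as np

B = Model_Fit.params.values
B_CI = Model_Fit.conf_int().values
C = Model_Fit.cov_params().values
N = int(Model_Fit.nobs)
GroupMeans = np.array([0.25, 0.40, 0.32])   # placeholder group means (healthy, OI, pooled)
ComputeCoefficients(B, B_CI, C, N, GroupMeans, Alpha=0.95)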
Example #11
    C = C[:-1,:-1]


## Compute R2 and standard error of the estimate
E = Model.resid.values
RSS = np.sum(E ** 2)
SE = np.sqrt(RSS / Model.df_resid)
TSS = np.sum((Model.model.endog - Model.model.endog.mean()) ** 2)
RegSS = TSS - RSS
R2 = RegSS / TSS

Line = np.linspace(min(Y_Obs.min(), Y_Fit.min()),
                   max(Y_Obs.max(), Y_Fit.max()), N)

B_0 = np.sqrt(np.diag(np.abs(X * C * X.T)))
t_Alpha = t.interval(Alpha, N - X.shape[1] - 1)
CI_Line_u = Line + t_Alpha[0] * np.exp(SE) * np.exp(B_0)
CI_Line_o = Line + t_Alpha[1] * np.exp(SE) * np.exp(B_0)

Sii = Y_Fit * np.array(X[:, 0].T)[0]
Sij = Y_Fit * np.array(X[:, 1].T)[0]
Sjj = Y_Fit * np.array(X[:, 2].T)[0]

## Plots (axis limits use HealthyData, OIData and Y_Elements defined earlier in the script)
DPI = 100
SMax = max(HealthyData[Y_Elements].max()) * 5
SMin = min(OIData[Y_Elements].min()) / 5

Data2Plot = pd.DataFrame()
i = 0
for Index in CV_Ordered.index:
Example #12
def PlotRegressionResults(Model, Alpha=0.95):

    print(Model.summary())

    ## Plot results
    Y_Obs = Model.model.endog
    Y_Fit = Model.fittedvalues
    N = int(Model.nobs)
    C = np.matrix(Model.cov_params())
    X = np.matrix(Model.model.exog)
    X_Obs = np.sort(np.array(X[:, 1]).reshape(len(X)))

    ## Compute R2 and standard error of the estimate
    E = Y_Obs - Y_Fit
    RSS = np.sum(E**2)
    SE = np.sqrt(RSS / Model.df_resid)
    TSS = np.sum((Model.model.endog - Model.model.endog.mean())**2)
    RegSS = TSS - RSS
    R2 = RegSS / TSS
    R2adj = 1 - RSS / TSS * (N - 1) / (N - X.shape[1] + 1 - 1)

    ## Compute CI lines
    B_0 = np.sqrt(np.diag(np.abs(X * C * X.T)))
    t_Alpha = t.interval(Alpha, N - X.shape[1] - 1)
    CI_Line_u = Y_Fit + t_Alpha[0] * SE * B_0
    CI_Line_o = Y_Fit + t_Alpha[1] * SE * B_0

    t_Alpha2 = t.interval(0.9, N - X.shape[1] - 1)
    CI_Line_u2 = Y_Fit + t_Alpha2[0] * SE * B_0
    CI_Line_o2 = Y_Fit + t_Alpha2[1] * SE * B_0

    ## Plots
    DPI = 100
    Figure, Axes = plt.subplots(1,
                                1,
                                figsize=(5.5, 4.5),
                                dpi=DPI,
                                sharey=True,
                                sharex=True)
    Axes.plot(X[:, 1], Y_Fit, color=(1, 0, 0))

    if Model.model.endog_names == 'Stiffness':
        Axes.fill_between(X_Obs,
                          np.sort(CI_Line_o2),
                          np.sort(CI_Line_u2),
                          color=(0, 0, 0),
                          alpha=0.1)
        Axes.plot(X_Obs, np.sort(CI_Line_u), color=(0, 0, 1), linestyle='--')
        Axes.plot(X_Obs, np.sort(CI_Line_o), color=(0, 0, 1), linestyle='--')
        Axes.annotate(r'$N$  : ' + str(N),
                      xy=(0.05, 0.875),
                      xycoords='axes fraction')
        Axes.annotate(r'$R^2$ : ' + format(round(R2, 2), '.2f'),
                      xy=(0.05, 0.8),
                      xycoords='axes fraction')
        Axes.annotate(r'$SE$ : ' + format(round(SE, 2), '.2f'),
                      xy=(0.05, 0.725),
                      xycoords='axes fraction')
        Axes.set_ylabel('Loading Max Stiffness (kN/mm)')

    elif Model.model.endog_names == 'Load':
        Axes.fill_between(X_Obs,
                          np.sort(CI_Line_o2)[::-1],
                          np.sort(CI_Line_u2)[::-1],
                          color=(0, 0, 0),
                          alpha=0.1)
        Axes.plot(X_Obs,
                  np.sort(CI_Line_u)[::-1],
                  color=(0, 0, 1),
                  linestyle='--')
        Axes.plot(X_Obs,
                  np.sort(CI_Line_o)[::-1],
                  color=(0, 0, 1),
                  linestyle='--')
        Axes.annotate(r'$N$  : ' + str(N),
                      xy=(0.05, 0.175),
                      xycoords='axes fraction')
        Axes.annotate(r'$R^2$ : ' + format(round(R2, 2), '.2f'),
                      xy=(0.05, 0.1),
                      xycoords='axes fraction')
        Axes.annotate(r'$SE$ : ' + format(round(SE, 2), '.2f'),
                      xy=(0.05, 0.025),
                      xycoords='axes fraction')
        Axes.set_ylabel('Ultimate Load (kN)')

    Axes.plot(X[:, 1],
              Y_Obs,
              linestyle='none',
              marker='o',
              color=(0, 0, 0),
              fillstyle='none')
    Axes.set_xlabel('BMC (HA mg)')
    plt.subplots_adjust(left=0.15, bottom=0.15)
    plt.show()
    plt.close(Figure)
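
Usage sketch for the mechanical-test regressions, assuming a DataFrame MechanicalData with 'BMC', 'Stiffness' and 'Load' columns (the function above branches on the endogenous variable name).

import statsmodels.formula.api as smf

StiffnessModel = smf.ols('Stiffness ~ BMC', data=MechanicalData).fit()
PlotRegressionResults(StiffnessModel, Alpha=0.95)

LoadModel = smf.ols('Load ~ BMC', data=MechanicalData).fit()
PlotRegressionResults(LoadModel, Alpha=0.95)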
Example #13
def ComputeCoefficients(X, Y, Alpha=0.95):

    # Solve using matrix computation
    LHS = X.T * X
    RHS = X.T * np.matrix(Y).T
    B = np.linalg.solve(LHS, RHS)
    print('\nBi coefficients:')
    print(B.round(6))

    # Compute residuals and goodness of fit
    N = X.shape[0]  # number of observations (not passed in, so define it here)
    E = np.matrix(Y).T - X * B
    E = np.array(E)[:, 0]
    RSS = np.sum(E ** 2)
    SE = np.sqrt(RSS / (N - X.shape[1]))
    TSS = np.sum((Y - Y.mean()) ** 2)
    RegSS = TSS - RSS
    R2 = RegSS / TSS

    # Covariance matrix and standard error of the coefficients
    C = np.linalg.inv(X.T * X)
    SE_B = SE * np.sqrt(np.matrix(np.abs(np.diag(C))).T)
    print('\nBi standard errors:')
    print(SE_B.round(6))

    # Build 95% CI for slopes
    t_Alpha = t.interval(Alpha, N - X.shape[1] - 1)

    B_CI = B + SE_B * np.array(t_Alpha)


    if X.shape[1] < 8:

        Mu0 = np.exp(B[2, 0])
        Lambda0p = np.exp(B[1, 0])
        Lambda0 = np.exp(B[0, 0]) - 2 * Mu0

        # Compute CI for lambda 0
        C_add = np.abs(C[0, 0]) + np.abs(C[2, 2])
        SE_L0 = SE * np.sqrt(C_add)
        L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

        print('\n Mean constants:')
        print('Lambda0: ' + str(int(round(Lambda0, 0))) +
              ' [' + str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
              str(int(round(np.exp(L0_CI[1]), 0))) + ']')
        print('Lambda0p: ' + str(int(round(Lambda0p, 0))) +
              ' [' + str(int(round(np.exp(B_CI[1, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI[1, 1]), 0))) + ']')
        print('Mu0: ' + str(int(round(Mu0, 0))) +
              ' [' + str(int(round(np.exp(B_CI[2, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI[2, 1]), 0))) + ']')
    if X.shape[1] > 3:
        l = B[4, 0]
        k = B[3, 0]
        print('k: ' + str(round(k, 3)) +
              ' [' + str(round(B_CI[3, 0], 3)) + ' - ' +
              str(round(B_CI[3, 1], 3)) + ']')
        print('l: ' + str(round(l, 3)) +
              ' [' + str(round(B_CI[4, 0], 3)) + ' - ' +
              str(round(B_CI[4, 1], 3)) + ']')

    if X.shape[1] == 8:

        # Compute CI for combination of parameters
        Combinations = [[1, 6], [2, 7]]
        SE_B_Comb = np.zeros(len(Combinations))
        B_CI_Comb = np.zeros((len(Combinations), 2))

        for i in range(len(Combinations)):
            Combination = Combinations[i]
            C_add = 0
            for j in Combination:
                C_add += C[j, j]
            SE_B_Comb[i] += SE * np.sqrt(C_add)
            B_CI_Comb[i] += np.array(B[Combination[0]] + B[Combination[1]] + SE_B_Comb[i] * np.array(t_Alpha))[0]

        Mu0 = np.exp(B[2, 0] - B[7, 0])
        Lambda0p = np.exp(B[1, 0] - B[6, 0])
        Lambda0 = np.exp(B[0, 0] - B[5, 0]) - 2 * Mu0

        # Compute CI for lambda 0
        C_add = np.abs(C[0, 0]) + np.abs(C[2, 2]) + np.abs(C[5, 5]) + np.abs(C[7, 7])
        SE_L0 = SE * np.sqrt(C_add)
        L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

        print('\n Healthy constants:')
        print('Lambda0: ' + str(int(round(Lambda0, 0))) +
              ' [' + str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
              str(int(round(np.exp(L0_CI[1]), 0))) + ']')
        print('Lambda0p: ' + str(int(round(Lambda0p, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[0, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[0, 1]), 0))) + ']')
        print('Mu0: ' + str(int(round(Mu0, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[1, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[1, 1]), 0))) + ']')
        print('k: ' + str(round(k, 3)) +
              ' [' + str(round(B_CI[3, 0], 3)) + ' - ' +
              str(round(B_CI[3, 1], 3)) + ']')
        print('l: ' + str(round(l, 3)) +
              ' [' + str(round(B_CI[4, 0], 3)) + ' - ' +
              str(round(B_CI[4, 1], 3)) + ']')

        # Compute CI for combination of parameters
        Combinations = [[1, 6], [2, 7]]
        SE_B_Comb = np.zeros(len(Combinations))
        B_CI_Comb = np.zeros((len(Combinations), 2))

        for i in range(len(Combinations)):
            Combination = Combinations[i]
            C_add = 0
            for j in Combination:
                C_add += C[j, j]
            SE_B_Comb[i] += SE * np.sqrt(C_add)
            B_CI_Comb[i] += np.array(B[Combination[0]] - B[Combination[1]] + SE_B_Comb[i] * np.array(t_Alpha))[0]

        Mu0 = np.exp(B[2, 0] + B[7, 0])
        Lambda0p = np.exp(B[1, 0] + B[6, 0])
        Lambda0 = np.exp(B[0, 0] + B[5, 0]) - 2 * Mu0

        # Compute CI for lambda 0
        C_add = np.abs(C[0, 0]) + np.abs(C[2, 2]) + np.abs(C[5, 5]) + np.abs(C[7, 7])
        SE_L0 = SE * np.sqrt(C_add)
        L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

        print('\n OI constants:')
        print('Lambda0: ' + str(int(round(Lambda0, 0))) +
              ' [' + str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
              str(int(round(np.exp(L0_CI[1]), 0))) + ']')
        print('Lambda0p: ' + str(int(round(Lambda0p, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[0, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[0, 1]), 0))) + ']')
        print('Mu0: ' + str(int(round(Mu0, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[1, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[1, 1]), 0))) + ']')
        print('k: ' + str(round(k, 3)) +
              ' [' + str(round(B_CI[3, 0], 3)) + ' - ' +
              str(round(B_CI[3, 1], 3)) + ']')
        print('l: ' + str(round(l, 3)) +
              ' [' + str(round(B_CI[4, 0], 3)) + ' - ' +
              str(round(B_CI[4, 1], 3)) + ']')

    elif X.shape[1] == 11:

        Combinations = [[1, 6, 9], [2, 7, 10]]

        # Compute CI for combination of parameters
        SE_B_Comb = np.zeros(len(Combinations))
        B_CI_Comb = np.zeros((len(Combinations), 2))

        for i in range(len(Combinations)):
            Combination = Combinations[i]
            C_add = 0
            for j in Combination:
                C_add += C[j, j]
            SE_B_Comb[i] += SE * np.sqrt(C_add)
            B_CI_Comb[i] += np.array(B[Combination[0]] +
                                     B[Combination[1]] +
                                     B[Combination[2]] +
                                     SE_B_Comb[i] * np.array(t_Alpha))[0]

        Mu0 = np.exp(B[2, 0] + (-1)*B[7, 0] + X[:,10].mean() * B[10,0])
        Lambda0p = np.exp(B[1, 0] + (-1)*B[6, 0] + X[:,9].mean() * B[9,0])
        Lambda0 = np.exp(B[0, 0] + (-1)*B[5, 0] + X[:,8].mean() * B[8,0]) - 2 * Mu0

        # Compute CI for lambda 0
        C_add = np.abs(C[0, 0]) + np.abs(C[2, 2]) + \
                np.abs(C[5, 5]) + np.abs(C[7, 7]) + \
                np.abs(C[8, 8]) + np.abs(C[10, 10])
        SE_L0 = SE * np.sqrt(C_add)
        L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

        print('\n Healthy constants:')
        print('Lambda0: ' + str(int(round(Lambda0, 0))) +
              ' [' + str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
              str(int(round(np.exp(L0_CI[1]), 0))) + ']')
        print('Lambda0p: ' + str(int(round(Lambda0p, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[0, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[0, 1]), 0))) + ']')
        print('Mu0: ' + str(int(round(Mu0, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[1, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[1, 1]), 0))) + ']')
        print('k: ' + str(round(k, 3)) +
              ' [' + str(round(B_CI[3, 0], 3)) + ' - ' +
              str(round(B_CI[3, 1], 3)) + ']')
        print('l: ' + str(round(l, 3)) +
              ' [' + str(round(B_CI[4, 0], 3)) + ' - ' +
              str(round(B_CI[4, 1], 3)) + ']')

        # Compute CI for combination of parameters
        SE_B_Comb = np.zeros(len(Combinations))
        B_CI_Comb = np.zeros((len(Combinations), 2))

        for i in range(len(Combinations)):
            Combination = Combinations[i]
            C_add = 0
            for j in Combination:
                C_add += C[j, j]
            SE_B_Comb[i] += SE * np.sqrt(C_add)
            B_CI_Comb[i] += np.array(B[Combination[0]] -
                                     B[Combination[1]] -
                                     B[Combination[2]] + SE_B_Comb[i] * np.array(t_Alpha))[0]

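        # Constants for the OI group: coefficients B[5], B[6], B[7] (apparently the group terms) enter with factor +1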
        Mu0 = np.exp(B[2, 0] + (+1) * B[7, 0] + X[:, 10].mean() * B[10, 0])
        Lambda0p = np.exp(B[1, 0] + (+1) * B[6, 0] + X[:, 9].mean() * B[9, 0])
        Lambda0 = np.exp(B[0, 0] + (+1) * B[5, 0] + X[:, 8].mean() * B[8, 0]) - 2 * Mu0

        # Compute CI for lambda 0
        C_add = np.abs(C[0, 0]) + np.abs(C[2, 2]) + \
                np.abs(C[5, 5]) + np.abs(C[7, 7]) + \
                np.abs(C[8, 8]) + np.abs(C[10, 10])
        SE_L0 = SE * np.sqrt(C_add)
        L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

        print('\n OI constants:')
        print('Lambda0: ' + str(int(round(Lambda0, 0))) +
              ' [' + str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
              str(int(round(np.exp(L0_CI[1]), 0))) + ']')
        print('Lambda0p: ' + str(int(round(Lambda0p, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[0, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[0, 1]), 0))) + ']')
        print('Mu0: ' + str(int(round(Mu0, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[1, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[1, 1]), 0))) + ']')
        print('k: ' + str(round(k, 3)) +
              ' [' + str(round(B_CI[3, 0], 3)) + ' - ' +
              str(round(B_CI[3, 1], 3)) + ']')
        print('l: ' + str(round(l, 3)) +
              ' [' + str(round(B_CI[4, 0], 3)) + ' - ' +
              str(round(B_CI[4, 1], 3)) + ']')

    elif X.shape[1] == 12:

        Combinations = [[1, 6, 9], [2, 7, 10]]

        # Compute CI for combination of parameters
        SE_B_Comb = np.zeros(len(Combinations))
        B_CI_Comb = np.zeros((len(Combinations), 2))

        for i in range(len(Combinations)):
            Combination = Combinations[i]
            C_add = 0
            for j in Combination:
                C_add += C[j, j]
            SE_B_Comb[i] += SE * np.sqrt(C_add)
            B_CI_Comb[i] += np.array(B[Combination[0]] +
                                     B[Combination[1]] +
                                     B[Combination[2]] +
                                     SE_B_Comb[i] * np.array(t_Alpha))[0]

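        # Constants for the healthy group: coefficients B[5], B[6], B[7] (apparently the group terms) enter with factor -1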
        Mu0 = np.exp(B[2, 0] + (-1) * B[7, 0] + X[:, 10].mean() * B[10, 0])
        Lambda0p = np.exp(B[1, 0] + (-1) * B[6, 0] + X[:, 9].mean() * B[9, 0])
        Lambda0 = np.exp(B[0, 0] + (-1) * B[5, 0] + X[:, 8].mean() * B[8, 0]) - 2 * Mu0

        # Compute CI for lambda 0
        C_add = np.abs(C[0, 0]) + np.abs(C[2, 2]) + \
                np.abs(C[5, 5]) + np.abs(C[7, 7]) + \
                np.abs(C[8, 8]) + np.abs(C[10, 10])
        SE_L0 = SE * np.sqrt(C_add)
        L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

        print('\n Healthy constants:')
        print('Lambda0: ' + str(int(round(Lambda0, 0))) +
              ' [' + str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
              str(int(round(np.exp(L0_CI[1]), 0))) + ']')
        print('Lambda0p: ' + str(int(round(Lambda0p, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[0, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[0, 1]), 0))) + ']')
        print('Mu0: ' + str(int(round(Mu0, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[1, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[1, 1]), 0))) + ']')
        print('k: ' + str(round(k, 3)) +
              ' [' + str(round(B_CI[3, 0], 3)) + ' - ' +
              str(round(B_CI[3, 1], 3)) + ']')
        print('l: ' + str(round(l, 3)) +
              ' [' + str(round(B_CI[4, 0], 3)) + ' - ' +
              str(round(B_CI[4, 1], 3)) + ']')

        # Compute CI for combination of parameters
        SE_B_Comb = np.zeros(len(Combinations))
        B_CI_Comb = np.zeros((len(Combinations), 2))

        for i in range(len(Combinations)):
            Combination = Combinations[i]
            C_add = 0
            for j in Combination:
                C_add += C[j, j]
            SE_B_Comb[i] += SE * np.sqrt(C_add)
            B_CI_Comb[i] += np.array(B[Combination[0]] -
                                     B[Combination[1]] -
                                     B[Combination[2]] + SE_B_Comb[i] * np.array(t_Alpha))[0]

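        # Constants for the OI group: coefficients B[5], B[6], B[7] (apparently the group terms) enter with factor +1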
        Mu0 = np.exp(B[2, 0] + (+1) * B[7, 0] + X[:, 10].mean() * B[10, 0])
        Lambda0p = np.exp(B[1, 0] + (+1) * B[6, 0] + X[:, 9].mean() * B[9, 0])
        Lambda0 = np.exp(B[0, 0] + (+1) * B[5, 0] + X[:, 8].mean() * B[8, 0]) - 2 * Mu0

        # Compute CI for lambda 0
        C_add = np.abs(C[0, 0]) + np.abs(C[2, 2]) + \
                np.abs(C[5, 5]) + np.abs(C[7, 7]) + \
                np.abs(C[8, 8]) + np.abs(C[10, 10])
        SE_L0 = SE * np.sqrt(C_add)
        L0_CI = np.log(Lambda0) + SE_L0 * np.array(t_Alpha)

        print('\n OI constants:')
        print('Lambda0: ' + str(int(round(Lambda0, 0))) +
              ' [' + str(int(round(np.exp(L0_CI[0]), 0))) + ' - ' +
              str(int(round(np.exp(L0_CI[1]), 0))) + ']')
        print('Lambda0p: ' + str(int(round(Lambda0p, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[0, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[0, 1]), 0))) + ']')
        print('Mu0: ' + str(int(round(Mu0, 0))) +
              ' [' + str(int(round(np.exp(B_CI_Comb[1, 0]), 0))) + ' - ' +
              str(int(round(np.exp(B_CI_Comb[1, 1]), 0))) + ']')
        print('k: ' + str(round(k, 3)) +
              ' [' + str(round(B_CI[3, 0], 3)) + ' - ' +
              str(round(B_CI[3, 1], 3)) + ']')
        print('l: ' + str(round(l, 3)) +
              ' [' + str(round(B_CI[4, 0], 3)) + ' - ' +
              str(round(B_CI[4, 1], 3)) + ']')

    return B, SE_B, B_CI, SE, R2
Example #14
def PlotRegressionResults(X, B, Data, Y, Alpha=0.95):

    # Number of observations, assumed equal to the number of rows of the design matrix
    N = X.shape[0]

    # Compute residuals and residual sum of squares
    E = np.matrix(np.log(Data[Y])).T - X * B
    E = np.array(E)[:, 0]
    RSS = np.sum(E ** 2)
    SE = np.sqrt(RSS / (N - X.shape[1]))

    # Compute quality of fit
    TSS = np.sum((np.log(Data[Y]) - np.log(Data[Y]).mean()) ** 2)
    RegSS = TSS - RSS
    R2 = RegSS / TSS

    # Compute (X'X)^-1, the coefficient variance-covariance matrix up to the factor SE^2
    C = np.linalg.inv(X.T * X)

    # Plot regression result
    Y_Fit = np.exp(np.array(X * B)[:, 0])
    Line = np.linspace(min(Data[Y].min(), Y_Fit.min()),
                       max(Data[Y].max(), Y_Fit.max()), len(Data[Y]))

    B_0 = np.sqrt(np.diag(np.abs(X * C * X.T)))
    t_Alpha = t.interval(Alpha, N - X.shape[1] - 1)
    CI_Line_u = Line + t_Alpha[0] * np.exp(SE) * np.exp(B_0)
    CI_Line_o = Line + t_Alpha[1] * np.exp(SE) * np.exp(B_0)

    Sii = Y_Fit * np.array(X[:, 0].T)[0]
    Sij = Y_Fit * np.array(X[:, 1].T)[0]
    Sjj = Y_Fit * np.array(X[:, 2].T)[0]

    ## Plots
    Figure, Axes = plt.subplots(1, 1, figsize=(5.5, 4.5), dpi=500, sharey=True, sharex=True)
    Axes.plot(Data[Y], Sii,
              color=(0, 0, 1), linestyle='none', marker='o', label=r'$\lambda_{ii}$')
    Axes.plot(Data[Y], Sij,
              color=(0, 1, 0), linestyle='none', marker='o', label=r'$\lambda_{ij}$')
    Axes.plot(Data[Y], Sjj,
              color=(1, 0, 0), linestyle='none', marker='o', label=r'$\mu_{ij}$')
    Axes.plot(np.sort(Line), np.sort(CI_Line_u), color=(0.4, 0.4, 0.4), linestyle='--')
    Axes.plot(np.sort(Line), np.sort(CI_Line_o), color=(0.4, 0.4, 0.4), linestyle='--')
    Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
    Axes.annotate(r'N: ' + str(len(Data)), (10 ** 3, 20 ** 1))
    Axes.annotate(r'$R^2$: ' + str(round(R2, 4)), (10 ** 3, 10 ** 1))
    Axes.set_xlabel(r'Observed $\mathbb{S}_{xy}$')
    Axes.set_ylabel(r'Fitted $\mathbb{S}_{xy}$')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend(loc='upper left')
    plt.show()

    Indices = Data[Data['Group'] == 'Control'].index
    Figure, Axes = plt.subplots(1, 1, figsize=(5.5, 4.5), dpi=500, sharey=True, sharex=True)
    Axes.plot(Data[Y], Y_Fit, alpha=0.1,
              color=(0, 0, 0), linestyle='none', marker='o')
    Axes.plot(Data[Y][Indices], Sii[:len(Indices)],
              color=(0, 0, 1), linestyle='none', marker='o', label=r'$\lambda_{ii}$')
    Axes.plot(Data[Y][Indices], Sij[:len(Indices)],
              color=(0, 1, 0), linestyle='none', marker='o', label=r'$\lambda_{ij}$')
    Axes.plot(Data[Y][Indices], Sjj[:len(Indices)],
              color=(1, 0, 0), linestyle='none', marker='o', label=r'$\mu_{ij}$')
    Axes.plot(np.sort(Line), np.sort(CI_Line_u), color=(0.4, 0.4, 0.4), linestyle='--')
    Axes.plot(np.sort(Line), np.sort(CI_Line_o), color=(0.4, 0.4, 0.4), linestyle='--')
    Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
    Axes.set_xlabel(r'Observed $\mathbb{S}_{xy}$')
    Axes.set_ylabel(r'Fitted $\mathbb{S}_{xy}$')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend(loc='upper left')
    plt.show()

    Indices = Data[Data['Group'] == 'Test'].index
    Figure, Axes = plt.subplots(1, 1, figsize=(5.5, 4.5), dpi=100, sharey=True, sharex=True)
    Axes.plot(Data[Y], Y_Fit, alpha=0.1,
              color=(0, 0, 0), linestyle='none', marker='o')
    Axes.plot(Data[Y][Indices], Sii[-len(Indices):],
              color=(0, 0, 1), linestyle='none', marker='o', label=r'$\lambda_{ii}$')
    Axes.plot(Data[Y][Indices], Sij[-len(Indices):],
              color=(0, 1, 0), linestyle='none', marker='o', label=r'$\lambda_{ij}$')
    Axes.plot(Data[Y][Indices], Sjj[-len(Indices):],
              color=(1, 0, 0), linestyle='none', marker='o', label=r'$\mu_{ij}$')
    Axes.plot(np.sort(Line), np.sort(CI_Line_u), color=(0.4, 0.4, 0.4), linestyle='--')
    Axes.plot(np.sort(Line), np.sort(CI_Line_o), color=(0.4, 0.4, 0.4), linestyle='--')
    Axes.plot(Line, Line, color=(0, 0, 0), linestyle='--')
    Axes.set_xlabel(r'Observed $\mathbb{S}_{xy}$')
    Axes.set_ylabel(r'Fitted $\mathbb{S}_{xy}$')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend(loc='upper left')
    plt.show()

    return
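
# A minimal usage sketch for PlotRegressionResults above, run on synthetic data.
# The column name 'Sxy', the two random predictors and the normal-equation fit
# are illustrative assumptions only; they are not part of the original example.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats.distributions import t

np.random.seed(0)
Data = pd.DataFrame({'Sxy': np.random.lognormal(8, 1, 20),
                     'Group': ['Control'] * 10 + ['Test'] * 10})

# Design matrix as np.matrix (intercept + two predictors), as the function expects
X = np.matrix(np.column_stack([np.ones(20), np.random.rand(20, 2)]))

# Ordinary least-squares coefficients for log(Sxy) via the normal equations
B = np.linalg.inv(X.T * X) * X.T * np.matrix(np.log(Data['Sxy'])).T

PlotRegressionResults(X, B, Data, 'Sxy', Alpha=0.95)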