Example #1
0
# Show the first few standardized residuals (stdresiduals computed above — outside this view).
print(stdresiduals[:5])

# %%
# Similarly, we can also obtain the underlying distribution characterizing the residuals:

# %%
print(result.getNoiseDistribution())

# %%
# ANOVA table
# -----------
#
# In order to post-process the linear regression results, the `LinearModelAnalysis` class can be used:

# %%
analysis = ot.LinearModelAnalysis(result)
print(analysis)

# %%
# The results seem to indicate that the linear hypothesis can be accepted. Indeed, the `R-Squared` value is nearly `1`. Furthermore, the adjusted value, which takes into account the data set size and the number of hyperparameters, is similar to `R-Squared`.
#
# We can also notice that the `Fisher-Snedecor` and `Student` p-values detailed above are lower than 1%. This ensures an acceptable quality of the linear model.

# %%
# Graphical analyses
# ------------------
#
# Let us compare model and fitted values:

# %%
graph = analysis.drawModelVsFitted()
Example #2
0
ot.TESTPREAMBLE()

# lm build
print("Fit y ~ 3 - 2 x + 0.05 * sin(x) model using 20 points (sin(x) ~ noise)")
size = 20
# Input (x) and output (y) samples, filled point by point below.
# NOTE(review): `sin` is presumably `math.sin`, imported outside this view — confirm.
oneSample = ot.Sample(size, 1)
twoSample = ot.Sample(size, 1)
for i in range(size):
    # x spread nonlinearly over [-3.5, 3.0] via 7*sin(.) + 2
    oneSample[i, 0] = 7.0 * sin(-3.5 + (6.5 * i) / (size - 1.0)) + 2.0
    # y = -2x + 3 plus a small sin(x) perturbation acting as noise
    twoSample[i,
              0] = -2.0 * oneSample[i, 0] + 3.0 + 0.05 * sin(oneSample[i, 0])

test = ot.LinearModelAlgorithm(oneSample, twoSample)
result = ot.LinearModelResult(test.getResult())
analysis = ot.LinearModelAnalysis(result)
print(analysis)
# Compute confidence level (95%) for coefficients estimate
alpha = 0.95
# interval confidence bounds
interval = analysis.getCoefficientsConfidenceInterval(alpha)
print("confidence intervals with level=%1.2f : %s" % (alpha, interval))

print("")
print("")
print("")
print("Fit y ~ 1 + 0.1 x + 10 x^2 model using 100 points")
ot.RandomGenerator.SetSeed(0)
size = 100
# Define a linspace from 0 to 10 with size points
# We use a Box experiment ==> remove 0 & 1 points
Example #3
0
# Stepwise model selection: run every (penalty, direction) combination and
# print the resulting analysis report.
# NOTE(review): `log`, `ot`, `X`, `basis`, `Y`, `i_min`, `i_0` come from
# earlier in the file (outside this view).
penalty_BIC = log(X.getSize())
penalty_AIC = 2.
maxiteration = 1000

# Pair each penalty value with the label used in the report headers.
for penalty, IC in ((penalty_AIC, " AIC "), (penalty_BIC, " BIC ")):
    for direction in (
            ot.LinearModelStepwiseAlgorithm.FORWARD,
            ot.LinearModelStepwiseAlgorithm.BACKWARD,
            ot.LinearModelStepwiseAlgorithm.BOTH,
    ):
        # Only the BOTH strategy starts from the i_0 candidate indices;
        # FORWARD/BACKWARD start from an empty set.
        start_indices = (
            i_0 if direction == ot.LinearModelStepwiseAlgorithm.BOTH else [])
        algo = ot.LinearModelStepwiseAlgorithm(
            X, basis, Y, i_min, direction, start_indices)
        algo.setPenalty(penalty)
        algo.setMaximumIterationNumber(maxiteration)
        algo_result = ot.LinearModelAnalysis(algo.getResult())
        separator = "{0:~^60s}".format("")
        print(separator)
        if direction == ot.LinearModelStepwiseAlgorithm.FORWARD:
            label = " Forward "
        elif direction == ot.LinearModelStepwiseAlgorithm.BACKWARD:
            label = " Backward "
        else:
            label = " Both "
        print(label + IC)
        print(separator)
        print(algo_result)
Example #4
0
# %%
# Generate a particular scalar sample X
# (N is defined earlier in the file — outside this view)
particularXSample = ot.Triangular(1.0, 5.0, 10.0).getSample(N)

# %%
# Create the linear model from Y,X samples
result = ot.LinearModelAlgorithm(Xsample, Ysample).getResult()

# Get the coefficients ai
print("coefficients of the linear regression model = ",
      result.getCoefficients())

# Get the confidence intervals of the ai coefficients
# (level 0.90 here, unlike the 0.95 default used in other examples)
print("confidence intervals of the coefficients = ",
      ot.LinearModelAnalysis(result).getCoefficientsConfidenceInterval(0.9))

# %%
# Validate the model with a visual test
graph = ot.VisualTest.DrawLinearModel(Xsample, Ysample, result)
view = viewer.View(graph)

# %%
# Draw the graph of the residual values
graph = ot.VisualTest.DrawLinearModelResidual(Xsample, Ysample, result)
view = viewer.View(graph)

# %%
# Check the nullity of the regression linear model coefficients
# (Fisher test at significance level 0.10)
resultLinearModelFisher = ot.LinearModelTest.LinearModelFisher(
    Xsample, Ysample, result, 0.10)
# Label the data set (LifeCycleSavings: savings ratio `sr` and four
# demographic/income predictors). `Sample` is loaded earlier in the file.
Sample.setName("LifeCycleSavings")
Sample.setDescription(["sr","pop15","pop75","dpi","ddpi"])

# Individual columns, extracted for reference.
sr    = Sample[:,0]
pop15 = Sample[:,1]
pop75 = Sample[:,2]
dpi   = Sample[:,3]
ddpi  = Sample[:,4]

# model1: regress sr on the four predictors
outputSample = Sample[:,0]
inputSample = Sample[:,1:5]

algo1 = ot.LinearModelAlgorithm(inputSample, outputSample)
result1 = algo1.getResult()
# Fix: reuse the result computed on the previous line instead of calling
# algo1.getResult() a second time (result1 was otherwise unused).
analysis1 = ot.LinearModelAnalysis(result1)

# Save each diagnostic plot to a 640x480 file named after its draw method.
for plot in ["drawResidualsVsFitted", "drawScaleLocation", "drawQQplot", "drawCookDistance", "drawResidualsVsLeverages", "drawCookVsLeverages"]:
    graph = getattr(analysis1, plot)()
    graph.draw("model1-"+plot, 640, 480)

# plot of residuals versus fitted values
graph = analysis1.drawResidualsVsFitted()
View(graph)

# scale-location plot of sqrt(|residuals|) versus fitted values
graph = analysis1.drawScaleLocation()
View(graph)

# Normal quantiles-quantiles plot of standardized residuals
graph = analysis1.drawQQplot()
Example #6
0
# Run the bidirectional stepwise selection (algo_both and penalty are
# configured earlier in the file — outside this view).
algo_both.setPenalty(penalty)
algo_both.run()
result_both = algo_both.getResult()
print('Selected basis: ', result_both.getCoefficientsNames())
print('R-squared = ', result_both.getRSquared())
print('Adjusted R-squared = ', result_both.getAdjustedRSquared())

# %%
# Graphical analyses
# ------------------
#
# Finally, we can rely on the LinearModelAnalysis class in order to analyse
# the predictive differences between the obtained models.

# %%
analysis_full = ot.LinearModelAnalysis(result_full)
analysis_full.setName('Full model')
analysis_forward = ot.LinearModelAnalysis(result_forward)
analysis_forward.setName('Forward selection')
analysis_backward = ot.LinearModelAnalysis(result_backward)
analysis_backward.setName('Backward selection')
# One subplot per model, stacked vertically (3 rows x 1 column).
fig = plt.figure(figsize=(12, 8))
for k, analysis in enumerate(
    [analysis_full, analysis_forward, analysis_backward]):
    graph = analysis.drawModelVsFitted()
    ax = fig.add_subplot(3, 1, k + 1)
    ax.set_title(analysis.getName(), fontdict={'fontsize': 16})
    graph.setXTitle('Exact values')
    ax.xaxis.label.set_size(12)
    ax.yaxis.label.set_size(14)
    # Drop the per-graph title; the subplot title above is used instead.
    graph.setTitle('')