def get_log_likelihood(self):
    """Gaussian log-likelihood of the fitted model:
    ln L = -(n/2)*ln(2*pi) - (n/2)*ln(s^2) - SSR/(2*s^2)
    where s^2 is the residual variance estimate and SSR the sum of squared residuals.
    """
    s2 = numpy.power(self.s_y, 2)
    return (-numpy.log(2 * numpy.pi) * self.nobs / 2
            - numpy.log(s2) * self.nobs / 2
            - self.ssr / (2.0 * s2))
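
# A minimal sanity-check sketch for get_log_likelihood, written in the same
# style as the tests below. It assumes examples.gen_simplemodel_data (used by
# the existing tests) and statsmodels are available, and compares the
# closed-form value above against statsmodels' llf. The two use slightly
# different variance denominators (n-k here vs n in statsmodels' MLE), which
# only shifts the result by a small n,k-dependent constant, so a small
# absolute tolerance is enough. Illustrative only, not part of the test suite.
def test_log_likelihood_matches_statsmodels():
    from ml_ext import examples
    import statsmodels.api as sm

    (coefs, df) = examples.gen_simplemodel_data(n=50, k=1)
    lr = LinModel()
    X = df[df.columns[df.columns != 'y']]
    y = df.y
    lr.fit(X=X, y=y)

    llf = sm.OLS(y, X).fit().llf
    assert abs(lr.get_log_likelihood() - llf) < 0.5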
def get_confidence_interval_for_mean(self, X):
    """
    Calculates the confidence interval of the mean response for each
    datapoint, given a model fit.

    This is the confidence interval of the model, not the prediction interval.
    """
    if isinstance(X, pd.DataFrame):
        X = X[self.independent_]
    y_hat = self.predict(X)
    w = numpy.matrix(X)
    # variance of the mean response for each row x: s^2 * x (X'X)^-1 x'
    s_c_2 = numpy.array(w * numpy.power(self.s_y, 2) * inv(self.X_dash_X) * w.T)
    # we only want the diagonal (one variance per datapoint)
    s_c_2 = numpy.diagonal(s_c_2)
    # 95% confidence interval, so alpha = 0.05
    alpha = 0.05
    t_val = stats.t.ppf(1 - alpha / 2, self.df_resid + 1)
    upper = y_hat + t_val * numpy.sqrt(s_c_2)
    lower = y_hat - t_val * numpy.sqrt(s_c_2)
    df = pd.DataFrame({'y_hat': y_hat, 'upper_mean': upper, 'lower_mean': lower})
    return df
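
# A short usage sketch for get_confidence_interval_for_mean, again assuming
# the examples module used by the tests below. It fits a model and checks the
# basic invariant that the fitted values sit between the interval bounds;
# the function name is hypothetical and the test purely illustrative.
def test_conf_interval_for_mean_brackets_fit():
    from ml_ext import examples

    (coefs, df) = examples.gen_simplemodel_data(n=50, k=1)
    lr = LinModel()
    X = df[df.columns[df.columns != 'y']]
    y = df.y
    lr.fit(X=X, y=y)

    ci = lr.get_confidence_interval_for_mean(X)
    assert (ci['lower_mean'] <= ci['y_hat']).all()
    assert (ci['y_hat'] <= ci['upper_mean']).all()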
def test_conf_interval_for_coefs():
    from ml_ext import examples
    import statsmodels.api as sm

    (coefs, df) = examples.gen_simplemodel_data(n=50, k=1)
    lr = LinModel()
    X = df[df.columns[df.columns != 'y']]
    y = df.y
    lr.fit(X=X, y=y)
    ci = lr.get_confidence_intervals_for_coefs()

    # compare the lower bound of each coefficient's interval against statsmodels
    sm_ci = sm.OLS(y, X).fit().conf_int()
    sq_err = 0
    for indx in ci.index.values:
        sq_err = sq_err + numpy.power(
            (sm_ci.loc[indx, 0] - ci.loc[indx, 'lower']) / ci.loc[indx, 'b'], 2)
    mean_sq_err_pc = 100 * sq_err / ci.shape[0]
    assert mean_sq_err_pc < 0.1
    print("Error on confidence interval: {} %".format(mean_sq_err_pc))
def fit(self, X, y, n_jobs=1):
    """
    y can be a Series or array.
    X can be a DataFrame or ndarray (N datapoints x M features).
    """
    self = super(LinModel, self).fit(X, y, n_jobs)
    self.nobs = X.shape[0]
    self.nparams = X.shape[1]
    # model degrees of freedom: k-1, since one column is the intercept ('alpha')
    self.df_model = X.shape[1] - 1
    # residual degrees of freedom: n-k-1 (an intercept is always assumed)
    self.df_resid = self.nobs - X.shape[1] - 1

    y_bar = y.mean()
    y_hat = self.predict(X)
    self.raw_data = X
    self.training = y
    self.fittedvalues = y_hat

    # explained, total and residual sums of squares
    SSE = numpy.sum(numpy.power(y_hat - y_bar, 2))
    self.resid = numpy.ravel(y - y_hat)
    SST = numpy.sum(numpy.power(y - y_bar, 2))
    SSR = numpy.sum(numpy.power(self.resid, 2))
    self.ssr = SSR

    # mean squared error of the residuals (unbiased estimate of the error
    # variance); its square root is the standard error of the regression
    s_2 = SSR / (self.df_resid + 1)
    self.s_y = numpy.sqrt(s_2)
    self.RMSE_pc = metrics.get_RMSE_pc(y, y_hat)

    # means of the independent variables, excluding the intercept
    if isinstance(X, pd.DataFrame):
        # assume the intercept column is called 'alpha'
        self.X_bar = X[X.columns[X.columns != 'alpha']].mean()
        Z = numpy.matrix(X[X.columns[X.columns != 'alpha']])
    else:
        # assume the intercept is the first column
        self.X_bar = numpy.mean(X, axis=0)[1:]
        Z = numpy.matrix(X[:, 1:])

    # centring matrix M_0 = I - (1/n)*i*i', so Z'M_0 Z is the centred
    # cross-product of the regressors
    i_n = numpy.matrix(numpy.ones(self.nobs))
    M_0 = numpy.matrix(numpy.eye(self.nobs)) - i_n.T * i_n / self.nobs
    self.Z_M_Z = Z.T * M_0 * Z

    # standard error of each estimator b_k: sqrt(s^2 * [(X'X)^-1]_kk)
    X_mat = numpy.matrix(X)
    self.X_dash_X = X_mat.T * X_mat
    se = numpy.sqrt(numpy.diagonal(s_2 * numpy.linalg.inv(self.X_dash_X)))
    self.se = se
    self.t = self.coef_ / se
    # two-sided p-values via the t survival function
    self.p = 2 * stats.t.sf(numpy.abs(self.t), y.shape[0] - X.shape[1])

    self.independent_ = []
    if isinstance(X, pd.DataFrame):
        self.independent_ = X.columns.values

    # R^2 = 1 - SSR/SST; adjusted R^2 = 1 - (1-R^2)(n-1)/(n-k-1)
    self.rsquared = 1 - SSR / SST
    self.rsquared_adj = 1 - ((1 - self.rsquared) * (self.nobs - 1)) / self.df_resid

    # F statistic and its p-value (upper-tail probability, not the density)
    f_value = (self.rsquared / self.df_model) / \
        ((1 - self.rsquared) / (self.df_resid + 1))
    self.f_stat = f_value
    self.f_pvalue = stats.f.sf(f_value, self.df_model, self.df_resid + 1)
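
# A hedged end-to-end sketch of fit's diagnostics, using the same data
# generator and statsmodels comparison as the coefficient test above. It
# assumes (as that test does) that gen_simplemodel_data returns an X that
# already includes the intercept column, in which case R^2 and the F
# statistic computed in fit should agree with statsmodels' OLS output.
def test_fit_diagnostics_match_statsmodels():
    from ml_ext import examples
    import statsmodels.api as sm

    (coefs, df) = examples.gen_simplemodel_data(n=50, k=1)
    lr = LinModel()
    X = df[df.columns[df.columns != 'y']]
    y = df.y
    lr.fit(X=X, y=y)

    res = sm.OLS(y, X).fit()
    assert abs(lr.rsquared - res.rsquared) < 1e-6
    assert abs(lr.f_stat - res.fvalue) / res.fvalue < 1e-4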