# Ordered probit measurement equations for the indicators Envir01 and Envir02.
# Values outside the 1-5 scale (6, -1, -2) contribute 1.0, so they do not
# affect the likelihood.
Envir01_tau_1 = (tau_1 - MODEL_Envir01) / SIGMA_STAR_Envir01
Envir01_tau_2 = (tau_2 - MODEL_Envir01) / SIGMA_STAR_Envir01
Envir01_tau_3 = (tau_3 - MODEL_Envir01) / SIGMA_STAR_Envir01
Envir01_tau_4 = (tau_4 - MODEL_Envir01) / SIGMA_STAR_Envir01
IndEnvir01 = {
    1: bioNormalCdf(Envir01_tau_1),
    2: bioNormalCdf(Envir01_tau_2) - bioNormalCdf(Envir01_tau_1),
    3: bioNormalCdf(Envir01_tau_3) - bioNormalCdf(Envir01_tau_2),
    4: bioNormalCdf(Envir01_tau_4) - bioNormalCdf(Envir01_tau_3),
    5: 1 - bioNormalCdf(Envir01_tau_4),
    6: 1.0,
    -1: 1.0,
    -2: 1.0
}

P_Envir01 = Elem(IndEnvir01, Envir01)

Envir02_tau_1 = (tau_1 - MODEL_Envir02) / SIGMA_STAR_Envir02
Envir02_tau_2 = (tau_2 - MODEL_Envir02) / SIGMA_STAR_Envir02
Envir02_tau_3 = (tau_3 - MODEL_Envir02) / SIGMA_STAR_Envir02
Envir02_tau_4 = (tau_4 - MODEL_Envir02) / SIGMA_STAR_Envir02
IndEnvir02 = {
    1: bioNormalCdf(Envir02_tau_1),
    2: bioNormalCdf(Envir02_tau_2) - bioNormalCdf(Envir02_tau_1),
    3: bioNormalCdf(Envir02_tau_3) - bioNormalCdf(Envir02_tau_2),
    4: bioNormalCdf(Envir02_tau_4) - bioNormalCdf(Envir02_tau_3),
    5: 1 - bioNormalCdf(Envir02_tau_4),
    6: 1.0,
    -1: 1.0,
    -2: 1.0
}
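# The thresholds tau_1, ..., tau_4 used above are defined earlier in the
# script and are not shown in this excerpt. A minimal sketch of one common
# parameterization (an assumption, not necessarily the original code):
# symmetric thresholds built from positive increments, which guarantees
# tau_1 < tau_2 < tau_3 < tau_4.
delta_1 = Beta('delta_1', 0.1, 0, None, 0)
delta_2 = Beta('delta_2', 0.2, 0, None, 0)
tau_1 = -delta_1 - delta_2
tau_2 = -delta_1
tau_3 = delta_1
tau_4 = delta_1 + delta_2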
MODEL_Mobil17 = INTER_Mobil17 + B_Mobil17_F1 * CARLOVERS

# Scale parameters of the measurement equations, constrained to be nonnegative
SIGMA_STAR_Envir01 = Beta('SIGMA_STAR_Envir01', 1, 0, None, 0)
SIGMA_STAR_Envir02 = Beta('SIGMA_STAR_Envir02', 1, 0, None, 0)
SIGMA_STAR_Envir03 = Beta('SIGMA_STAR_Envir03', 1, 0, None, 0)
SIGMA_STAR_Mobil11 = Beta('SIGMA_STAR_Mobil11', 1, 0, None, 0)
SIGMA_STAR_Mobil14 = Beta('SIGMA_STAR_Mobil14', 1, 0, None, 0)
SIGMA_STAR_Mobil16 = Beta('SIGMA_STAR_Mobil16', 1, 0, None, 0)
SIGMA_STAR_Mobil17 = Beta('SIGMA_STAR_Mobil17', 1, 0, None, 0)

# We build a dict with each contribution to the loglikelihood if
# (var > 0) and (var < 6). If not, 0 is returned.
F = {}
F['Envir01'] = Elem(
    {0: 0,
     1: ll.loglikelihoodregression(Envir01, MODEL_Envir01, SIGMA_STAR_Envir01)},
    (Envir01 > 0) * (Envir01 < 6))
F['Envir02'] = Elem(
    {0: 0,
     1: ll.loglikelihoodregression(Envir02, MODEL_Envir02, SIGMA_STAR_Envir02)},
    (Envir02 > 0) * (Envir02 < 6))
F['Envir03'] = Elem(
    {0: 0,
     1: ll.loglikelihoodregression(Envir03, MODEL_Envir03, SIGMA_STAR_Envir03)},
    (Envir03 > 0) * (Envir03 < 6))
F['Mobil11'] = Elem(
    {0: 0,
     1: ll.loglikelihoodregression(Mobil11, MODEL_Mobil11, SIGMA_STAR_Mobil11)},
    (Mobil11 > 0) * (Mobil11 < 6))
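# Sketch (assumption: this step is not shown in the excerpt): the individual
# contributions stored in F are eventually combined into a single log
# likelihood expression, for instance with bioMultSum.
from biogeme.expressions import bioMultSum

loglike = bioMultSum(list(F.values()))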
U = B_TIME * TRAIN_TT_SCALED + B_COST * TRAIN_COST_SCALED

# Associate each discrete indicator with an interval.
#   1: -infinity -> tau1
#   2: tau1 -> tau2
#   3: tau2 -> +infinity
ChoiceProba = {
    1: 1 - dist.logisticcdf(U - tau1),
    2: dist.logisticcdf(U - tau1) - dist.logisticcdf(U - tau2),
    3: dist.logisticcdf(U - tau2)
}

# Definition of the model. This is the contribution of each
# observation to the log likelihood function.
logprob = log(Elem(ChoiceProba, CHOICE))

# Define level of verbosity
logger = msg.bioMessage()
logger.setSilent()
#logger.setWarning()
#logger.setGeneral()
#logger.setDetailed()

# Create the Biogeme object
biogeme = bio.BIOGEME(database, logprob)
biogeme.modelName = '18ordinalLogit'

# Estimate the parameters
results = biogeme.estimate()
pandasResults = results.getEstimatedParameters()
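# Sketch (assumption: the thresholds tau1 and tau2 are defined earlier in the
# script and are not shown here). Estimating a positive increment delta2
# keeps tau1 < tau2.
tau1 = Beta('tau1', -1, None, 0, 0)
delta2 = Beta('delta2', 2, 0, None, 0)
tau2 = tau1 + delta2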
TRAIN_COST_SCALED = DefineVariable('TRAIN_COST_SCALED', TRAIN_COST / 100, database)
SM_TT_SCALED = DefineVariable('SM_TT_SCALED', SM_TT / 100.0, database)
SM_COST_SCALED = DefineVariable('SM_COST_SCALED', SM_COST / 100, database)
CAR_TT_SCALED = DefineVariable('CAR_TT_SCALED', CAR_TT / 100, database)
CAR_CO_SCALED = DefineVariable('CAR_CO_SCALED', CAR_CO / 100, database)

# We estimate a binary probit model. There are only two alternatives.
V1 = B_TIME * TRAIN_TT_SCALED + \
     B_COST * TRAIN_COST_SCALED
V3 = ASC_CAR + \
     B_TIME * CAR_TT_SCALED + \
     B_COST * CAR_CO_SCALED

# Associate choice probability with the numbering of alternatives
P = {1: bioNormalCdf(V1 - V3),
     3: bioNormalCdf(V3 - V1)}
prob = Elem(P, CHOICE)


class test_02(unittest.TestCase):
    def testEstimation(self):
        biogeme = bio.BIOGEME(database, log(prob))
        results = biogeme.estimate()
        self.assertAlmostEqual(results.data.logLike, -986.1888, 2)


if __name__ == '__main__':
    unittest.main()
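# Sketch (assumption: these declarations appear earlier in the test script).
# The utilities V1 and V3 above rely on three unknown parameters, typically
# declared as unbounded Beta expressions with initial value 0.
ASC_CAR = Beta('ASC_CAR', 0, None, None, 0)
B_TIME = Beta('B_TIME', 0, None, None, 0)
B_COST = Beta('B_COST', 0, None, None, 0)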
CAR_CO_SCALED = DefineVariable('CAR_CO_SCALED', CAR_CO / 100, database)

# Definition of the utility functions
# We estimate a binary probit model. There are only two alternatives.
V1 = B_TIME * TRAIN_TT_SCALED + \
     B_COST * TRAIN_COST_SCALED
V3 = ASC_CAR + \
     B_TIME * CAR_TT_SCALED + \
     B_COST * CAR_CO_SCALED

# Associate choice probability with the numbering of alternatives
P = {1: bioNormalCdf(V1 - V3),
     3: bioNormalCdf(V3 - V1)}

# Definition of the model. This is the contribution of each
# observation to the log likelihood function.
logprob = log(Elem(P, CHOICE))

# Define level of verbosity
logger = msg.bioMessage()
logger.setSilent()
#logger.setWarning()
#logger.setGeneral()
#logger.setDetailed()

# Create the Biogeme object
biogeme = bio.BIOGEME(database, logprob)
biogeme.modelName = '21probit'

# Estimate the parameters
results = biogeme.estimate()
pandasResults = results.getEstimatedParameters()
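# Follow-up sketch (not part of the original listing): display the estimated
# parameters and the value of the final log likelihood.
print(pandasResults)
print(f'Final log likelihood: {results.data.logLike:.3f}')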
# Associate utility functions with the numbering of alternatives
V = {1: V1, 2: V2, 3: V3}

# Associate the availability conditions with the alternatives
CAR_AV_SP = DefineVariable('CAR_AV_SP', CAR_AV * (SP != 0), database)
TRAIN_AV_SP = DefineVariable('TRAIN_AV_SP', TRAIN_AV * (SP != 0), database)
av = {1: TRAIN_AV_SP, 2: SM_AV, 3: CAR_AV_SP}

# The choice model is a logit, with availability conditions
prob1 = Elem({0: 0, 1: models.logit(V, av, 1)}, av[1])

# Elasticities can be computed. We illustrate below two
# formulas. Check in the output file that they produce the same
# result.

# First, the general definition of elasticities. This illustrates the
# use of the Derive expression, and can be used with any model,
# however complicated it is. Note the quotes in the Derive operator.
genelas1 = Derive(prob1, 'TRAIN_TT') * TRAIN_TT / prob1

# Second, the elasticity of logit models. See Ben-Akiva and Lerman for
# the formula.
logitelas1 = TRAIN_AV_SP * (1.0 - prob1) * TRAIN_TT_SCALED * B_TIME
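# Sketch (assumption: one possible way to check that the two elasticity
# formulas agree, reusing estimates stored in a `results` object obtained
# from a previous estimation; the dictionary keys are illustrative).
simulate = {
    'Prob. train': prob1,
    'genelas1': genelas1,
    'logitelas1': logitelas1,
}
biosim = bio.BIOGEME(database, simulate)
simulatedValues = biosim.simulate(results.getBetaValues())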