def _cal_δ(self, θ2):
    """Calculate δ (mean utility) via contraction mapping"""
    v, D, X2 = self.v, self.D, self.X2
    δ, ln_s_jt = self.δ, self.ln_s_jt  # initial values

    niter = 0
    ε = 1e-13  # tight tolerance

    μ = self.μ = _BLP.cal_mu(θ2, v.values, D.values, X2.values)

    while True:
        s = self._cal_s(δ, μ)

        diff = ln_s_jt - np.log(s)

        if np.isnan(diff).any():
            raise Exception('nan in diffs')

        δ += diff

        if (abs(diff).max() < ε) and (abs(diff).mean() < 1e-3):
            break

        niter += 1

    print('contraction mapping finished in {} iterations'.format(niter))

    return δ
def cal_δ(self, θ2):
    """Calculate δ (mean utility) via contraction mapping"""
    v, D, X2 = self.v, self.D, self.X2
    nmkt, nsimind, nbrand = self.nmkt, self.nsimind, self.nbrand
    s, δ, ln_s_jt = self.s, self.δ_old, self.ln_s_jt

    θ2_v, θ2_D = θ2[:, 0], θ2[:, 1:]  # σ's in column 0, π's in the rest

    niter = 0

    μ = _BLP.cal_mu(θ2_v, θ2_D, v, D, X2, nmkt, nsimind, nbrand)

    while True:
        exp_Xb = np.exp(δ.reshape(-1, 1) + μ)

        _BLP.cal_s(exp_Xb, nmkt, nsimind, nbrand, s)  # s gets updated in place

        diff = ln_s_jt - np.log(s)

        if np.isnan(diff).any():
            raise Exception('nan in diffs')

        δ += diff

        if (abs(diff).max() < self.etol) and (abs(diff).mean() < 1e-3):
            break

        niter += 1

    print('contraction mapping finished in {} iterations'.format(niter))

    return δ
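Both versions iterate Berry's contraction δ ← δ + ln(s_obs) − ln(s(δ, θ2)), which converges to the unique mean utilities that reproduce the observed market shares. For reference, a minimal self-contained sketch of the same fixed point with plain logit shares (μ = 0, a single market); the toy shares, tolerance, and function name are illustrative, not part of the repo:

import numpy as np

def contraction_demo(ln_s_obs, tol=1e-13, max_iter=10_000):
    """Berry contraction with plain logit shares: δ ← δ + ln s_obs - ln s(δ)."""
    δ = np.zeros_like(ln_s_obs)        # conventional starting value
    for niter in range(max_iter):
        exp_u = np.exp(δ)
        s = exp_u / (1 + exp_u.sum())  # logit shares with an outside good
        diff = ln_s_obs - np.log(s)
        δ += diff
        if abs(diff).max() < tol:
            return δ, niter
    raise RuntimeError('contraction did not converge')

s_obs = np.array([0.2, 0.3, 0.1])      # three products, outside share 0.4
δ_hat, niter = contraction_demo(np.log(s_obs))
assert np.allclose(δ_hat, np.log(s_obs) - np.log(0.4))  # analytic logit inversion

With μ = 0 the fixed point has the closed form δ_j = ln(s_j) − ln(s_0), which the final assert checks; with random coefficients no closed form exists, hence the iteration in cal_δ.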
def test_cal_mu(data):
    BLP = pyBLP.BLP(data)

    v, D, X2 = BLP.v, BLP.D, BLP.X2

    # Nevo's (2000) starting values for θ2; zeros mark parameters held fixed
    θ20 = np.array([[ 0.3772,  3.0888,  0.0000,  1.1859,  0.0000],
                    [ 1.8480, 16.5980, -0.6590,  0.0000, 11.6245],
                    [-0.0035, -0.1925,  0.0000,  0.0296,  0.0000],
                    [ 0.0810,  1.4684,  0.0000, -1.5143,  0.0000]])

    mu_python = BLP._cal_mu(θ20)
    mu_cython = _BLP.cal_mu(θ20, v.values, D.values, X2.values)

    assert np.allclose(mu_python, mu_cython)
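The `data` argument is a pytest fixture injected by name. A minimal sketch of one, assuming module scope; `load_nevo_data` is hypothetical and stands in for however the repo actually constructs the dataset that `pyBLP.BLP` consumes:

import pytest

@pytest.fixture(scope='module')
def data():
    # hypothetical loader; replace with the repo's actual data construction
    return load_nevo_data()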
def _cal_jacobian(self, θ2, δ):
    """calculate the Jacobian with the current value of δ"""
    v, D, X2 = self.v, self.D, self.X2
    nmkts, nsiminds, nbrands = self.nmkts, self.nsiminds, self.nbrands

    ind_choice_prob = self.ind_choice_prob

    μ = _BLP.cal_mu(θ2, v.values, D.values, X2.values)
    _BLP.cal_ind_choice_prob(δ, μ, ind_choice_prob)

    ind_choice_prob_vec = ind_choice_prob.transpose(0, 2, 1).reshape(-1, nsiminds)

    nk = len(X2.coords['vars'])
    nD = len(D.coords['vars'])

    f1 = np.zeros((δ.flatten().shape[0], nk * (nD + 1)))

    # cdid relates each observation to the market it is in
    cdid = np.arange(nmkts).repeat(nbrands)
    cdindex = np.arange(nbrands, nbrands * (nmkts + 1), nbrands) - 1

    # compute ∂share/∂σ
    for k in range(nk):
        X2v = X2[..., k].values.reshape(-1, 1) @ np.ones((1, nsiminds))
        X2v *= v[cdid, :, k].values

        temp = (X2v * ind_choice_prob_vec).cumsum(axis=0)
        sum1 = temp[cdindex, :]

        sum1[1:, :] = sum1[1:, :] - sum1[:-1, :]  # within-market sums

        f1[:, k] = (ind_choice_prob_vec * (X2v - sum1[cdid, :])).mean(axis=1)

    # compute ∂share/∂π
    for d in range(nD):
        tmpD = D[cdid, :, d].values

        temp1 = np.zeros((cdid.shape[0], nk))

        for k in range(nk):
            X2d = X2[..., k].values.reshape(-1, 1) @ np.ones((1, nsiminds)) * tmpD

            temp = (X2d * ind_choice_prob_vec).cumsum(axis=0)
            sum1 = temp[cdindex, :]

            sum1[1:, :] = sum1[1:, :] - sum1[:-1, :]

            temp1[:, k] = (ind_choice_prob_vec * (X2d - sum1[cdid, :])).mean(axis=1)

        f1[:, nk * (d + 1):nk * (d + 2)] = temp1

    # compute ∂δ/∂θ2 market by market via the implicit function theorem
    rel = np.nonzero(θ2.T.ravel())[0]  # indices of the nonzero (free) parameters

    jacob = np.zeros((cdid.shape[0], rel.shape[0]))

    n = 0
    for i in range(cdindex.shape[0]):
        temp = ind_choice_prob_vec[n:cdindex[i] + 1, :]
        H1 = temp @ temp.T
        H = (np.diag(temp.sum(axis=1)) - H1) / nsiminds

        jacob[n:cdindex[i] + 1, :] = -solve(H, f1[n:cdindex[i] + 1, rel])

        n = cdindex[i] + 1

    return jacob
def cal_jacobian(self, θ2, δ):
    """calculate the Jacobian with the current value of δ"""
    v, D, X2 = self.v, self.D, self.X2
    nmkt, nsimind, nbrand = self.nmkt, self.nsimind, self.nbrand

    μ = _BLP.cal_mu(θ2[:, 0], θ2[:, 1:], v, D, X2, nmkt, nsimind, nbrand)

    exp_Xb = np.exp(δ.reshape(-1, 1) + μ)

    ind_choice_prob = _BLP.cal_ind_choice_prob(exp_Xb, nmkt, nsimind, nbrand)

    nk = X2.shape[1]
    nD = θ2.shape[1] - 1

    f1 = np.zeros((δ.shape[0], nk * (nD + 1)))

    # cdid relates each observation to the market it is in
    cdid = np.arange(nmkt).repeat(nbrand)
    cdindex = np.arange(nbrand, nbrand * (nmkt + 1), nbrand) - 1

    # compute ∂share/∂σ
    for k in range(nk):
        xv = X2[:, k].reshape(-1, 1) @ np.ones((1, nsimind))
        xv *= v[cdid, nsimind * k:nsimind * (k + 1)]

        temp = (xv * ind_choice_prob).cumsum(axis=0)
        sum1 = temp[cdindex, :]

        sum1[1:, :] = sum1[1:, :] - sum1[:-1, :]  # within-market sums

        f1[:, k] = (ind_choice_prob * (xv - sum1[cdid, :])).mean(axis=1)

    # compute ∂share/∂π (if there are no demographics, comment out this block)
    for d in range(nD):
        tmpD = D[cdid, nsimind * d:nsimind * (d + 1)]

        temp1 = np.zeros((cdid.shape[0], nk))

        for k in range(nk):
            xd = X2[:, k].reshape(-1, 1) @ np.ones((1, nsimind)) * tmpD

            temp = (xd * ind_choice_prob).cumsum(axis=0)
            sum1 = temp[cdindex, :]

            sum1[1:, :] = sum1[1:, :] - sum1[:-1, :]

            temp1[:, k] = (ind_choice_prob * (xd - sum1[cdid, :])).mean(axis=1)

        f1[:, nk * (d + 1):nk * (d + 2)] = temp1

    # compute ∂δ/∂θ2 market by market via the implicit function theorem
    rel = np.nonzero(θ2.T.ravel())[0]  # indices of the nonzero (free) parameters

    jacob = np.zeros((cdid.shape[0], rel.shape[0]))

    n = 0
    for i in range(cdindex.shape[0]):
        temp = ind_choice_prob[n:cdindex[i] + 1, :]
        H1 = temp @ temp.T
        H = (np.diag(temp.sum(axis=1)) - H1) / nsimind

        jacob[n:cdindex[i] + 1, :] = -solve(H, f1[n:cdindex[i] + 1, rel])

        n = cdindex[i] + 1

    return jacob
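The final loop in both versions applies the implicit function theorem market by market: differentiating the share equation s(δ(θ2), θ2) = s_obs gives ∂δ/∂θ2 = −H⁻¹·f1, where H is the within-market ∂s/∂δ built from the individual choice probabilities and f1 stacks the ∂s/∂σ and ∂s/∂π blocks. A quick way to validate either implementation is a central finite-difference check against cal_δ; a sketch, in which the `blp` instance, step size h, and tolerance are assumptions:

import numpy as np

def fd_jacobian_check(blp, θ2, h=1e-6, atol=1e-6):
    """Compare the analytic ∂δ/∂θ2 with central finite differences.

    Free parameters are the nonzero entries of θ2, taken in the same
    column-major order as rel = np.nonzero(θ2.T.ravel())[0] above.
    """
    δ = blp.cal_δ(θ2).copy()       # copy: cal_δ updates self.δ_old in place
    jac = blp.cal_jacobian(θ2, δ)

    rows, cols = np.nonzero(θ2.T)  # same ordering as rel
    fd = np.empty((δ.shape[0], rows.size))
    for j, (r, c) in enumerate(zip(rows, cols)):
        θ_hi, θ_lo = θ2.copy(), θ2.copy()
        θ_hi[c, r] += h            # θ2.T[r, c] is θ2[c, r]
        θ_lo[c, r] -= h
        δ_hi = blp.cal_δ(θ_hi).copy()
        δ_lo = blp.cal_δ(θ_lo).copy()
        fd[:, j] = (δ_hi - δ_lo) / (2 * h)

    assert np.allclose(jac, fd, atol=atol)

Each perturbed parameter costs two full contractions, so this is a debugging aid rather than something to run inside the GMM objective.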