def mnl_simulate(data, coeff, numalts, GPU=False, returnprobs=False):
    """
    Simulate choices from a multinomial logit model.

    Parameters
    ----------
    data : 2D array
        "Long" form data: `numalts` rows per chooser, one row per
        alternative, alternatives in the same order for every chooser.
    coeff : 1D array
        Model coefficients, one per column of `data`.
    numalts : int
        Number of alternatives available to each chooser.
    GPU : bool, optional
        If True, compute probabilities with PMAT's 'cuda' backend.
    returnprobs : bool, optional
        If True, return the chooser-by-alternative probability matrix
        instead of simulated choices.

    Returns
    -------
    2D array of probabilities (when `returnprobs` is True), otherwise the
    simulated choice index per chooser.
    """
    logger.debug(
        'start: MNL simulation with len(data)={} and numalts={}'.format(
            len(data), numalts))
    atype = 'numpy' if not GPU else 'cuda'

    data = np.transpose(data)
    coeff = np.reshape(np.array(coeff), (1, len(coeff)))

    data, coeff = PMAT(data, atype), PMAT(coeff, atype)

    probs = mnl_probs(data, coeff, numalts)

    if returnprobs:
        return np.transpose(probs.get_mat())

    # convert to cpu from here on - gpu doesn't currently support these ops
    if probs.typ == 'cuda':
        probs = PMAT(probs.get_mat())

    probs = probs.cumsum(axis=0)
    # Floor division: the chooser count must be an int — under Python 3,
    # `/` would produce a float and break the random draw's size argument.
    r = pmat.random(probs.size() // numalts)
    choices = probs.subtract(r, inplace=True).firstpositive(axis=0)

    logger.debug('finish: MNL simulation')
    return choices.get_mat()
def mnl_simulate(data, coeff, numalts, GPU=False, returnprobs=True):
    """
    Get the probabilities for each chooser choosing between `numalts`
    alternatives, or simulate actual choices from those probabilities.

    Parameters
    ----------
    data : 2D array
        The data are expected to be in "long" form where each row is for
        one alternative. Alternatives are in groups of `numalts` rows per
        choosers. Alternatives must be in the same order for each chooser.
    coeff : 1D array
        The model coefficients corresponding to each column in `data`.
    numalts : int
        The number of alternatives available to each chooser.
    GPU : bool, optional
        If True, compute probabilities with PMAT's 'cuda' backend.
    returnprobs : bool, optional
        If True, return the probabilities for each chooser/alternative
        instead of actual choices.

    Returns
    -------
    probs or choices : 2D array
        If `returnprobs` is True the probabilities are a 2D array with a
        row for each chooser and columns for each alternative; otherwise
        the simulated choice index per chooser.
    """
    logger.debug(
        'start: MNL simulation with len(data)={} and numalts={}'.format(
            len(data), numalts))
    atype = 'numpy' if not GPU else 'cuda'

    data = np.transpose(data)
    coeff = np.reshape(np.array(coeff), (1, len(coeff)))

    data, coeff = PMAT(data, atype), PMAT(coeff, atype)

    probs = mnl_probs(data, coeff, numalts)

    if returnprobs:
        return np.transpose(probs.get_mat())

    # convert to cpu from here on - gpu doesn't currently support these ops
    if probs.typ == 'cuda':
        probs = PMAT(probs.get_mat())

    probs = probs.cumsum(axis=0)
    # Floor division: the chooser count must be an int — under Python 3,
    # `/` would produce a float and break the random draw's size argument.
    r = pmat.random(probs.size() // numalts)
    choices = probs.subtract(r, inplace=True).firstpositive(axis=0)

    logger.debug('finish: MNL simulation')
    return choices.get_mat()
def mnl_loglik(beta, data, chosen, numalts, weights=None, lcgrad=False,
               stderr=0):
    """
    Negative log-likelihood and gradient for a multinomial logit model.

    Parameters
    ----------
    beta : 1D numpy array
        Candidate coefficient vector (one entry per variable).
    data : PMAT
        Explanatory data, sized numvars x (numalts * numobs).
    chosen : PMAT
        Indicator matrix of observed choices.
    numalts : int
        Number of alternatives per chooser.
    weights : PMAT, optional
        Observation weights; required when `lcgrad` is True.
    lcgrad : bool, optional
        If True, compute the latent-class gradient against `weights`
        instead of against `chosen`.
    stderr : int, optional
        If nonzero, return standard errors (from the Hessian) instead of
        the log-likelihood and gradient.

    Returns
    -------
    (-loglik, -gradient) suitable for a minimizer, or standard errors when
    `stderr` is set.
    """
    numvars = beta.size
    # Floor division keeps numobs an int — with Python 3 true division the
    # reshape calls below would receive a float dimension and fail.
    numobs = data.size() // numvars // numalts

    beta = np.reshape(beta, (1, beta.size))
    beta = PMAT(beta, data.typ)

    probs = mnl_probs(data, beta, numalts)

    # lcgrad computes the gradient against class weights rather than the
    # observed choice indicators
    if lcgrad:
        assert weights
        gradmat = weights.subtract(probs).reshape(1, probs.size())
    else:
        gradmat = chosen.subtract(probs).reshape(1, probs.size())
    gradmat = data.multiply_by_row(gradmat)

    # this line is a bit hackish - you can't do the whole sum at once on a gpu
    # need to shorten the length of the axis over which to sum
    gradarr = gradmat.reshape(numvars * numalts, numobs)
    if weights is not None and not lcgrad:
        gradarr = gradarr.element_multiply(weights, inplace=True)
    gradarr = gradarr.sum(axis=1).reshape(numvars, numalts).sum(axis=1)
    gradmat.reshape(numvars, numalts * numobs)

    if stderr:
        if not lcgrad:
            return get_standard_error(get_hessian(gradmat.get_mat()))
        else:
            return np.zeros(beta.size())

    chosen.reshape(numalts, numobs)
    if weights is not None:
        loglik = (probs.log(inplace=True)
                  .element_multiply(weights, inplace=True)
                  .element_multiply(chosen, inplace=True)
                  ).sum(axis=1).sum(axis=0)
    else:
        loglik = (probs.log(inplace=True)
                  .element_multiply(chosen, inplace=True)
                  ).sum(axis=1).sum(axis=0)

    if loglik.typ == 'numpy':
        loglik, gradarr = loglik.get_mat(), gradarr.get_mat()
    else:
        loglik = loglik.get_mat()[0, 0]
        gradarr = np.reshape(gradarr.get_mat(), (1, gradarr.size()))[0]

    # Negate: optimizers minimize, so return negative log-likelihood.
    return -1 * loglik, -1 * gradarr
def mnl_simulate(data, coeff, numalts, GPU=0, returnprobs=0):
    """
    Simulate choices from a multinomial logit model.

    Parameters
    ----------
    data : 2D array
        "Long" form data: `numalts` rows per chooser, one row per
        alternative, alternatives in the same order for every chooser.
    coeff : 1D array
        Model coefficients, one per column of `data`.
    numalts : int
        Number of alternatives available to each chooser.
    GPU : int/bool, optional
        If truthy, compute probabilities with PMAT's 'cuda' backend.
    returnprobs : int/bool, optional
        If truthy, return the chooser-by-alternative probability matrix
        instead of simulated choices.

    Returns
    -------
    2D array of probabilities (when `returnprobs` is truthy), otherwise
    the simulated choice index per chooser.
    """
    atype = 'numpy' if not GPU else 'cuda'

    data = np.transpose(data)
    coeff = np.reshape(np.array(coeff), (1, len(coeff)))

    data, coeff = PMAT(data, atype), PMAT(coeff, atype)

    probs = mnl_probs(data, coeff, numalts)

    if returnprobs:
        return np.transpose(probs.get_mat())

    # convert to cpu from here on - gpu doesn't currently support these ops
    if probs.typ == 'cuda':
        probs = PMAT(probs.get_mat())

    probs = probs.cumsum(axis=0)
    # Floor division: the chooser count must be an int — under Python 3,
    # `/` would produce a float and break the random draw's size argument.
    r = pmat.random(probs.size() // numalts)
    choices = probs.subtract(r, inplace=True).firstpositive(axis=0)

    return choices.get_mat()
def mnl_simulate(data, coeff, numalts, GPU=0, returnprobs=0):
    """
    Simulate choices from a multinomial logit model.

    Parameters
    ----------
    data : 2D array
        "Long" form data: `numalts` rows per chooser, one row per
        alternative, alternatives in the same order for every chooser.
    coeff : 1D array
        Model coefficients, one per column of `data`.
    numalts : int
        Number of alternatives available to each chooser.
    GPU : int/bool, optional
        If truthy, compute probabilities with PMAT's 'cuda' backend.
    returnprobs : int/bool, optional
        If truthy, return the chooser-by-alternative probability matrix
        instead of simulated choices.

    Returns
    -------
    2D array of probabilities (when `returnprobs` is truthy), otherwise
    the simulated choice index per chooser.
    """
    atype = 'numpy' if not GPU else 'cuda'

    data = np.transpose(data)
    coeff = np.reshape(np.array(coeff), (1, len(coeff)))

    data, coeff = PMAT(data, atype), PMAT(coeff, atype)

    probs = mnl_probs(data, coeff, numalts)

    if returnprobs:
        return np.transpose(probs.get_mat())

    # convert to cpu from here on - gpu doesn't currently support these ops
    if probs.typ == 'cuda':
        probs = PMAT(probs.get_mat())

    probs = probs.cumsum(axis=0)
    # Floor division: the chooser count must be an int — under Python 3,
    # `/` would produce a float and break the random draw's size argument.
    r = pmat.random(probs.size() // numalts)
    choices = probs.subtract(r, inplace=True).firstpositive(axis=0)

    return choices.get_mat()