from rpy import r  # assumed: these helpers use the classic RPy interface, where r is the R session object

def fit_gamma(samples):
    samples = [float(n) for n in samples if n > 0]  # because rpy does not like longs; gamma needs positive values
    r.library('MASS')
    f = r.fitdistr(samples, 'gamma')
    shap, rat = f['estimate']['shape'], f['estimate']['rate']
    qp = r.qgamma(r.ppoints(samples), shape=shap, rate=rat)
    return qp, shap, rat

def fit_weibull(samples):
    #samples = [float(n) for n in samples if n > 0]  # because rpy does not like longs!
    r.library('MASS')
    f = r.fitdistr(samples, 'weibull')
    sc, sh = f['estimate']['scale'], f['estimate']['shape']
    qp = r.qweibull(r.ppoints(samples), scale=sc, shape=sh)
    return qp, sc, sh

def fit_exponential(samples):
    samples = [float(n) for n in samples]  # because rpy does not like longs!
    r.library('MASS')
    f = r.fitdistr(samples, 'exponential')
    rat = f['estimate']['rate']
    qp = r.qexp(r.ppoints(samples), rate=rat)
    return qp, rat

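# Usage sketch (hypothetical data, not part of the original module): each fit_* helper
# returns the fitted parameters plus theoretical quantiles evaluated at r.ppoints(samples),
# which is what you would plot against the sorted observations for a quick Q-Q check.
# matplotlib is assumed to be available here purely for illustration.
import matplotlib.pyplot as plt

obs = [3.2, 7.1, 1.4, 12.0, 5.5, 9.3, 4.8, 6.6]   # hypothetical positive samples
qp, shap, rat = fit_gamma(obs)                     # gamma quantiles at ppoints(obs)
plt.scatter(sorted(obs), qp)                       # observed vs. fitted quantiles
plt.xlabel('observed quantiles')
plt.ylabel('fitted gamma quantiles')
plt.show()
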
def _ltm(_locals):
    '''Supports method core_R.ltm()'''
    # assumes module-level imports of rpy (and its r object) and dmn (the Damon package)

    class LtmError(Exception): pass

    # Get self
    self = _locals['damonObj']
    coredata = dmn.tools.get_damon_datadict(self)["coredata"]

    # Convert all nanvals to the nanval of the R environment.
    coredata[coredata == self.nanval] = r.NAN

    # Convert coredata into a dictionary with keys equal to items.
    data_dict = {str(x): coredata[:, x - 1]
                 for x in range(1, len(self["collabels"][0]))}

    # This statement is not necessary; BASIC_CONVERSION is the default.
    #rpy.set_default_mode(rpy.NO_CONVERSION)

    # Import the ltm library through the r object. (To install packages in R you have
    # to use the R console; as far as I know there is no way to do it from Python.)
    r.library("ltm")

    # Assign an object named df in the R environment.
    r.assign('df', data_dict)

    # Convert df to an R data frame to be passed to the ltm function.
    r("df = data.frame(df)")

    # Build the formula to be passed to the ltm function; r("...") runs statements
    # that cannot be expressed from the Python environment.
    formula = r("df ~ " + _locals['formula_rightside'])

    # Make the input arguments suitable for R.
    if _locals['na_action'] is not None:
        _locals['na_action'] = r[_locals['na_action']]

    # Call r.ltm
    try:
        ltm_out = r.ltm(formula,
                        na_action=_locals['na_action'],
                        IRT_param=_locals['irt_param'],
                        constraint=_locals['constraint'],
                        start_val=_locals['start_val'],
                        control=_locals['control'])
    except:
        exc = 'Unable to find r.ltm(). Make sure the ltm package is installed in the R library.'
        raise LtmError(exc)

    return ltm_out

def _grm(_locals):
    '''Supports method core_R.grm()'''

    # Get self
    self = _locals['damonObj']
    coredata = dmn.tools.get_damon_datadict(self)["coredata"]

    # Convert all nanvals to the nanval of the R environment.
    coredata[coredata == self.nanval] = r.NAN

    # Convert coredata into a dictionary with keys equal to items.
    data_dict = {str(x): coredata[:, x - 1]
                 for x in range(1, len(self["collabels"][0]))}

    # This statement is not strictly necessary; BASIC_CONVERSION is the default.
    rpy.set_default_mode(rpy.NO_CONVERSION)

    # Import the ltm library through the r object. (To install packages in R you have
    # to use the R console; as far as I know there is no way to do it from Python.)
    r.library("ltm")

    # Make the input arguments suitable for R.
    if _locals['na_action'] is not None:
        _locals['na_action'] = r[_locals['na_action']]

    # Assign an object named df in the R environment.
    r.assign('df', data_dict)

    print _locals['control']

    # Convert df to an R data frame to be passed to the grm function.
    r("df = data.frame(df)")

    # Get the robj of the data frame created in R.
    r_data_frame = r("df")

    # Call r.grm
    grm_out = r.grm(r_data_frame,
                    constrained=_locals['constrained'],
                    na_action=_locals['na_action'],
                    IRT_param=_locals['irt_param'],
                    Hessian=_locals['hessian'],
                    start_val=_locals['start_val'],
                    control=_locals['control'])

    return grm_out

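# A minimal sketch (illustrative names only) of the rpy pattern the two wrappers above
# rely on: bind a Python dict of columns into R with r.assign, coerce it to a data.frame
# with an r("...") statement, and then hand it to whatever R function you need. The
# column names and data here are made up; summary() just stands in for ltm()/grm().
from rpy import r

cols = {'item1': [1, 0, 1, 1], 'item2': [0, 0, 1, 1]}  # hypothetical response columns
r.assign('df', cols)             # df now exists in the R workspace
r("df = data.frame(df)")         # coerce the list-like object to a data.frame
print r("summary(df)")           # any subsequent R call can reference df
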
def fit_nbinom(samples):
    r.library('MASS')
    f = r.fitdistr(samples, 'negative binomial')
    s, m = f['estimate']['size'], f['estimate']['mu']
    qp = r.qnbinom(r.ppoints(samples), size=s, mu=m)
    return qp, s, m

def fit_poisson(samples):
    r.library('MASS')
    f = r.fitdistr(samples, 'poisson')
    lam = f['estimate']['lambda']  # predicted mean
    qp = r.qpois(r.ppoints(samples), lam)
    return qp, lam

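# Usage sketch for the discrete fits (hypothetical counts, illustrative only): as with
# the continuous helpers, the returned quantiles line up with the sorted observations,
# and the fitted parameter(s) come back alongside them.
counts = [0, 2, 1, 4, 3, 2, 5, 1, 2, 3]      # hypothetical event counts
qp, lam = fit_poisson(counts)                 # Poisson quantiles at ppoints(counts), fitted lambda
pairs = zip(sorted(counts), qp)               # (observed, fitted) pairs for a Q-Q comparison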