def test_lomax_pdf():
    """Test if the pdf is the same as reported by scipy."""
    alpha = np.array([1., 1.5, 2.])
    beta = np.array([1., 2., 3.])
    nSamples = 1000
    nParameters = alpha.shape[0]
    parameters = {"alpha": tf.constant(alpha), "beta": tf.constant(beta)}

    # test positive values
    data = np.random.random((nSamples, nParameters))
    tfData = tf.constant(data)
    probs = LomaxAlgorithms.pdf(parameters=parameters, data=tfData)
    with tf.Session() as sess:
        probs = sess.run(probs)
    assert (probs.shape == (nSamples, nParameters))
    spProbs = sp.stats.lomax(c=alpha, scale=beta).pdf(data)
    assert (np.allclose(probs, spProbs))

    # test negative values
    data = -np.random.random((nSamples, nParameters))
    tfData = tf.constant(data)
    probs = LomaxAlgorithms.pdf(parameters=parameters, data=tfData)
    with tf.Session() as sess:
        probs = sess.run(probs)
    assert (probs.shape == (nSamples, nParameters))
    assert (np.allclose(probs, np.zeros_like(probs)))

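# For cross-checking the test above: under scipy's parametrization
# (c=alpha, scale=beta), the Lomax density is
#   f(x) = (alpha / beta) * (1 + x / beta) ** (-(alpha + 1))  for x >= 0,
# and 0 otherwise. A minimal numpy reference (the function name is
# illustrative, not part of the library):
def lomax_pdf_reference(x, alpha, beta):
    xPos = np.maximum(x, 0.)  # avoid invalid powers for negative inputs
    pdf = (alpha / beta) * (1. + xPos / beta)**(-(alpha + 1.))
    return np.where(x >= 0., pdf, 0.)
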
def test_lomax_llh():
    """Test if the llh is the same as reported by scipy."""
    alpha = np.array([1., 1.5, 2.])
    beta = np.array([1., 2., 3.])
    nSamples = 1000
    nParameters = alpha.shape[0]
    parameters = {"alpha": tf.constant(alpha), "beta": tf.constant(beta)}

    # test positive values
    data = np.random.random((nSamples, nParameters))
    tfData = tf.constant(data)
    llh = LomaxAlgorithms.llh(parameters=parameters, data=tfData)
    with tf.Session() as sess:
        llh = sess.run(llh)
    assert (llh.shape == (nSamples, nParameters))
    spLlh = sp.stats.lomax(c=alpha, scale=beta).logpdf(data)
    assert (np.allclose(llh, spLlh))

    # test negative values
    data = -np.random.random((nSamples, nParameters))
    tfData = tf.constant(data)
    llh = LomaxAlgorithms.llh(parameters=parameters, data=tfData)
    with tf.Session() as sess:
        llh = sess.run(llh)
    assert (llh.shape == (nSamples, nParameters))
    assert (np.allclose(llh, -np.inf * np.ones_like(llh)))

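# The log-density checked above is, for x >= 0,
#   log f(x) = log(alpha) - log(beta) - (alpha + 1) * log1p(x / beta),
# and -inf for x < 0, matching the negative-value assertion. A minimal
# numpy reference (illustrative name, not part of the library):
def lomax_logpdf_reference(x, alpha, beta):
    xPos = np.maximum(x, 0.)  # keep log1p well-defined for negative inputs
    logpdf = (np.log(alpha) - np.log(beta)
              - (alpha + 1.) * np.log1p(xPos / beta))
    return np.where(x >= 0., logpdf, -np.inf)
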
def test_lomax_sample():
    """Test whether the parameters can be recovered from many samples."""
    alpha = np.array([1., 1.5, 2.])
    beta = np.array([1., 2., 3.])
    nParameters = alpha.shape[0]
    nSamples = 1000000
    shape = (nSamples, nParameters)
    nIterations = 100

    # sample from the distribution using the true parameters
    trueParameters = {
        "alpha": tf.constant(alpha),
        "beta": tf.constant(beta),
        "tau": tf.constant(np.random.random(shape))
    }
    tfData = LomaxAlgorithms.sample(parameters=trueParameters,
                                    nSamples=nSamples)

    # randomly initialize the parameter estimates
    parameters = {
        "alpha": tf.constant(np.ones(nParameters)),
        "beta": tf.constant(np.ones(nParameters)),
        "tau": tf.constant(np.ones((nSamples, nParameters)))
    }
    variables = {
        key: tf.get_variable(key, initializer=value)
        for key, value in parameters.items()
    }

    # estimate the parameters from the random sample
    parameterUpdate = LomaxAlgorithms.fit(parameters=variables, data=tfData)
    varUpdates = {}
    for key, var in variables.items():
        varUpdates[key] = tf.assign(var, parameterUpdate[key])

    with tf.Session() as sess:
        # initialize variables
        for key, var in variables.items():
            sess.run(var.initializer)
        # update the variables
        for i in range(nIterations):
            sess.run(varUpdates)
        # get estimated parameters
        parameters = sess.run(variables)

    # check the estimations
    alphaHat = parameters["alpha"]
    assert (alphaHat.shape == alpha.shape)
    assert (np.allclose(alphaHat, alpha, atol=1e-1))
    betaHat = parameters["beta"]
    assert (betaHat.shape == beta.shape)
    assert (np.allclose(betaHat, beta, atol=1e-1))
    tf.reset_default_graph()

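# The latent "tau" above suggests the sampler is latent-rate augmented: a
# Lomax(alpha, beta) draw can be generated as an exponential whose rate tau
# is Gamma(alpha, rate=beta) distributed. A hedged numpy sketch of that
# compound draw (an assumption about LomaxAlgorithms.sample's internals,
# not something this test verifies):
alphaSketch = np.array([1., 1.5, 2.])
betaSketch = np.array([1., 2., 3.])
tauSketch = np.random.gamma(shape=alphaSketch, scale=1. / betaSketch,
                            size=(1000000, 3))
xSketch = np.random.exponential(scale=1. / tauSketch)  # marginally Lomax
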
@classmethod
def fitLatents(cls, parameters: Dict[str, Tensor],
               data: Tensor) -> Dict[str, Tensor]:
    params = cls.getParameters(parameters=parameters)
    b, cenNnElasticnetParams, exponentialParams, lomaxParams = params
    # only the latent Lomax rates are refit here
    lomaxParamsUp = LomaxAlgorithms.fitLatents(lomaxParams, data)
    lomaxParams["tau"] = lomaxParamsUp["tau"]
    updatedParams = {"tauLomax": lomaxParams["tau"]}
    return (updatedParams)

@classmethod
def pdf(cls, parameters: Dict[str, Tensor], data: Tensor) -> Tensor:
    params = cls.getParameters(parameters=parameters)
    b, cenNnElasticnetParams, exponentialParams, lomaxParams = params
    pdfLomax = LomaxAlgorithms.pdf(lomaxParams, data)
    pdfExponential = ExponentialAlgorithms.pdf(exponentialParams, data)
    pdfElasticNet = CenNnElasticNetAlgorithms.pdf(cenNnElasticnetParams, data)
    # select the per-column component density via the indicator b
    b = b[None] * tf.ones_like(pdfElasticNet)
    pdf = tf.where(tf.equal(b, 0.), pdfElasticNet,
                   tf.where(tf.equal(b, 1.), pdfExponential, pdfLomax))
    return (pdf)

@classmethod
def llh(cls, parameters: Dict[str, Tensor], data: Tensor) -> Tensor:
    params = cls.getParameters(parameters=parameters)
    b, cenNnElasticnetParams, exponentialParams, lomaxParams = params
    llhLomax = LomaxAlgorithms.llh(lomaxParams, data)
    llhExponential = ExponentialAlgorithms.llh(exponentialParams, data)
    llhElasticNet = CenNnElasticNetAlgorithms.llh(cenNnElasticnetParams, data)
    # select the per-column component log-likelihood via the indicator b
    b = b[None] * tf.ones_like(llhElasticNet)
    llh = tf.where(tf.equal(b, 0.), llhElasticNet,
                   tf.where(tf.equal(b, 1.), llhExponential, llhLomax))
    return (llh)

@classmethod
def sample(cls, parameters: Dict[str, Tensor], nSamples: Tensor) -> Tensor:
    params = cls.getParameters(parameters=parameters)
    b, cenNnElasticnetParams, exponentialParams, lomaxParams = params
    rLomax = LomaxAlgorithms.sample(lomaxParams, nSamples)
    rExponential = ExponentialAlgorithms.sample(exponentialParams, nSamples)
    rElasticNet = CenNnElasticNetAlgorithms.sample(cenNnElasticnetParams,
                                                   nSamples)
    # draw from the component selected by the indicator b
    b = b * tf.ones_like(rLomax)
    r = tf.where(tf.equal(b, 0.), rElasticNet,
                 tf.where(tf.equal(b, 1.), rExponential, rLomax))
    return (r)

def test_lomax_mode():
    """Test if the mode is equal to zero."""
    alpha = np.array([1., 1.5, 2.])
    beta = np.array([1., 2., 3.])
    nParameters = alpha.shape[0]
    trueParameters = {"alpha": tf.constant(alpha), "beta": tf.constant(beta)}
    mode = LomaxAlgorithms.mode(parameters=trueParameters)
    with tf.Session() as sess:
        mode = sess.run(mode)
    assert (mode.shape == (nParameters, ))
    assert (np.all(mode == np.zeros_like(alpha)))

@classmethod
def fit(cls, parameters: Dict[str, Tensor],
        data: Tensor) -> Dict[str, Tensor]:
    params = cls.getParameters(parameters=parameters)
    b, cenNnElasticnetParams, exponentialParams, lomaxParams = params

    # fit each candidate component to the data
    cenNnElasticnetParams = CenNnElasticNetAlgorithms.fit(
        cenNnElasticnetParams, data)
    exponentialParams = ExponentialAlgorithms.fit(exponentialParams, data)
    lomaxParams = LomaxAlgorithms.fit(lomaxParams, data)

    # score each component by its mean log-likelihood per column
    cenNnElasticnetLlh = CenNnElasticNetAlgorithms.llh(
        cenNnElasticnetParams, data)
    cenNnElasticnetLlh = tf.reduce_mean(cenNnElasticnetLlh, axis=0)
    exponentialLlh = ExponentialAlgorithms.llh(exponentialParams, data)
    exponentialLlh = tf.reduce_mean(exponentialLlh, axis=0)
    lomaxLlh = LomaxAlgorithms.llh(lomaxParams, data)
    lomaxLlh = tf.reduce_mean(lomaxLlh, axis=0)

    # encode the best-scoring component in the indicator b
    # (0 = elastic net, 1 = exponential, 2 = Lomax)
    condElasticNet = tf.logical_and(cenNnElasticnetLlh > lomaxLlh,
                                    cenNnElasticnetLlh > exponentialLlh)
    condExponential = exponentialLlh > lomaxLlh
    b = tf.where(
        condElasticNet, tf.zeros_like(cenNnElasticnetLlh),
        tf.where(condExponential, tf.ones_like(exponentialLlh),
                 2. * tf.ones_like(lomaxLlh)))

    updatedParameters = {
        "b": b,
        "mu": cenNnElasticnetParams["mu"],
        "tau": cenNnElasticnetParams["tau"],
        "betaExponential": exponentialParams["beta"],
        "alpha": lomaxParams["alpha"],
        "beta": lomaxParams["beta"],
        "tauLomax": lomaxParams["tau"]
    }
    return (updatedParameters)

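# Ignoring ties, the nested tf.where in fit above picks, per column, the
# component with the highest mean log-likelihood. An illustrative numpy
# equivalent (note that np.argmax breaks exact ties toward the first
# component, while the tf.where chain favors the later one):
meanLlhs = np.array([[-1.2, -0.4],   # elastic net
                     [-0.9, -0.7],   # exponential
                     [-1.0, -0.3]])  # lomax
bSketch = np.argmax(meanLlhs, axis=0).astype(np.float64)
# bSketch == [1., 2.]: exponential wins column 0, Lomax wins column 1
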
@classmethod
def fitLatents(cls, parameters: Dict[str, Tensor],
               data: Tensor) -> Dict[str, Tensor]:
    # fold the data onto the positive half-line before fitting the latents
    parameters = LomaxAlgorithms.fitLatents(parameters=parameters,
                                            data=tf.abs(data))
    return (parameters)

@classmethod
def llh(cls, parameters: Dict[str, Tensor], data: Tensor) -> Tensor:
    # two-sided density: evaluate the Lomax llh at |x| and subtract log(2)
    llh = LomaxAlgorithms.llh(parameters=parameters,
                              data=tf.abs(data)) - np.log(2.)
    return (llh)

@classmethod
def pdf(cls, parameters: Dict[str, Tensor], data: Tensor) -> Tensor:
    # two-sided density: evaluate the Lomax pdf at |x| and halve it
    pdf = LomaxAlgorithms.pdf(parameters=parameters, data=tf.abs(data)) / 2.
    return (pdf)

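# A hedged sanity sketch for the two-sided construction above: halving the
# one-sided Lomax pdf evaluated at |x| yields a symmetric density on the
# whole real line; by symmetry it integrates to one if the positive half
# integrates to 1/2 (alpha=2, beta=3 chosen arbitrarily):
from scipy import integrate, stats
half, _ = integrate.quad(
    lambda x: stats.lomax(c=2., scale=3.).pdf(x) / 2., 0., np.inf)
assert np.isclose(2. * half, 1.)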