def testEstimate(self):
    """
    Run ABCSMC on a simple normal-distribution model and check that the
    posterior sample is large enough and its mean is close to the real theta.
    """
    # Set up a simple model based on a normal distribution.
    abcParams = ABCParameters()
    epsilonArray = numpy.array([0.5, 0.2, 0.1])
    posteriorSampleSize = 20

    # Empirical estimate of the real summary statistic Sprime.
    # NOTE(review): theta and abcMetrics are not defined in this method —
    # presumably module-level or set up in setUp(); confirm.
    model = NormalModel(abcMetrics)
    model.setMu(theta[0])
    model.setSigma(theta[1])
    Sprime = abcMetrics.summary(model.simulate())
    logging.debug("Real summary statistic: " + str(Sprime))

    thetaDir = PathDefaults.getTempDir()
    abcSMC = ABCSMC(epsilonArray, createNormalModel, abcParams, thetaDir)
    abcSMC.maxRuns = 100000
    abcSMC.setPosteriorSampleSize(posteriorSampleSize)
    thetasArray = abcSMC.run()
    thetasArray = numpy.array(thetasArray)

    meanTheta = numpy.mean(thetasArray, 0)
    logging.debug(thetasArray.shape)
    logging.debug(thetasArray)
    logging.debug(meanTheta)
    print(thetasArray.shape[0], posteriorSampleSize)

    # Note: only the mean needs to be similar to the true parameters.
    self.assertTrue(thetasArray.shape[0] >= posteriorSampleSize)
    # Fixed: assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual.
    self.assertEqual(thetasArray.shape[1], 2)
    self.assertTrue(numpy.linalg.norm(theta[0] - meanTheta[0]) < 0.2)
def testLoadParams(self):
    """
    Save a list of (setter, value) parameters via the predictor and check
    that loading them back yields the expected (class, method, value) triples.
    """
    try:
        lmbda = 0.01
        alterRegressor = PrimalRidgeRegression(lmbda)
        egoRegressor = PrimalRidgeRegression(lmbda)
        predictor = EgoEdgeLabelPredictor(alterRegressor, egoRegressor)

        params = [0.1, 0.2]
        paramFuncs = [egoRegressor.setLambda, alterRegressor.setLambda]
        fileName = PathDefaults.getTempDir() + "tempParams.pkl"

        predictor.saveParams(params, paramFuncs, fileName)
        params2 = predictor.loadParams(fileName)

        # Each loaded entry is (class path, setter name, value).
        self.assertTrue(params2[0][0] == "apgl.predictors.PrimalRidgeRegression")
        self.assertTrue(params2[0][1] == "setLambda")
        self.assertTrue(params2[0][2] == 0.1)
        self.assertTrue(params2[1][0] == "apgl.predictors.PrimalRidgeRegression")
        self.assertTrue(params2[1][1] == "setLambda")
        self.assertTrue(params2[1][2] == 0.2)
    except IOError as e:
        # Fixed: logging.warn is a deprecated alias of logging.warning.
        logging.warning(e)
def testLoadParams(self):
    """
    Save a list of (setter, value) parameters via the predictor and check
    that loading them back yields the expected (class, method, value) triples.
    """
    try:
        lmbda = 0.01
        alterRegressor = PrimalRidgeRegression(lmbda)
        egoRegressor = PrimalRidgeRegression(lmbda)
        predictor = EgoEdgeLabelPredictor(alterRegressor, egoRegressor)

        params = [0.1, 0.2]
        paramFuncs = [egoRegressor.setLambda, alterRegressor.setLambda]
        fileName = PathDefaults.getTempDir() + "tempParams.pkl"

        predictor.saveParams(params, paramFuncs, fileName)
        params2 = predictor.loadParams(fileName)

        # Each loaded entry is (class path, setter name, value).
        self.assertTrue(params2[0][0] == "apgl.predictors.PrimalRidgeRegression")
        self.assertTrue(params2[0][1] == "setLambda")
        self.assertTrue(params2[0][2] == 0.1)
        self.assertTrue(params2[1][0] == "apgl.predictors.PrimalRidgeRegression")
        self.assertTrue(params2[1][1] == "setLambda")
        self.assertTrue(params2[1][2] == 0.2)
    except IOError as e:
        # Fixed: logging.warn is a deprecated alias of logging.warning.
        logging.warning(e)
def testSaveParams(self):
    """
    Check that saving predictor parameters to a temp pickle file does not
    raise (IOError is logged and tolerated, e.g. on read-only temp dirs).
    """
    try:
        lmbda = 0.01
        alterRegressor = PrimalRidgeRegression(lmbda)
        egoRegressor = PrimalRidgeRegression(lmbda)
        predictor = EgoEdgeLabelPredictor(alterRegressor, egoRegressor)

        params = [0.1, 0.2]
        paramFuncs = [egoRegressor.setLambda, alterRegressor.setLambda]
        fileName = PathDefaults.getTempDir() + "tempParams.pkl"

        predictor.saveParams(params, paramFuncs, fileName)
    except IOError as e:
        # Fixed: logging.warn is a deprecated alias of logging.warning.
        logging.warning(e)
def profile(command, globalVars, localVars, numStats=30):
    """
    Profile the given command string under the supplied global and local
    variable dicts, then print the top entries sorted by cumulative time
    and by internal (per-function) time.

    :param command: the statement to execute under the profiler
    :param globalVars: globals dict passed to cProfile.runctx
    :param localVars: locals dict passed to cProfile.runctx
    :param numStats: how many rows of statistics to print per sort order
    """
    try:
        import cProfile
        import pstats
    except ImportError:
        raise ImportError("profile() requires pstats and cProfile")

    # Profile output goes into a temp-directory .cprof file.
    outputFileName = PathDefaults.getTempDir() + "profile.cprof"

    logging.info("Starting to profile ...")
    cProfile.runctx(command, globalVars, localVars, outputFileName)
    logging.info("Done")

    profileStats = pstats.Stats(outputFileName)
    # Print the same stats twice: first by cumulative time, then by own time.
    for sortKey in ("cumulative", "time"):
        profileStats.strip_dirs().sort_stats(sortKey).print_stats(numStats)