def testMeanVariance(errorLimit=1e-11, verbose=True):
    """Spot-check RollingMeanVarianceWindow against numpy.

    Builds a correlated-returns test series, constructs a rolling
    mean/variance window, then for 10 random window positions compares the
    window's cached mean/variance with np.mean/np.var over the same slice.

    Params:
        errorLimit: max absolute difference tolerated between the rolling
            values and the numpy reference before a spot is marked 'Failed'.
        verbose: when True, print the raw values and their differences.
    """
    # NOTE(review): rewritten from Python 2 print statements to
    # single-argument print() calls, which behave identically on 2 and 3.
    print('************************************* TESTING MEAN VARIANCE')
    numDims = 2
    n = 480
    data = create_test_data_correlated_returns(n=n, numDims=numDims,
                                               includeResponse=False)
    dataY = data['data'][:, 1]
    # Create Regression Window
    windowSize = 100
    RW = RollingMeanVarianceWindow(dataY, windowSize)
    # 10 random starting offsets; each must leave a full window of data.
    spots = np.random.randint(n - windowSize, size=(10))
    for spot in spots:
        y = dataY[spot:spot + windowSize]
        mean = np.mean(y)
        variance = np.var(y)
        # assumes RW.means/RW.variances are indexed by window start — TODO confirm
        createdMean = RW.means[spot]
        createdVariance = RW.variances[spot]
        print('************************************* %s' % spot)
        if verbose:
            print('Mean: %s %s %s' % (mean, createdMean, mean - createdMean))
            print('Variance: %s %s %s'
                  % (variance, createdVariance, variance - createdVariance))
        print('OK!' if (abs(mean - createdMean) < errorLimit)
              and (abs(variance - createdVariance) < errorLimit) else 'Failed')
def testNonSequentialRegression(errorLimit=1e-11, verbose=True):
    """Spot-check NonSequentialRegressionWindow against scipy.stats.linregress.

    Builds a 2-D correlated-returns test series, constructs a rolling
    regression window over (X, Y), then for 10 random window positions
    compares the window's cached slope/intercept with a direct linregress
    over the same slice.

    Params:
        errorLimit: max absolute difference tolerated between the rolling
            values and the scipy reference before a spot is marked 'Failed'.
        verbose: when True, print the raw values and their differences.
    """
    # NOTE(review): rewritten from Python 2 print statements to
    # single-argument print() calls, which behave identically on 2 and 3.
    print('************************************* TESTING NON SEQUENTIAL REGRESSION')
    numDims = 2
    n = 480
    data = create_test_data_correlated_returns(n=n, numDims=numDims,
                                               includeResponse=False)
    dataX = data['data'][:, 0]
    dataY = data['data'][:, 1]
    # Create Regression Window
    windowSize = 100
    RW = NonSequentialRegressionWindow(dataX, dataY, windowSize)
    # 10 random starting offsets; each must leave a full window of data.
    spots = np.random.randint(n - windowSize, size=(10))
    for spot in spots:
        x = dataX[spot:spot + windowSize]
        y = dataY[spot:spot + windowSize]
        (slope, intercept, r_value, p_value, std_err) = stats.linregress(x, y)
        # assumes RW.slopes/RW.intercepts are indexed by window start — TODO confirm
        createdSlope = RW.slopes[spot]
        createdIntercept = RW.intercepts[spot]
        print('************************************* %s' % spot)
        if verbose:
            print('Slope: %s %s %s' % (slope, createdSlope, slope - createdSlope))
            print('Intercept: %s %s %s'
                  % (intercept, createdIntercept, intercept - createdIntercept))
        print('OK!' if (abs(slope - createdSlope) < errorLimit)
              and (abs(intercept - createdIntercept) < errorLimit) else 'Failed')
def testTransformationReversals():
    """Driver for the transformation-reversal tests.

    Builds a 1-D correlated-returns series, wraps it in an
    EMF_TestSeries_Handle, and runs the FoD (first-order-difference)
    reversal tests against it. Some sibling tests are currently disabled
    (commented out below).
    """
    testInfo = utl_Tst.create_test_data_correlated_returns(numDims=1, n=100)
    data = testInfo['data'][:, 0]
    dt = testInfo['dt']
    # Wrap values + dates in a series handle used by the reversal tests.
    hndl_Srs_Original = EMF_TestSeries_Handle()
    hndl_Srs_Original.set_series_dates(dt)
    hndl_Srs_Original.set_series_values(data)
    # utl_Tst.plot_data_series(hndl_Srs_Original)
    # testTransformationReversal_None(data, dt)
    # testTransformationReversal_Past_Lvl(data, dt, hndl_Srs_Original)
    testTransformationReversal_Past_FoD(data, dt, hndl_Srs_Original)
    testTransformationReversal_Future_FoD(data, dt, hndl_Srs_Original)
def main():
    """Run the pattern tests, each against a fresh TimeHandle.

    The first four tests share a linearly-increasing series; the
    categorical-level test uses a correlated-returns series instead.
    """
    data = utl_Tst.create_test_data_linear_change(n=100, increase=1)['data']
    # Each test gets its own freshly-constructed TimeHandle.
    for run_test in (test_None_pattern,
                     test_Past_Change_pattern,
                     test_Futr_Level_pattern,
                     test_Past_Level_pattern):
        run_test(data, get_test_TimeHandle())
    data = utl_Tst.create_test_data_correlated_returns(numDims=1, n=100)['data'][:, 0]
    test_Current_Level_Cat_pattern(data, get_test_TimeHandle())
def generate_d3_JSON_ParallelCoords(self):
    """Generate and save JSON test fixtures for a d3 parallel-coordinates view.

    Produces three files under the 'test1' data name:
      - history:     the raw monthly series as date/value JSON records
      - predictions: 40 random-walk-style predictions per future month
      - models:      per-model metadata (id, random confidence, junk desc)
    """
    # --- Generate 'History' ---
    n = 120  # 10 years of monthly data
    n_fd = 12  # number of future months to predict
    series = util_Tst.create_test_data_correlated_returns(n=n, numDims=1,
                                                          includeResponse=False)
    dt = util_Tst.create_monthly_date_range(n=n)
    vals = series['data']
    json_history = [DATA_SERIES_TO_JSON(d, v) for (d, v) in zip(dt, vals)]

    # --- Generate Predictions ---
    # Std-dev of one-period differences; used to scale the random walk.
    std = np_std(transform_FOD_BackwardLooking(
        vals, {utl_Trns.FIRST_ORDER_DIFF_TIME: 1}))
    end_val = vals[-1, 0]

    def get_random_prediction_values(per_fd):
        # 40 predictions per horizon: last value + N(0,1)*std*sqrt(horizon),
        # i.e. a random walk fanning out with the forecast distance.
        numPreds = 40
        preds = [end_val + normal() * std * sqrt(per_fd)
                 for _ in range(numPreds)]
        return (range(numPreds), preds)

    def get_model_metadata(model_idx):
        return {JSON_MODEL_ID: model_idx,
                JSON_MODEL_CONFIDENCE: random(),
                JSON_MODEL_DESC: 'junkdesc ' + str(model_idx)}

    end_dt = dt[-1]
    # hacky, but +10000 epoch units pushes the start into the next month
    prd_dt = util_Tst.create_monthly_date_range(n=n_fd + 1,
                                                startEpoch=end_dt + 10000)
    models = {}
    preds = []
    # FIX(review): loop variable renamed from `dt` to `pred_date` — the
    # original shadowed the history date-range `dt` defined above.
    for (horizon, pred_date) in enumerate(prd_dt):
        (model_idxs, pred_values) = get_random_prediction_values(horizon)
        models.update(dict.fromkeys(model_idxs))
        for (md, vl) in zip(model_idxs, pred_values):
            preds.append({JSON_MODEL_ID: md,
                          JSON_DATE_KEY: dt_epoch_to_str_Y_M_D(pred_date),
                          JSON_VALUE_KEY: vl})
    # Fill in metadata for every model id encountered above.
    for md in models:
        models[md] = get_model_metadata(md)

    # --- Save data ---
    dataName = 'test1'
    filePath = get_json_history_path(dataName)
    save_to_JSON(filePath, json_history)
    filePath = get_json_predictions_path(dataName)
    save_to_JSON(filePath, preds)
    filePath = get_json_model_path(dataName)
    save_to_JSON(filePath, models)


# if __name__ == '__main__':
#     generator = EMF_TestDataGenerator()
#     generator.generate_d3_JSON_ParallelCoords()