def _load_dataset(self, dataset_name): if dataset_name == "bc-data": self._dataset = bc.load() elif dataset_name == "total-load": self._dataset = ul.total_experiment_load()[1]['Load'] else: raise RuntimeError("Invalid dataset name: %s" % dataset_name)
def _prepare_dataset(self):
    """Return datasets for the experiment periods containing the total
    load for all users in the experiment.

    Joins the temperature series with the concatenated per-period total
    load, producing one joined dataset per experiment period.
    """
    # print() with a single argument behaves identically under Python 2
    # (parenthesized expression) and Python 3 (function call), unlike the
    # original Python-2-only print statement.
    print("Using total load rather than per-user load.")
    temps = read_temperatures()
    loads = pd.concat(ul.total_experiment_load())
    return [self._join_temp_and_load(temps, loads, period)
            for period in ul.experiment_periods()]
def _prepare_dataset(self):
    """Return datasets for the experiment periods containing the total
    load for all users in the experiment."""
    combined_load = pd.concat(ul.total_experiment_load())
    datasets = []
    for period in ul.experiment_periods():
        datasets.append(ul.add_temperatures(combined_load, period))
    return datasets
def _prepare_dataset(self):
    """Return datasets for the experiment periods containing the total
    load for all users in the experiment.

    Joins the temperature series with the concatenated per-period total
    load, producing one joined dataset per experiment period.
    """
    # Single-argument print() works the same under Python 2 and Python 3;
    # the original used the Python-2-only print statement.
    print("Using total load rather than per-user load.")
    temps = read_temperatures()
    loads = pd.concat(ul.total_experiment_load())
    return [
        self._join_temp_and_load(temps, loads, period)
        for period in ul.experiment_periods()
    ]
def _get_smoother():
    """Return the B-spline smoother class to time.

    Set slow_smoother to True in order to see the actual time consumed by
    the B-spline smoothing operation.  If set to False, will use the
    default smoother where the roughness matrices are cached.
    """
    slow_smoother = True
    # Single-argument print() behaves identically under Python 2 and 3;
    # the original Python-2-only print statement would not parse on py3.
    if slow_smoother:
        print("Using slow, analytic, non-caching smoother.")
        return cln.BSplineAnalyticSmoother
    else:
        print("Using not quite so slow, caching smoother.")
        return cln.BSplineSmoother

# Load a dataset containing power load history. This set is divided into
# training and test data, we only keep the training part for now.
dataset, _ = ul.total_experiment_load()

# Set parameters for the B-spline smoother/cleanser
smoothness = 10
zscore = 0.5

# Try smoothing/cleansing different time series lengths
for hindsight_days in [1]:
    # Select data
    num_hours = 24 * hindsight_days
    data = dataset["Load"][-num_hours:].copy()
    # Some output and rough timing
    print("Cleansing %d hours of data with smoothness %.2f, z-score %.2f..."
          % (num_hours, smoothness, zscore))
    sys.stdout.flush()
    start_time = time.time()
    # This is the part that takes time
    smoother = _get_smoother()(data, smoothness)
def _get_smoother():
    """Return the B-spline smoother class to time.

    Set slow_smoother to True in order to see the actual time consumed by
    the B-spline smoothing operation.  If set to False, will use the
    default smoother where the roughness matrices are cached.
    """
    slow_smoother = True
    # Single-argument print() behaves identically under Python 2 and 3;
    # the original Python-2-only print statement would not parse on py3.
    if slow_smoother:
        print("Using slow, analytic, non-caching smoother.")
        return cln.BSplineAnalyticSmoother
    else:
        print("Using not quite so slow, caching smoother.")
        return cln.BSplineSmoother

# Load a dataset containing power load history. This set is divided into
# training and test data, we only keep the training part for now.
dataset, _ = ul.total_experiment_load()

# Set parameters for the B-spline smoother/cleanser
smoothness = 10
zscore = 0.5

# Try smoothing/cleansing different time series lengths
for hindsight_days in [1]:
    # Select data
    num_hours = 24 * hindsight_days
    data = dataset["Load"][-num_hours:].copy()
    # Some output and rough timing
    print("Cleansing %d hours of data with smoothness %.2f, z-score %.2f..."
          % (num_hours, smoothness, zscore))
    sys.stdout.flush()
    start_time = time.time()
    # This is the part that takes time
def _prepare_dataset(self):
    """Return datasets for the experiment periods containing the total
    load for all users in the experiment."""
    all_loads = pd.concat(ul.total_experiment_load())
    periods = ul.experiment_periods()
    return [ul.add_temperatures(all_loads, p) for p in periods]
def setUpClass(cls):
    """Prepare shared test data: the first experiment period's training
    load with temperatures joined in."""
    training_load = ul.total_experiment_load()[0]
    first_period = ul.experiment_periods()[0]
    cls.data = ul.add_temperatures(training_load, first_period)