def _default_params(cls):
    """Return the default parameters used to instantiate this estimator.

    Provides two scaled linear pipelines as the default ``regressors``:
    a LinearRegression (with a small intercept learning rate) and a
    PARegressor, each preceded by a StandardScaler.
    """
    default_regressors = [
        pp.StandardScaler() | lm.LinearRegression(intercept_lr=0.1),
        pp.StandardScaler() | lm.PARegressor(),
    ]
    return {'regressors': default_regressors}
def __init__(self, name, identifier, availability_topic, number_of_sensors, number_of_metrics, logger, model, learning_time=dt.timedelta(days=7)):
    """Initialise the entity and build its online-learning model.

    Args:
        name: Human-readable name of this entity.
        identifier: Unique identifier string.
        availability_topic: MQTT-style topic used to publish availability
            (presumably — TODO confirm against the broker code).
        number_of_sensors: Expected number of sensor inputs.
        number_of_metrics: Expected number of tracked metrics.
        logger: Logger object used by ``log_message``.
        model: Model selector — 1 for AdaptiveRandomForestRegressor,
            2 for a scaled PARegressor pipeline.
        learning_time: Initial learning period before predictions are
            trusted (defaults to 7 days).

    Raises:
        ValueError: If ``model`` is not 1 or 2. (Previously an unknown
            value left ``self._model`` unset, causing a confusing
            AttributeError on first use.)
    """
    self._name = name
    self._identifier = identifier
    self._availability_topic = availability_topic
    # Creation timestamp; used together with _learning_time to gate predictions.
    self._date_of_birth = dt.datetime.now()
    self._learning_time = learning_time
    self._sensors = {}
    self._metrics = {}
    self._switch = None
    self._blinds = None
    self._number_of_sensors = number_of_sensors
    self._number_of_metrics = number_of_metrics
    self._logger = logger
    # Last feature vector seen and the prediction made from it.
    self._last_example = np.array([])
    self._last_pred = None
    self._able_to_predict = True
    if model == 1:
        self.log_message('info', 'Using AdaptiveRandomForestRegressor model')
        self._model = AdaptiveRandomForestRegressor(
            random_state=43,
            n_estimators=100,
            grace_period=50,
            max_features=11,
            leaf_prediction='mean',
            split_confidence=0.09,
            lambda_value=10,
        )
    elif model == 2:
        self.log_message('info', 'Using PARegressor model')
        # Scale features, drop the 'lights' feature, then fit a
        # passive-aggressive regressor.
        self._model = preprocessing.StandardScaler() | compose.Discard(
            'lights') | linear_model.PARegressor(C=0.05, mode=1, eps=0.1)
    else:
        # Fail fast instead of leaving self._model undefined.
        raise ValueError(f'Unknown model selector: {model!r} (expected 1 or 2)')
def get_all_estimators():
    """Yield one ready-to-use instance of every testable creme estimator.

    Walks all ``creme`` submodules, skips wrappers and estimators that
    cannot be default-constructed meaningfully, and instantiates the
    rest — supplying required constructor arguments for the known
    special cases. The issubclass checks are order-sensitive and are
    kept in their original order.
    """
    skipped = (Creme2SKLBase, SKL2CremeBase, compat.PyTorch2CremeRegressor,
               compose.FuncTransformer, compose.Pipeline,
               ensemble.StackingBinaryClassifier, feature_extraction.Agg,
               feature_extraction.TargetAgg, feature_extraction.Differ,
               feature_selection.PoissonInclusion,
               imblearn.RandomOverSampler, imblearn.RandomUnderSampler,
               imblearn.RandomSampler, impute.PreviousImputer,
               impute.StatImputer, linear_model.FFMClassifier,
               linear_model.FFMRegressor, linear_model.FMClassifier,
               linear_model.FMRegressor, linear_model.HOFMClassifier,
               linear_model.HOFMRegressor, linear_model.SoftmaxRegression,
               meta.PredClipper, meta.TransformedTargetRegressor,
               multioutput.ClassifierChain, multioutput.RegressorChain,
               preprocessing.OneHotEncoder, reco.Baseline, reco.BiasedMF,
               reco.FunkMF, reco.RandomNormal, time_series.Detrender,
               time_series.GroupDetrender, time_series.SNARIMAX)

    def looks_like_estimator(candidate):
        # Only concrete classes deriving from the creme Estimator base.
        return inspect.isclass(candidate) and issubclass(candidate, base.Estimator)

    for module_name in importlib.import_module('creme').__all__:
        if module_name == 'base':
            continue
        module = importlib.import_module(f'creme.{module_name}')
        for _, klass in inspect.getmembers(module, looks_like_estimator):
            if issubclass(klass, skipped):
                continue
            # Special cases that need constructor arguments or a
            # preprocessing pipeline around them.
            if issubclass(klass, dummy.StatisticRegressor):
                instance = klass(statistic=stats.Mean())
            elif issubclass(klass, meta.BoxCoxRegressor):
                instance = klass(regressor=linear_model.LinearRegression())
            elif issubclass(klass, tree.RandomForestClassifier):
                instance = klass()
            elif issubclass(klass, ensemble.BaggingClassifier):
                instance = klass(linear_model.LogisticRegression())
            elif issubclass(klass, ensemble.BaggingRegressor):
                instance = klass(linear_model.LinearRegression())
            elif issubclass(klass, ensemble.AdaBoostClassifier):
                instance = klass(linear_model.LogisticRegression())
            elif issubclass(klass, ensemble.HedgeRegressor):
                instance = klass([
                    preprocessing.StandardScaler() | linear_model.LinearRegression(intercept_lr=.1),
                    preprocessing.StandardScaler() | linear_model.PARegressor(),
                ])
            elif issubclass(klass, feature_selection.SelectKBest):
                instance = klass(similarity=stats.PearsonCorrelation())
            elif issubclass(klass, linear_model.LinearRegression):
                instance = preprocessing.StandardScaler() | klass(intercept_lr=.1)
            elif issubclass(klass, linear_model.PARegressor):
                instance = preprocessing.StandardScaler() | klass()
            elif issubclass(klass, multiclass.OneVsRestClassifier):
                instance = klass(binary_classifier=linear_model.LogisticRegression())
            else:
                instance = klass()
            yield instance
def get_all_estimators():
    """Yield an instance of each creme estimator that can be tested.

    Iterates over every ``creme`` submodule, filters out wrappers and
    estimators excluded from default construction, and yields instances,
    supplying required constructor arguments for the known special
    cases. The issubclass chain is order-sensitive and preserved as-is.
    """
    excluded = (CremeBaseWrapper, SKLBaseWrapper, base.Wrapper,
                compose.FuncTransformer, ensemble.StackingBinaryClassifier,
                feature_extraction.Agg, feature_extraction.TargetAgg,
                feature_extraction.Differ, linear_model.FMRegressor,
                linear_model.SoftmaxRegression, multioutput.ClassifierChain,
                multioutput.RegressorChain, naive_bayes.BernoulliNB,
                naive_bayes.ComplementNB, preprocessing.OneHotEncoder,
                tree.DecisionTreeClassifier)

    def concrete_estimator(candidate):
        # Restrict to classes that inherit from the creme Estimator base.
        return inspect.isclass(candidate) and issubclass(candidate, base.Estimator)

    for module_name in importlib.import_module('creme').__all__:
        if module_name == 'base':
            continue
        module = importlib.import_module(f'creme.{module_name}')
        for member_name, klass in inspect.getmembers(module, concrete_estimator):
            if issubclass(klass, excluded):
                continue
            # Estimators that need explicit constructor arguments.
            if issubclass(klass, dummy.StatisticRegressor):
                instance = klass(statistic=stats.Mean())
            elif issubclass(klass, ensemble.BaggingClassifier):
                instance = klass(linear_model.LogisticRegression())
            elif issubclass(klass, ensemble.BaggingRegressor):
                instance = klass(linear_model.LinearRegression())
            elif issubclass(klass, ensemble.HedgeRegressor):
                instance = klass([
                    preprocessing.StandardScaler() | linear_model.LinearRegression(intercept_lr=0.1),
                    preprocessing.StandardScaler() | linear_model.PARegressor(),
                ])
            elif issubclass(klass, feature_selection.RandomDiscarder):
                instance = klass(n_to_keep=5)
            elif issubclass(klass, feature_selection.SelectKBest):
                instance = klass(similarity=stats.PearsonCorrelation())
            elif issubclass(klass, linear_model.LinearRegression):
                instance = preprocessing.StandardScaler() | klass(intercept_lr=0.1)
            elif issubclass(klass, linear_model.PARegressor):
                instance = preprocessing.StandardScaler() | klass()
            elif issubclass(klass, multiclass.OneVsRestClassifier):
                instance = klass(binary_classifier=linear_model.LogisticRegression())
            else:
                instance = klass()
            yield instance
def build_model(data, frame_nos, max_frame, tot_objects, width, height, nrow_tiles, ncol_tiles, fps, pred_nframe):
    """Train an online PARegressor on viewport traces and evaluate chunk-wise.

    Warm-starts the model on the first 5 seconds of frames, then repeatedly
    predicts a chunk of up to ``pred_nframe`` frames (via ``pred_frames``),
    updates the model on that chunk, and accumulates per-chunk errors.

    Returns:
        (act_tiles, pred_tiles, chunk_frames, manhattan_error, x_mae, y_mae)

    NOTE(review): ``tot_objects`` is accepted but never used in this body.
    """
    # NOTE(review): ``data``, ``learning_rate`` and ``rho`` are not standard
    # creme PARegressor kwargs — presumably a patched/custom class; confirm.
    model = linear_model.PARegressor(C=0.01, mode=2, eps=0.001, data=data, learning_rate=0.005, rho=0.99)
    metric_X = metrics.MAE()
    metric_Y = metrics.MAE()
    manhattan_error = []
    x_mae = []
    y_mae = []
    count = 0
    i = 0
    tile_manhattan_error = 0
    act_tiles, pred_tiles = [], []
    chunk_frames = []

    # Initial training of first 5 seconds
    prev_frames = {0}
    while True:
        curr_frame = frame_nos[i]
        prev_frames.add(i)
        if curr_frame < 5 * fps:
            i = i + 1
            # data[curr_frame] unpacks to (input features, x target, y target).
            [inp_i, x, y] = data[curr_frame]
            model = model.fit_one(inp_i, x, y)
        else:
            break

    prev_frames = sorted(prev_frames)
    cnt = 0

    # Predicting frames and update model
    while True:
        curr_frame = frame_nos[i]
        # Size of the next prediction chunk, clipped at the end of the video.
        nframe = min(pred_nframe, max_frame - frame_nos[i])
        if (nframe < 1):
            break

        # Collect the indices whose frame numbers fall inside this chunk.
        frames = {i}
        for k in range(i + 1, len(frame_nos)):
            if (frame_nos[k] < curr_frame + nframe):
                frames.add(k)
            else:
                i = k
                break
        # NOTE(review): if the for-loop body never runs (i + 1 >= len(frame_nos)),
        # ``k`` is undefined here and this raises NameError — confirm callers
        # never reach this state.
        if (i != k):
            i = k

        if (i == (len(frame_nos) - 1)):
            break

        frames = sorted(frames)
        chunk_frames.append(frames)

        # Evaluate the current model on this chunk before training on it.
        metric_X, metric_Y, tile_manhattan_error, count, act_tiles, pred_tiles = pred_frames(
            data, model, metric_X, metric_Y, frames, prev_frames, tile_manhattan_error,
            act_tiles, pred_tiles, count, width, height, nrow_tiles, ncol_tiles)
        model = model.fit_n(frames)

        # NOTE(review): list concatenation here may introduce duplicates and
        # an unsorted tail in prev_frames — verify that is intended.
        prev_frames = prev_frames + frames

        # Per-chunk running error snapshots.
        manhattan_error.append(tile_manhattan_error * 1.0 / count)
        x_mae.append(metric_X.get())
        y_mae.append(metric_Y.get())

        print("Manhattan Tile Error: " + str(tile_manhattan_error * 1.0 / count))
        print(metric_X, metric_Y)
        print("\n")

        cnt = cnt + 1
        # Hard cap of 60 chunks per run.
        if cnt == 60:
            break

    return act_tiles, pred_tiles, chunk_frames, manhattan_error, x_mae, y_mae