# Project-internal imports (DatasetService, parse_shap_values, load_hierarchy)
# are omitted here; their paths depend on the package layout.
import matplotlib.pyplot as plt
import pandas as pd
import shap


def main(dataset: str, target: str, symbol: str):
    ds_service = DatasetService()
    ds = ds_service.get_dataset(name=dataset, symbol=symbol)
    # Use the requested target instead of a hard-coded 'class'
    fs = DatasetService.get_feature_selection(ds=ds, method='importances_shap', target=target)
    # hierarchy = load_hierarchy(f"{dataset}_{target}_feature_hierarchy.yml", importances=fs.feature_importances)
    # hdf = pd.DataFrame(hierarchy)
    # fig = px.treemap(hdf, path=['category', 'subgroup', 'name'], values='importance')
    # fig.show()
    #
    # fig = px.sunburst(hdf, path=['category', 'subgroup', 'name'], values='importance')
    # fig.show()
    shap_values, shap_expected_values = parse_shap_values(fs.shap_values)
    X = ds_service.get_dataset_features(ds=ds, begin=fs.search_interval.begin, end=fs.search_interval.end)
    y = ds_service.get_target(name=target, symbol=symbol,
                              begin=fs.search_interval.begin, end=fs.search_interval.end)

    # Per-class SHAP summary plot over the feature-selection search interval
    fig = plt.figure()
    plt.suptitle(f"Shap summary plot for {dataset}.{symbol} -> {target}")
    shap.summary_plot(shap_values, X, class_names=["SELL", "HOLD", "BUY"], show=False,
                      max_display=352, use_log_scale=True)
    plt.tight_layout()
    fig.show()

    # Build one DataFrame of SHAP values per class, suffix each column with the
    # class index, then join them side by side with columns sorted by name.
    shap_dfs = []
    for cls, arr in enumerate(shap_values):
        class_df = pd.DataFrame(arr, columns=X.columns, index=X.index)
        class_df.columns = [f"{c}_class{cls}" for c in class_df.columns]
        shap_dfs.append(class_df)
    shap_df = pd.concat(shap_dfs, axis='columns')
    shap_df = shap_df.reindex(sorted(shap_df.columns), axis=1)
    print(shap_df.head())
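# A minimal entrypoint sketch for running the plot above from the command line.
# The argparse flags below are illustrative; the original module may wire this
# up differently (e.g. with a CLI framework such as typer).
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description="SHAP summary plot for a dataset/symbol/target triple")
    parser.add_argument('--dataset', required=True)
    parser.add_argument('--symbol', required=True)
    parser.add_argument('--target', default='class')
    args = parser.parse_args()
    main(dataset=args.dataset, target=args.target, symbol=args.symbol)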
def predict_day(self, pipeline: str, dataset: str, target: str, symbol: str, day: str, window: dict):
    model = self.get_model(pipeline, dataset, target, symbol)
    # Load dataset
    ds = DatasetService()
    d = ds.get_dataset(model.dataset, model.symbol)
    # Get training data including the first training window
    begin = sub_interval(timestamp=day, interval=window)
    if from_timestamp(d.valid_index_min).timestamp() > from_timestamp(begin).timestamp():
        raise MessageException(
            "Not enough data for training! [Pipeline: {} Dataset: {} Symbol: {} Window: {}]"
            .format(model.pipeline, model.dataset, model.symbol, window))
    X = ds.get_features(model.dataset, model.symbol, begin=begin, end=day)
    y = ds.get_target(model.target, model.symbol, begin=begin, end=day)
    unique, counts = np.unique(y, return_counts=True)
    if len(unique) < 2:
        logging.error(
            "[{}-{}-{}-{}] Training data contains less than 2 classes: {}"
            .format(model.symbol, model.dataset, model.target, model.pipeline, unique))
        raise MessageException("Training data contains less than 2 classes: {}".format(unique))
    # Load pipeline
    pipeline_module = get_pipeline(model.pipeline)
    # Predict the requested day using the most recent parameter set.
    # Note: this calls the module-level predict_day helper, which shares its
    # name with this method.
    df = predict_day(pipeline_module.estimator, model.parameters[-1], X, y, day)
    return df
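# Usage sketch for predict_day. All literal values are hypothetical, and the
# owning service object (assumed to expose get_model) is passed in rather than
# named; the window dict shape follows sub_interval's usage above.
def example_predict_day(service):
    return service.predict_day(
        pipeline='mlp',        # hypothetical pipeline name
        dataset='ohlcv',       # hypothetical dataset name
        target='class',
        symbol='BTC-USD',      # hypothetical symbol
        day='2021-01-01',
        window={'days': 30},   # assumed interval format
    )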
def test_model(self, model: Model, mt: ModelTest, **kwargs):
    if not model.id:
        model = self.model_repo.create(model)
    if self.model_repo.exist_test(model.id, mt.task_key):
        logging.info("Model {} test {} already executed!".format(model.id, mt.task_key))
        return mt
    # Load dataset
    ds = DatasetService()
    d = ds.get_dataset(model.dataset, model.symbol)
    # Get training data including the first training window
    begin = sub_interval(timestamp=mt.test_interval.begin, interval=mt.window)
    end = add_interval(timestamp=mt.test_interval.end, interval=mt.step)
    if from_timestamp(d.valid_index_min).timestamp() > from_timestamp(begin).timestamp():
        raise MessageException(
            "Not enough data for training! [Pipeline: {} Dataset: {} Symbol: {} Window: {}]"
            .format(model.pipeline, model.dataset, model.symbol, mt.window))
    X = ds.get_features(model.dataset, model.symbol, begin=begin, end=end)
    y = ds.get_target(model.target, model.symbol, begin=begin, end=end)
    unique, counts = np.unique(y, return_counts=True)
    if len(unique) < 2:
        logging.error(
            "[{}-{}-{}-{}] Training data contains less than 2 classes: {}"
            .format(model.symbol, model.dataset, model.target, model.pipeline, unique))
        raise MessageException("Training data contains less than 2 classes: {}".format(unique))
    # Load pipeline
    pipeline_module = get_pipeline(model.pipeline)
    # Slice testing interval in windows
    ranges = timestamp_windows(begin, end, mt.window, mt.step)
    mt.start_at = get_timestamp()
    df = test_windows(pipeline_module.estimator, mt.parameters, X, y, ranges)
    mt.end_at = get_timestamp()
    mt.classification_results = df.to_dict()
    clf_report = flattened_classification_report_imbalanced(df.label, df.predicted)
    roc_report = roc_auc_report(df.label, df.predicted,
                                df[[c for c in df.columns if '_proba_' in c]])
    clf_report.update(roc_report)
    mt.classification_report = clf_report
    self.model_repo.append_test(model.id, mt)
    return mt
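# Illustrative sketch of the walk-forward slicing that timestamp_windows and
# test_windows imply: each window trains on [t - window, t) and is evaluated on
# the step that follows. This is a simplified pandas stand-in under assumed
# day-based intervals, not the project's actual implementation.
def example_walk_forward(begin: str, end: str, window_days: int = 30, step_days: int = 1):
    import pandas as pd

    # Enumerate (train_begin, train_end) pairs covering the test interval
    days = pd.date_range(begin, end, freq=f'{step_days}D')
    return [(day - pd.Timedelta(days=window_days), day) for day in days]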
def get_dataset(
    symbol: str,
    dataset: Optional[str] = None,
    target: Optional[str] = None,
    begin: Optional[str] = None,
    end: Optional[str] = None,
    service: DatasetService = Depends(DatasetService),
):
    if not dataset and not target:
        raise HTTPException(
            status_code=400,
            detail="At least one of 'dataset' or 'target' parameters must be specified!")
    # When only a target is requested, fall back to the 'target' dataset record
    _name = dataset
    if not _name:
        _name = 'target'
    d = service.get_dataset(name=_name, symbol=symbol)
    # If begin/end are not specified, use the recorded index range;
    # 'auto' selects the valid (non-NaN) index range instead.
    if not begin:
        begin = d.index_min
    elif begin == 'auto':
        begin = d.valid_index_min
    if not end:
        end = d.index_max
    elif end == 'auto':
        end = d.valid_index_max
    # Retrieve dataframes
    dfs = []
    if dataset:
        dfs.append(service.get_features(name=dataset, symbol=symbol, begin=begin, end=end))
    if target:
        dfs.append(service.get_target(name=target, symbol=symbol, begin=begin, end=end))
    # Concatenate features and target
    res = pd.concat(dfs, axis='columns') if len(dfs) > 1 else dfs[0]
    # Return CSV
    return res.to_csv(index_label='time')
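# Usage sketch, assuming this endpoint is registered on a FastAPI app under a
# route such as GET /dataset/{symbol}. The route path and the application
# import are hypothetical.
def example_fetch_csv():
    from fastapi.testclient import TestClient
    from app.main import app  # hypothetical application module

    client = TestClient(app)
    resp = client.get('/dataset/BTC-USD',
                      params={'dataset': 'ohlcv', 'target': 'class',
                              'begin': 'auto', 'end': 'auto'})
    return resp.text  # CSV body with a 'time' index column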
class GridSearchService:
    def __init__(self):
        self.model_repo = ModelRepository()
        self.model_service = ModelService()
        self.dataset_service = DatasetService()

    def create_parameters_search(self, model: Model, split: float, **kwargs) -> ModelParameters:
        ds = self.dataset_service.get_dataset(model.dataset, model.symbol)
        splits = DatasetService.get_train_test_split_indices(ds, split)
        # Features can either be a list of features to use, or a string naming
        # a feature selection method whose stored result should be used.
        features = kwargs.get('features')
        # if isinstance(features, str) and features == 'latest':
        #     if model.features:
        #         features = model.features[-1].features
        #     else:
        #         features = None
        if features:
            target = kwargs.get('target', 'class')
            mf = DatasetService.get_feature_selection(
                ds=ds, method=kwargs.get('features'), target=target)
            if not mf:
                raise MessageException(
                    f"Feature selection not found for {model.dataset}.{model.symbol} -> {target}!")
            features = mf.features
        # Determine K for K-fold cross validation based on the dataset's sample count.
        # Train-test split for each fold is 80% train; the lowest training window for
        # accurate results is 30 samples, so we need X samples per fold where
        # 30/0.8 = X/1  =>  X = 37.5 ~ 40 samples per fold.
        min_fold_samples = 40
        k = 5
        # If samples per fold with 5-fold CV are too low, use 3 folds
        if ds.count / k < min_fold_samples:
            k = 3
        # If samples are still too low, raise a value error
        if ds.count / k < min_fold_samples and not kwargs.get("permissive"):
            raise ValueError("Not enough samples to perform cross validation!")
        result = ModelParameters(cv_interval=splits['train'],
                                 cv_splits=k,
                                 task_key=kwargs.get('task_key', str(uuid4())),
                                 features=features or None)
        return result

    def _get_dataset_and_pipeline(self, model: Model, mp: ModelParameters, **kwargs):
        if not model.id:  # Make sure the task exists
            model = self.model_repo.create(model)
        if self.model_repo.exist_parameters(model.id, mp.task_key):
            logging.info("Model {} Grid search {} already executed!".format(model.id, mp.task_key))
            return mp
        # Load dataset
        X = self.dataset_service.get_features(model.dataset, model.symbol,
                                              mp.cv_interval.begin, mp.cv_interval.end,
                                              columns=mp.features)
        y = self.dataset_service.get_target(model.target, model.symbol,
                                            mp.cv_interval.begin, mp.cv_interval.end)
        unique, counts = np.unique(y, return_counts=True)
        if len(unique) < 2:
            logging.error(
                "[{}-{}-{}-{}] Training data contains less than 2 classes: {}"
                .format(model.symbol, model.dataset, model.target, model.pipeline, unique))
            raise MessageException("Training data contains less than 2 classes: {}".format(unique))
        logging.info("Dataset loaded: X {} y {} (unique: {})".format(X.shape, y.shape, unique))
        # Load pipeline
        pipeline_module = get_pipeline(model.pipeline)
        return pipeline_module, X, y

    def grid_search(self, model: Model, mp: ModelParameters, **kwargs) -> ModelParameters:
        pipeline_module, X, y = self._get_dataset_and_pipeline(model, mp)
        tag = "{}-{}-{}-{}-{}".format(model.symbol, model.dataset, model.target,
                                      model.pipeline, dict_hash(mp.parameters))
        # Perform search
        if not kwargs.get('halving'):
            gscv = GridSearchCV(
                estimator=pipeline_module.estimator,
                param_grid=kwargs.get('parameter_grid', pipeline_module.PARAMETER_GRID),
                # cv=BlockingTimeSeriesSplit(n_splits=mp.cv_splits),
                cv=StratifiedKFold(n_splits=mp.cv_splits),
                scoring=get_precision_scorer(),
                verbose=kwargs.get("verbose", 0),
                n_jobs=kwargs.get("n_jobs", None),
                refit=False)
        else:
            gscv = HalvingGridSearchCV(
                estimator=pipeline_module.estimator,
                param_grid=kwargs.get('parameter_grid', pipeline_module.PARAMETER_GRID),
                factor=2,
                cv=BlockingTimeSeriesSplit(n_splits=mp.cv_splits),
                scoring=get_precision_scorer(),
                verbose=kwargs.get("verbose", 0),
                n_jobs=kwargs.get("n_jobs", cpu_count() // 2),  # n_jobs must be an int
                refit=False,
                random_state=0)
        try:
            mp.start_at = get_timestamp()  # Log starting timestamp
            gscv.fit(X, y)
            mp.end_at = get_timestamp()  # Log ending timestamp
        except SplitException as e:
            logging.exception(
                "Model {} splitting yields single-class folds!\n{}".format(tag, e.message))
            return mp  # Fit failed, don't save this.
        except ValueError as e:
            logging.exception("Model {} raised ValueError!\n{}".format(tag, e))
            return mp  # Fit failed, don't save this.
        # Collect results
        results_df = pd.DataFrame(gscv.cv_results_)
        # Update search request with results
        mp.parameter_search_method = 'halving_grid_search' if kwargs.get('halving') else 'gridsearch'
        mp.parameters = gscv.best_params_
        mp.cv_results = results_df.to_dict()
        mp.result_file = 'cv_results-{}.csv'.format(tag)
        # Save grid search results on storage
        if kwargs.get('save', True):
            storage_service.upload_json_obj(mp.parameters, 'grid-search-results',
                                            'parameters-{}.json'.format(tag))
            storage_service.save_df(results_df, 'grid-search-results', mp.result_file)
        # Update model with the new results
        self.model_repo.append_parameters(model.id, mp)
        return mp

    def random_search(self, model: Model, mp: ModelParameters, **kwargs) -> ModelParameters:
        pipeline_module, X, y = self._get_dataset_and_pipeline(model, mp)
        tag = "{}-{}-{}-{}-{}".format(model.symbol, model.dataset, model.target,
                                      model.pipeline, dict_hash(mp.parameters))
        rscv = RandomizedSearchCV(
            estimator=pipeline_module.estimator,
            param_distributions=kwargs.get('param_distributions',
                                           pipeline_module.PARAMETER_DISTRIBUTION),
            n_iter=kwargs.get('n_iter', 10),
            cv=StratifiedKFold(n_splits=mp.cv_splits),
            scoring=get_precision_scorer(),
            verbose=kwargs.get("verbose", 0),
            n_jobs=kwargs.get("n_jobs", None),
            refit=False,
            random_state=0)
        try:
            mp.start_at = get_timestamp()  # Log starting timestamp
            rscv.fit(X, y)
            mp.end_at = get_timestamp()  # Log ending timestamp
        except SplitException as e:
            logging.exception(
                "Model {} splitting yields single-class folds!\n{}".format(tag, e.message))
            return mp  # Fit failed, don't save this.
        except ValueError as e:
            logging.exception("Model {} raised ValueError!\n{}".format(tag, e))
            return mp  # Fit failed, don't save this.
        # Collect results
        results_df = pd.DataFrame(rscv.cv_results_)
        # Update search request with results
        mp.parameter_search_method = 'randomsearch'
        mp.parameters = rscv.best_params_
        mp.result_file = 'cv_results-{}.csv'.format(tag)
        # Save random search results on storage
        if kwargs.get('save', True):
            storage_service.upload_json_obj(mp.parameters, 'random-search-results',
                                            'parameters-{}.json'.format(tag))
            storage_service.save_df(results_df, 'random-search-results', mp.result_file)
        # Update model with the new results
        self.model_repo.append_parameters(model.id, mp)
        return mp

    def grid_search_new(self, symbol: str, dataset: str, target: str, pipeline: str,
                        split: float, feature_selection_method: str, **kwargs):
        # Check if a model exists and already has grid search results
        existing_model = self.model_service.get_model(pipeline=pipeline,
                                                      dataset=dataset,
                                                      target=target,
                                                      symbol=symbol)
        if existing_model:
            mp_exists = ModelService.get_model_parameters(existing_model, method='gridsearch')
            if mp_exists:
                if kwargs.get('replace'):
                    self.model_service.remove_parameters(model=existing_model, method='gridsearch')
                else:
                    if kwargs.get('save'):
                        raise MessageException(
                            f"Grid search already performed for {pipeline}({dataset}.{symbol}) -> {target}")
        # Retrieve dataset to use
        ds = self.dataset_service.get_dataset(dataset, symbol)
        # Determine cv_splits=K for K-fold cross validation based on the dataset's
        # sample count. Train-test split for each fold is 80% train; the lowest
        # training window for accurate results is 30 samples, so we need
        # 30/0.8 = 37.5 ~ 40 samples per fold.
        min_fold_samples = 40
        cv_splits = 5
        # If samples per fold with 5-fold CV are too low, use 3 folds
        if ds.count / cv_splits < min_fold_samples:
            cv_splits = 3
        # If samples are still too low, raise a value error
        if ds.count / cv_splits < min_fold_samples and not kwargs.get("permissive"):
            raise ValueError("Not enough samples to perform cross validation!")
        # Determine split indices based on dataset
        splits = DatasetService.get_train_test_split_indices(ds, split)
        cv_interval = splits['train']
        # Load dataset features by applying the specified feature selection method
        X = self.dataset_service.get_dataset_features(
            ds=ds,
            begin=cv_interval['begin'],
            end=cv_interval['end'],
            method=feature_selection_method,
            target=target)
        y = self.dataset_service.get_target(
            name=target,
            symbol=symbol,
            begin=cv_interval['begin'],
            end=cv_interval['end'],
        )
        # Check the number of samples for each class in the training data: with
        # fewer than 3 instances per class the model is very unstable, and k-NN
        # based algorithms may not fit at all.
        unique, counts = np.unique(y, return_counts=True)
        if len(unique) < 2:
            logging.error(
                "[{}-{}-{}-{}] Training data contains less than 2 classes: {}"
                .format(symbol, dataset, target, pipeline, unique))
            raise MessageException("Training data contains less than 2 classes: {}".format(unique))
        logging.info("Dataset loaded: X {} y {} (unique: {})".format(X.shape, y.shape, unique))
        # Load pipeline algorithm and parameter grid
        pipeline_module = get_pipeline(pipeline)
        # Perform search
        gscv = GridSearchCV(
            estimator=pipeline_module.estimator,
            param_grid=kwargs.get('parameter_grid', pipeline_module.PARAMETER_GRID),
            # cv=BlockingTimeSeriesSplit(n_splits=cv_splits),
            cv=StratifiedKFold(n_splits=cv_splits),
            scoring=get_precision_scorer(),
            verbose=kwargs.get("verbose", 0),
            n_jobs=kwargs.get("n_jobs", None),
            refit=False)
        mp = ModelParameters(cv_interval=splits['train'],
                             cv_splits=cv_splits,
                             task_key=kwargs.get('task_key', str(uuid4())),
                             features=[c for c in X.columns],
                             parameter_search_method='gridsearch')
        mp.start_at = get_timestamp()
        gscv.fit(X, y)
        mp.end_at = get_timestamp()
        # Collect results
        results_df = pd.DataFrame(gscv.cv_results_)
        mp.parameters = gscv.best_params_
        mp.cv_results = results_df.loc[:, results_df.columns != 'params'].to_dict('records')
        tag = "{}-{}-{}-{}-{}".format(symbol, dataset, target, pipeline, dict_hash(mp.parameters))
        mp.result_file = 'cv_results-{}.csv'.format(tag)
        # Create a new model document holding this search's results
        model = Model(pipeline=pipeline,
                      dataset=dataset,
                      target=target,
                      symbol=symbol,
                      features=feature_selection_method)
        model.parameters.append(mp)
        self.model_repo.create(model)
        # Save grid search results on storage
        if kwargs.get('save', True):
            storage_service.upload_json_obj(mp.parameters, 'grid-search-results',
                                            'parameters-{}.json'.format(tag))
            storage_service.save_df(results_df, 'grid-search-results', mp.result_file)
        return mp
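# The fold-sizing rule above appears in both create_parameters_search and
# grid_search_new; a small helper capturing the same arithmetic (each fold
# should hold at least 30/0.8 = 37.5 ~ 40 samples) could read as follows.
# Purely an illustrative refactoring, not part of the service.
def choose_cv_splits(sample_count: int, min_fold_samples: int = 40, permissive: bool = False) -> int:
    for k in (5, 3):
        if sample_count / k >= min_fold_samples:
            return k
    if permissive:
        return 3  # accept undersized folds when explicitly allowed
    raise ValueError("Not enough samples to perform cross validation!")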
class FeatureSelectionService:
    def __init__(self):
        self.model_repo = ModelRepository()
        self.dataset_service = DatasetService()

    def create_features_search(self, *, symbol: str, dataset: str, target: str,
                               split: float, method: str, task_key: str = None) -> ModelFeatures:
        ds = self.dataset_service.get_dataset(dataset, symbol)
        splits = DatasetService.get_train_test_split_indices(ds, split)
        result = ModelFeatures(dataset=dataset,
                               target=target,
                               symbol=symbol,
                               search_interval=splits['train'],
                               feature_selection_method=method,
                               task_key=task_key or str(uuid4()))
        return result

    def feature_selection(self, mf: ModelFeatures, **kwargs) -> ModelFeatures:
        # Load dataset
        X = self.dataset_service.get_features(mf.dataset, mf.symbol,
                                              mf.search_interval.begin,
                                              mf.search_interval.end,
                                              columns=mf.features)
        y = self.dataset_service.get_target(mf.target, mf.symbol,
                                            mf.search_interval.begin,
                                            mf.search_interval.end)
        unique, counts = np.unique(y, return_counts=True)
        if len(unique) < 2:
            logging.error(
                "[{}-{}-{}] Training data contains less than 2 classes: {}"
                .format(mf.symbol, mf.dataset, mf.target, unique))
            raise MessageException("Training data contains less than 2 classes: {}".format(unique))
        # Perform search
        mf.start_at = get_timestamp()  # Log starting timestamp
        if not mf.feature_selection_method or mf.feature_selection_method == 'importances':
            selector = select_from_model(X, y)
            mf.feature_importances = label_feature_importances(selector.estimator_, X.columns)
        elif mf.feature_selection_method == 'importances_cv':
            selector = select_from_model_cv(X, y)
            mf.feature_importances = label_feature_importances(
                selector.estimator_.best_estimator_, X.columns)
        elif mf.feature_selection_method == 'fscore':
            selector = select_percentile(X, y, percentile=10)
        elif mf.feature_selection_method == 'relieff':
            selector = select_relieff(X, y, percentile=10)
        elif mf.feature_selection_method == 'multisurf':
            selector = select_multisurf(X, y, percentile=10)
        else:
            raise NotFoundException(
                "Cannot find feature selection method by {}".format(mf.feature_selection_method))
        mf.end_at = get_timestamp()  # Log ending timestamp
        # Update search request with results
        mf.features = label_support(selector.get_support(), X.columns)
        # Update model with the new results
        if kwargs.get('save', True):
            self.model_repo.append_features_query(
                {"dataset": mf.dataset, "symbol": mf.symbol, "target": mf.target}, mf)
        return mf

    def get_available_symbols(self, dataset: str):
        return self.dataset_service.get_dataset_symbols(name=dataset)

    def feature_selection_new(self, *, symbol: str, dataset: str, target: str,
                              split: float, method: str, **kwargs) -> ModelFeatures:
        ds = self.dataset_service.get_dataset(dataset, symbol)
        fs_exists = DatasetService.has_feature_selection(ds=ds, method=method, target=target)
        if fs_exists:
            if kwargs.get('replace'):
                self.dataset_service.remove_feature_selection(ds=ds, method=method, target=target)
            else:
                if kwargs.get('save'):
                    raise MessageException(
                        f"Feature selection with method '{method}' already performed"
                        f" for '{dataset}.{symbol}' and target '{target}'")
        splits = DatasetService.get_train_test_split_indices(ds, split)
        fs = FeatureSelection(target=target,
                              method=method,
                              search_interval=splits['train'],
                              task_key=kwargs.get('task_key', str(uuid4())))
        # Load dataset
        X = self.dataset_service.get_dataset_features(
            ds=ds, begin=fs.search_interval.begin, end=fs.search_interval.end)
        y = self.dataset_service.get_dataset_target(
            name=fs.target, ds=ds, begin=fs.search_interval.begin, end=fs.search_interval.end)
        unique, counts = np.unique(y, return_counts=True)
        if len(unique) < 2:
            logging.error(
                "[{}-{}-{}] Training data contains less than 2 classes: {}"
                .format(symbol, dataset, target, unique))
            raise MessageException("Training data contains less than 2 classes: {}".format(unique))
        # Perform search
        fs.start_at = get_timestamp()  # Log starting timestamp
        if not fs.method or 'importances' in fs.method:
            # Guard the substring checks so a None method defaults to plain importances
            if fs.method and '_cv' in fs.method:
                selector = select_from_model_cv(X, y)
                # The CV selector wraps a search object: read importances from its
                # best estimator, mirroring feature_selection above.
                fitted_estimator = selector.estimator_.best_estimator_
            else:
                selector = select_from_model(X, y)
                fitted_estimator = selector.estimator_
            fs.feature_importances = label_feature_importances(fitted_estimator, X.columns)
            if fs.method and '_shap' in fs.method:
                # The fitted estimator is a pipeline whose classifier step is named 'c'
                fs.shap_values = get_shap_values(
                    model=selector.estimator_.named_steps.c, X=X, X_train=X)
                shap_values = parse_shap_values(fs.shap_values)
        elif fs.method == 'fscore':
            selector = select_percentile(X, y, percentile=10)
        elif fs.method == 'relieff':
            selector = select_relieff(X, y, percentile=10)
        elif fs.method == 'multisurf':
            selector = select_multisurf(X, y, percentile=10)
        else:
            raise NotFoundException(
                "Cannot find feature selection method by {}".format(fs.method))
        fs.end_at = get_timestamp()  # Log ending timestamp
        # Update search request with results
        fs.features = label_support(selector.get_support(), X.columns)
        if not kwargs.get('save'):
            return fs
        return self.dataset_service.append_feature_selection(ds, fs)
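# Usage sketch chaining the two services: run a SHAP-based feature selection,
# then a grid search restricted to the selected features. All literal values
# (symbol, dataset, pipeline names) are hypothetical.
def example_search_pipeline():
    fss = FeatureSelectionService()
    fss.feature_selection_new(symbol='BTC-USD', dataset='ohlcv', target='class',
                              split=0.7, method='importances_shap', save=True)
    gss = GridSearchService()
    return gss.grid_search_new(symbol='BTC-USD', dataset='ohlcv', target='class',
                               pipeline='mlp', split=0.7,
                               feature_selection_method='importances_shap', save=True)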