def scale(x_train, x_test=None, list_of_cols=None, method="minmax", keep_col=False, **algo_kwargs):
    """
    Scales data according to a specific method.

    The scaler is fit on the training data only; the test data (if provided)
    is transformed with the statistics learned from the training data.

    Parameters
    ----------
    x_train : DataFrame
        Dataset
    x_test : DataFrame
        Testing dataset, by default None
    list_of_cols : list, optional
        A list of specific columns to apply this technique to.
        If `list_of_cols` is not provided, the strategy will be applied to
        all numeric columns, by default None
    method : str, optional
        Scaling method (a key of the module-level SCALER mapping), by default 'minmax'
    keep_col : bool, optional
        True to not remove the columns, by default False
    algo_kwargs : optional
        Parameters to pass into the scaler constructor from Scikit-Learn, by default {}

    Returns
    -------
    Dataframe, *Dataframe
        Transformed dataframe with rows normalized.
        Returns 2 Dataframes if x_test is provided.
    """
    # None sentinel instead of a mutable [] default (shared across calls).
    list_of_cols = _numeric_input_conditions(
        list_of_cols if list_of_cols is not None else [], x_train
    )

    scaler = SCALER[method](**algo_kwargs)

    scaled_data = scaler.fit_transform(x_train[list_of_cols])
    scaled_df = pd.DataFrame(scaled_data, columns=list_of_cols)
    x_train = drop_replace_columns(x_train, list_of_cols, scaled_df, keep_col=keep_col)

    if x_test is not None:
        # BUG FIX: transform only the fitted columns, not the whole frame.
        # Passing the full x_test breaks when it has extra/reordered columns
        # relative to what the scaler was fitted on.
        scaled_x_test = scaler.transform(x_test[list_of_cols])
        scaled_test_df = pd.DataFrame(scaled_x_test, columns=list_of_cols)
        x_test = drop_replace_columns(
            x_test, list_of_cols, scaled_test_df, keep_col=keep_col
        )

    return x_train, x_test
def normalize_numeric(
    self, *list_args, list_of_cols=None, keep_col=True, **normalize_params
):
    """
    Function that normalizes all numeric values between 2 values to bring features into same domain.

    If `list_of_cols` is not provided, the strategy will be applied to all numeric columns.

    If a list of columns is provided use the list, otherwise use arguments.

    For more info please see: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html#sklearn.preprocessing.MinMaxScaler

    This function can be found in `preprocess/numeric.py`

    Parameters
    ----------
    list_args : str(s), optional
        Specific columns to apply this technique to.
    list_of_cols : list, optional
        A list of specific columns to apply this technique to., by default None
    keep_col : bool, optional
        True to not remove the original columns, by default True
    normalize_params : dict, optional
        Parameters to pass into the MinMaxScaler() constructor from Scikit-Learn,
        e.g. `feature_range=(0, 1)` (min/max range to normalize values to)

    Returns
    -------
    Preprocess:
        Returns a deep copy of the Preprocess object.
    """
    report_info = technique_reason_repo["preprocess"]["numeric"]["standardize"]

    # None sentinel instead of a mutable [] default (shared across calls).
    list_of_cols = _input_columns(
        list_args, list_of_cols if list_of_cols is not None else []
    )

    self._data_properties.x_train, self._data_properties.x_test = scale(
        x_train=self._data_properties.x_train,
        x_test=self._data_properties.x_test,
        list_of_cols=list_of_cols,
        method="minmax",
        keep_col=keep_col,
        **normalize_params,
    )

    if self.report is not None:
        if list_of_cols:
            self.report.report_technique(report_info, list_of_cols)
        else:
            # No explicit columns: report the numeric columns that were scaled.
            list_of_cols = _numeric_input_conditions(
                list_of_cols, self._data_properties.x_train
            )
            self.report.report_technique(report_info, list_of_cols)

    return self.copy()
def replace_missing_mean_median_mode(x_train, x_test=None, list_of_cols=None, strategy=""):
    """
    Replaces missing values in every numeric column with the mean, median or mode of that column specified by strategy.

    Mean: Average value of the column. Affected by outliers.
    Median: Middle value of a list of numbers. Equal to the mean if x_train follows
        a normal distribution. Not affected much by anomalies.
    Mode: Most common number in a list of numbers.

    The imputer is fit on the training data only; the test data (if provided)
    is filled with the statistics learned from the training data.

    Parameters
    ----------
    x_train: Dataframe or array like - 2d
        Dataset
    x_test: Dataframe or array like - 2d
        Testing dataset, by default None.
    list_of_cols : list, optional
        A list of specific columns to apply this technique to.
        If `list_of_cols` is not provided, the strategy will be applied to
        all numeric columns., by default None
    strategy : str
        Strategy for replacing missing values.
        Can be either "mean", "median" or "most_frequent"

    Returns
    -------
    Dataframe, *Dataframe
        Transformed dataframe with missing values filled in the chosen columns.
        Returns 2 Dataframes if x_test is provided.
    """
    # None sentinel instead of a mutable [] default (shared across calls).
    list_of_cols = list_of_cols if list_of_cols is not None else []

    # mean/median are only defined for numeric columns; most_frequent can be
    # applied to any column, hence the broader _get_columns resolution.
    if strategy != "most_frequent":
        list_of_cols = _numeric_input_conditions(list_of_cols, x_train)
    else:
        list_of_cols = _get_columns(list_of_cols, x_train)

    imp = SimpleImputer(strategy=strategy)

    fit_data = imp.fit_transform(x_train[list_of_cols])
    fit_df = pd.DataFrame(fit_data, columns=list_of_cols)
    x_train = drop_replace_columns(x_train, list_of_cols, fit_df)

    if x_test is not None:
        fit_x_test = imp.transform(x_test[list_of_cols])
        fit_test_df = pd.DataFrame(fit_x_test, columns=list_of_cols)
        x_test = drop_replace_columns(x_test, list_of_cols, fit_test_df)

    return x_train, x_test
def replace_missing_median(self, *list_args, list_of_cols=None):
    """
    Replaces missing values in every numeric column with the median of that column.

    If no columns are supplied, missing values will be replaced with the median
    in every numeric column.

    Median: Middle value of a list of numbers. Equal to the mean if data follows
        a normal distribution. Not affected much by anomalies.

    If a list of columns is provided use the list, otherwise use arguments.

    This function exists in `clean/numeric.py` as `replace_missing_mean_median_mode`.

    Parameters
    ----------
    list_args : str(s), optional
        Specific columns to apply this technique to.
    list_of_cols : list, optional
        Specific columns to apply this technique to., by default None

    Returns
    -------
    Clean:
        Returns a deep copy of the Clean object.
    """
    report_info = technique_reason_repo["clean"]["numeric"]["median"]

    # If a list of columns is provided use the list, otherwise use arguments.
    # None sentinel instead of a mutable [] default (shared across calls).
    list_of_cols = _input_columns(
        list_args, list_of_cols if list_of_cols is not None else []
    )

    (
        self._data_properties.x_train,
        self._data_properties.x_test,
    ) = replace_missing_mean_median_mode(
        x_train=self._data_properties.x_train,
        x_test=self._data_properties.x_test,
        list_of_cols=list_of_cols,
        strategy="median",
    )

    if self.report is not None:
        if list_of_cols:
            self.report.report_technique(report_info, list_of_cols)
        else:
            # No explicit columns: report the numeric columns that were imputed.
            list_of_cols = _numeric_input_conditions(
                list_of_cols, self._data_properties.x_train
            )
            self.report.report_technique(report_info, list_of_cols)

    return self.copy()
def replace_missing_mostcommon(self, *list_args, list_of_cols=None):
    """
    Replaces missing values in every column with the most common value of that column.

    Mode: Most common value.

    If a list of columns is provided use the list, otherwise use arguments.

    This function exists in `clean/numeric.py` as `replace_missing_mean_median_mode`.

    Parameters
    ----------
    list_args : str(s), optional
        Specific columns to apply this technique to.
    list_of_cols : list, optional
        A list of specific columns to apply this technique to., by default None

    Returns
    -------
    Clean:
        Returns a deep copy of the Clean object.
    """
    report_info = technique_reason_repo["clean"]["numeric"]["mode"]

    # If a list of columns is provided use the list, otherwise use arguments.
    # None sentinel instead of a mutable [] default (shared across calls).
    list_of_cols = _input_columns(
        list_args, list_of_cols if list_of_cols is not None else []
    )

    (
        self._data_properties.x_train,
        self._data_properties.x_test,
    ) = replace_missing_mean_median_mode(
        x_train=self._data_properties.x_train,
        x_test=self._data_properties.x_test,
        list_of_cols=list_of_cols,
        strategy="most_frequent",
    )

    if self.report is not None:
        if list_of_cols:
            self.report.report_technique(report_info, list_of_cols)
        else:
            # NOTE(review): reports numeric columns only, although
            # "most_frequent" can be applied to non-numeric columns too —
            # confirm whether this mirrors the actual imputed column set.
            list_of_cols = _numeric_input_conditions(
                list_of_cols, self._data_properties.x_train
            )
            self.report.report_technique(report_info, list_of_cols)

    return self.copy()
def log_scale(x_train, x_test=None, list_of_cols=None, base=None):
    """
    Scales data logarithmically, in place, column by column.

    Options are None for the natural log, 2 for base 2 and 10 for base 10.
    Any other value also falls back to the natural log.

    Parameters
    ----------
    x_train : DataFrame
        Dataset
    x_test : DataFrame
        Testing dataset, by default None
    list_of_cols : list, optional
        A list of specific columns to apply this technique to.
        If `list_of_cols` is not provided, the strategy will be applied to
        all numeric columns, by default None
    base : int, optional
        Base to logarithmically scale by (2 or 10), by default None (natural log)

    Returns
    -------
    Dataframe, *Dataframe
        Transformed dataframe with the chosen columns log-scaled.
        Returns 2 Dataframes if x_test is provided.
    """
    # None sentinel instead of a mutable [] default (shared across calls).
    list_of_cols = _numeric_input_conditions(
        list_of_cols if list_of_cols is not None else [], x_train
    )

    # Unknown or falsy bases fall back to the natural log, matching the
    # original if/elif chain's behavior.
    log = {2: np.log2, 10: np.log10}.get(base, np.log)

    for col in list_of_cols:
        x_train[col] = log(x_train[col])
        if x_test is not None:
            x_test[col] = log(x_test[col])

    return x_train, x_test
def polynomial_features(x_train, x_test=None, list_of_cols=[], **poly_kwargs):
    """
    Computes polynomial features from your existing features.

    The transformer is fit on the training data only; the test data (if
    provided) is transformed with the same fitted transformer.

    Parameters
    ----------
    x_train : DataFrame
        Dataset
    x_test : DataFrame
        Testing dataset, by default None
    list_of_cols : list, optional
        A list of specific columns to apply this technique to.
        If `list_of_cols` is not provided, the strategy will be applied to
        all numeric columns, by default []
    poly_kwargs : dict or kwargs
        Polynomial Features constructor key word arguments

    Returns
    -------
    Dataframe, *Dataframe
        Transformed dataframe with polynomial feature columns.
        Returns 2 Dataframes if x_test is provided.
    """
    poly = PolynomialFeatures(**poly_kwargs)
    list_of_cols = _numeric_input_conditions(list_of_cols, x_train)

    scaled_data = poly.fit_transform(x_train[list_of_cols])
    scaled_df = pd.DataFrame(scaled_data, columns=poly.get_feature_names())
    x_train = drop_replace_columns(x_train, list_of_cols, scaled_df)

    if x_test is not None:
        # BUG FIX: transform only the fitted columns, not the whole frame.
        # Passing the full x_test breaks when it has extra/reordered columns
        # relative to what the transformer was fitted on.
        scaled_x_test = poly.transform(x_test[list_of_cols])
        scaled_test_df = pd.DataFrame(
            scaled_x_test, columns=poly.get_feature_names()
        )
        x_test = drop_replace_columns(x_test, list_of_cols, scaled_test_df)

    return x_train, x_test
def normalize_log(self, *list_args, list_of_cols=None, base=1):
    """
    Scales data logarithmically.

    Options are 2 for base 2, 10 for base 10; any other value (including the
    default of 1) uses the natural log.

    Parameters
    ----------
    list_args : str(s), optional
        Specific columns to apply this technique to.
    list_of_cols : list, optional
        A list of specific columns to apply this technique to., by default None
    base : int, optional
        Base to logarithmically scale by, by default 1 (natural log)

    Returns
    -------
    Preprocess:
        Returns a deep copy of the Preprocess object.
    """
    report_info = technique_reason_repo["preprocess"]["numeric"]["log"]

    # None sentinel instead of a mutable [] default (shared across calls).
    list_of_cols = _input_columns(
        list_args, list_of_cols if list_of_cols is not None else []
    )

    self._data_properties.x_train, self._data_properties.x_test = log_scale(
        x_train=self._data_properties.x_train,
        x_test=self._data_properties.x_test,
        list_of_cols=list_of_cols,
        base=base,
    )

    if self.report is not None:
        if list_of_cols:
            self.report.report_technique(report_info, list_of_cols)
        else:
            # No explicit columns: report the numeric columns that were scaled.
            list_of_cols = _numeric_input_conditions(
                list_of_cols, self._data_properties.x_train
            )
            self.report.report_technique(report_info, list_of_cols)

    return self.copy()
def normalize_quantile_range(
    self, *list_args, list_of_cols=None, keep_col=True, **robust_params
):
    """
    Scale features using statistics that are robust to outliers.

    This Scaler removes the median and scales the data according to the quantile
    range (defaults to IQR: Interquartile Range). The IQR is the range between
    the 1st quartile (25th quantile) and the 3rd quartile (75th quantile).

    Standardization of a dataset is a common requirement for many machine
    learning estimators. Typically this is done by removing the mean and scaling
    to unit variance. However, outliers can often influence the sample mean /
    variance in a negative way. In such cases, the median and the interquartile
    range often give better results.

    If `list_of_cols` is not provided, the strategy will be applied to all
    numeric columns.

    If a list of columns is provided use the list, otherwise use arguments.

    For more info please see: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html#sklearn.preprocessing.RobustScaler

    This function can be found in `preprocess/numeric.py`

    Parameters
    ----------
    list_args : str(s), optional
        Specific columns to apply this technique to.
    list_of_cols : list, optional
        A list of specific columns to apply this technique to., by default None
    keep_col : bool, optional
        True to not remove the original columns, by default True
    with_centering : boolean, True by default
        If True, center the data before scaling.
        This will cause transform to raise an exception when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit
        in memory.
    with_scaling : boolean, True by default
        If True, scale the data to interquartile range.
    quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
        Quantile range used to calculate scale_.
    robust_params : dict, optional
        Parameters to pass into the RobustScaler() constructor from Scikit-Learn

    Returns
    -------
    Preprocess:
        Returns a deep copy of the Preprocess object.
    """
    report_info = technique_reason_repo["preprocess"]["numeric"]["robust"]

    # None sentinel instead of a mutable [] default (shared across calls).
    list_of_cols = _input_columns(
        list_args, list_of_cols if list_of_cols is not None else []
    )

    self._data_properties.x_train, self._data_properties.x_test = scale(
        x_train=self._data_properties.x_train,
        x_test=self._data_properties.x_test,
        list_of_cols=list_of_cols,
        method="robust",
        keep_col=keep_col,
        **robust_params,
    )

    if self.report is not None:
        if list_of_cols:
            self.report.report_technique(report_info, list_of_cols)
        else:
            # No explicit columns: report the numeric columns that were scaled.
            list_of_cols = _numeric_input_conditions(
                list_of_cols, self._data_properties.x_train
            )
            self.report.report_technique(report_info, list_of_cols)

    return self.copy()