# Beispiel #1 (notebook-export artifact; commented out so the module parses)
# 0
# import nbimporter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings

# Silence all warnings (including pandas FutureWarnings) so the notebook
# output stays readable; remove these two lines when debugging.
warnings.filterwarnings('ignore')
warnings.simplefilter(action='ignore', category=FutureWarning)

from src.dataset import Dataset
from dython.nominal import associations

# Load the prepared house-prices data via the project's Dataset wrapper
# (reads the gzipped CSV) and print an initial summary.
houses = Dataset('./data/houseprices_prepared.csv.gz')
houses.describe()

# Impute missing values: 'Electrical' gets an explicit 'Unknown' level,
# while every categorical column still containing NAs (as reported by
# names('categorical_na')) is filled with the string 'None'.
houses.replace_na(column='Electrical', value='Unknown')
houses.replace_na(column=houses.names('categorical_na'), value='None')
# Declare 'SalePrice' as the prediction target, then re-inspect.
houses.set_target('SalePrice')
houses.describe()

# Drop the row identifier — it carries no predictive information.
# NOTE(review): 'Id' is dropped a second time later in this script; the
# duplicate call is redundant and may raise — confirm and keep only one.
houses.drop_columns('Id')
houses.describe()
    # NOTE(review): orphaned fragment — the enclosing `def` header and the
    # assignment of `columns_to_add` are missing from this file (presumably
    # a sum_Porch() helper that sums the porch-area columns into 'Porch_sf'
    # and then drops them — TODO confirm against the original notebook).
    # As written, these indented lines are an IndentationError at module
    # level and `columns_to_add` is undefined; restore the header before
    # running.
    houses.features['Porch_sf'] = houses.features[columns_to_add].sum(axis=1)
    houses.drop_columns(columns_to_add)


def sum_Baths():
    """Collapse the four bathroom-count columns into one 'Total_Baths' feature.

    Full baths contribute 1.0 each and half baths 0.5 each; the four
    source columns are dropped once the combined feature has been added
    to ``houses.features``.
    """
    # Weight per source column: half baths count half as much as full baths.
    bath_weights = {
        'FullBath': 1.0,
        'BsmtFullBath': 1.0,
        'HalfBath': 0.5,
        'BsmtHalfBath': 0.5,
    }
    houses.features['Total_Baths'] = sum(
        weight * houses.features[name]
        for name, weight in bath_weights.items()
    )
    # Drop the now-redundant source columns (insertion order matches the
    # original explicit list).
    houses.drop_columns(list(bath_weights))


# In[31]:

# Apply the engineered-feature helpers (square footage, porch area, baths)
# and inspect the resulting dataset.
# Fix: the duplicate houses.drop_columns('Id') that stood here is removed —
# 'Id' is already dropped earlier in this script, and dropping a column
# that no longer exists is at best a no-op and at worst an error.
sum_SF()
sum_Porch()
sum_Baths()
houses.metainfo()
houses.describe()

# ### Scale numerical features
#
# Standardization of datasets is a common requirement for many machine learning estimators implemented in scikit-learn; they might behave badly if the individual features do not more or less look like standard normally distributed data: Gaussian with zero mean and unit variance.
#
# In practice we often ignore the shape of the distribution and just transform the data to center it by removing the mean value of each feature, then scale it by dividing non-constant features by their standard deviation.
#
# For instance, many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the l1 and l2 regularizers of linear models) assume that all features are centered around zero and have variance in the same order. If a feature has a variance that is orders of magnitude larger than others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected.

# In[32]: