def train_and_predict(train_df, test_df):
    """Run the full pipeline: clean, engineer features, encode, fit, predict.

    Parameters
    ----------
    train_df, test_df : labeled training frame and unlabeled test frame
        (schemas are defined by the upstream query code -- not visible here).

    Returns
    -------
    Predictions for ``test_df`` as produced by the fitted model's
    ``predict`` method.
    """
    # -- cleaning ---------------------------------------------------------
    dc = DataCleaner()
    # Survey NaN-free columns on both frames before any mutation.
    dc.columns_with_no_nan(train_df)
    dc.columns_with_no_nan(test_df)
    train_df = dc.resolve_nan(dc.drop_columns(train_df))
    test_df = dc.resolve_nan(dc.drop_columns(test_df))

    # -- feature engineering ---------------------------------------------
    train_df, test_df = engineer_features(train_df, test_df)

    # -- model construction and encoding ---------------------------------
    clf = Classifier()
    clf = clf.model()  # swap the wrapper for its underlying model object
    train_df = clf.encode(train_df)
    test_df = clf.encode(test_df)

    # -- fit and predict --------------------------------------------------
    # NOTE(review): train()/predict() take the model as an explicit first
    # argument in addition to the bound receiver -- confirm that is the
    # intended Classifier API and not an accidental double-pass of `self`.
    fitted = clf.train(clf, train_df)
    return fitted.predict(fitted, test_df)
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score, f1_score

from question_query import create_questions_df
from answer_query import create_answers_df
from data_cleaning import DataCleaner
from model_tester import FindOptimalModels


if __name__ == '__main__':
    # Fixed: RandomForestRegressor / GradientBoostingRegressor are referenced
    # below but were never imported anywhere in this file, which raises a
    # NameError at runtime. Imported here at the entry point.
    from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor

    numrows = 1e6  # NOTE(review): a float, so the message prints "1000000.0"
    print("Connecting and getting ~{}".format(numrows))
    a = create_answers_df(numrows)
    print("Got rows, cleaning data")
    # Clean answer rows (questions=False); flag semantics are defined in
    # data_cleaning.DataCleaner -- confirm there.
    a_train_dc = DataCleaner(a, questions=False, training=True,
                             simple_regression=True, time_split=False,
                             normalize=False)
    A, b = a_train_dc.get_clean()

    default_models = [RandomForestRegressor, GradientBoostingRegressor]

    # Hyper-parameter grids keyed by model nickname ('rf', 'gbr').
    param_dict = {'rf': {'n_estimators': [50, 100, 5000], 'max_depth':
                  [2, 3, 5]},
                  'gbr': {'learning_rate': [.001, .01, .1, .2], 'max_depth':
                          [2, 3, 5], 'n_estimators': [50, 100, 5000]}}
    print('Finding optimal models')
    finder = FindOptimalModels(A, b, question=False, time_split=False)
    finder.baseline_model()  # score a baseline before the model search
    fitted_models = finder.run_default_models(default_models)
    print("starting grid search")
    opt_params = finder.run_grid_search(fitted_models, param_dict)
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score, f1_score

from question_query import create_questions_df
from answer_query import create_answers_df
from data_cleaning import DataCleaner
from model_tester import FindOptimalModels

# NOTE(review): this __main__ block is truncated -- the `param_dict` literal
# opened below is never closed in the visible source (the file splices into an
# unrelated fragment right after), and RandomForestRegressor /
# GradientBoostingRegressor are never imported in this file. Code is kept
# byte-identical; comments only.
if __name__ == '__main__':
    numrows = 1e6  # float, so the message below prints "1000000.0"
    print("Connecting and getting ~{}".format(numrows))
    q = create_questions_df(numrows)
    print("Got rows, cleaning data")
    # Clean question rows (questions=True); flag semantics are defined in
    # data_cleaning.DataCleaner -- confirm there.
    q_train_dc = DataCleaner(q,
                             questions=True,
                             training=True,
                             simple_regression=True,
                             time_split=True,
                             normalize=True)
    X, y = q_train_dc.get_clean()

    default_models = [RandomForestRegressor, GradientBoostingRegressor]

    # Hyper-parameter grids keyed by model nickname ('rf', 'gbr').
    param_dict = {
        'rf': {
            'n_estimators': [50, 100, 5000],
            'max_depth': [2, 3, 5]
        },
        'gbr': {
            'learning_rate': [.001, .01, .1, .2],
            'max_depth': [2, 3, 5],
            'n_estimators': [50, 100, 5000]
# Example #4
# 0
    # NOTE(review): this fragment references `train`, `pd`, `np`, `Featurizer`,
    # `ModelSelector`, and `now` that are not defined anywhere in the visible
    # file; it appears spliced in from another script -- confirm provenance
    # before relying on it.
    gps = pd.read_csv('./data/Longitud_Latitud.csv')
    # Create sub_area categorical with all levels shared
    # between train and test to avoid errors
    test['price_doc'] = -99  # sentinel marks test rows inside the merged frame
    merged = pd.concat([train, test], axis=0)
    merged = merged.merge(gps, how='left', on='sub_area')
    merged['sub_area'] = merged.sub_area.astype('category')
    train = merged[merged.price_doc != -99]
    test = merged[merged.price_doc == -99]
    test.pop('price_doc')  # drop the sentinel column from the test frame

    macro = pd.read_csv('data/macro.csv', parse_dates=['timestamp'])
    train = train.merge(macro, how='left', on='timestamp', suffixes=('_train', '_macro'))

    # Clean
    dc = DataCleaner(data=train, sample_rate=0.3)
    data, y = dc.clean()
    y = np.array(y)
    y = np.log(y + 1)  # log1p transform of the target

    # Train / test split
    data_train, data_test, y_train, y_test = train_test_split(data, y, random_state=77)
    house_ids_test = data_test.id

    # Featurize training data set
    feat_train = Featurizer()
    X_train = feat_train.featurize(data_train)

    # Grid search tune all estimators
    ms = ModelSelector()
    # Fixed: Python 2 `print` statement -> Python 3 print() call, consistent
    # with the print() usage elsewhere in this file.
    print(' # {:s} | X_train shape: {:s}'.format(now(), X_train.shape))
import numpy as np
from data_cleaning import DataCleaner
from features_engineering import FeatureExtractor
from model_selection import ModelSelector
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Module-level side effect: switch matplotlib to interactive mode so later
# plotting calls render without blocking the script.
plt.interactive(True)

# NOTE(review): this __main__ block is cut off by a spliced fragment right
# after the last line below; code kept byte-identical, comments only.
if __name__ == '__main__':
    # read and clean the data
    dc = DataCleaner()
    data = dc.clean()

    # Debug transformations
    # data.to_csv('./data/debug.csv', index=False, encoding='latin1')
    # assert False

    # separate target variable
    target = data.pop('Target')

    # train test split -- no random_state is passed, so the partition
    # differs from run to run
    data_train, data_test, target_train, target_test = train_test_split(
        data, target)

    # featurize data
    # NOTE(review): one FeatureExtractor instance transforms both frames;
    # whether featurize() fits internal state per call (a potential
    # leakage/consistency concern) depends on features_engineering -- confirm.
    featurizer = FeatureExtractor()
    X_train = featurizer.featurize(data_train)
    X_test = featurizer.featurize(data_test)

    # Convert to numpy arrays
    y_train = np.array(target_train)
    # NOTE(review): this fragment references `train`, `macro`, `pd`, `np`,
    # and `Featurizer` that are not defined in the visible portion of the
    # file; it appears spliced in from another script -- confirm provenance
    # before relying on it.
    test = pd.read_csv('./data/test.csv')
    gps = pd.read_csv('./data/Longitud_Latitud.csv')
    # Create sub_area categorical with all levels shared
    # between train and test to avoid errors
    test['price_doc'] = -99  # sentinel marks test rows inside the merged frame
    merged = pd.concat([train, test], axis=0)
    merged = merged.merge(gps, how='left', on='sub_area')
    merged['sub_area'] = merged.sub_area.astype('category')
    train = merged[merged.price_doc != -99]

    train = train.merge(macro,
                        how='left',
                        on='timestamp',
                        suffixes=('_train', '_macro'))

    dc = DataCleaner(data=train)
    train, y = dc.clean()
    y = np.array(y)
    y = np.log(y + 1)  # log1p transform of the target

    # Featurize training data set
    feat_train = Featurizer()
    train = feat_train.featurize(train)

    # Fixed: Python 2 `print` statement -> Python 3 print() call, consistent
    # with the print() usage elsewhere in this file.
    print('train shape', train.shape)

    # # Remove all categorical variables for now
    # mask = ~(train.dtypes == 'object').values
    # train = train.iloc[:, mask]
    # print('train shape with only numerical features', train.shape)