Example #1
def generate_optimal_pipeline(features, outcomes, output_dir, memory=None):
    """
    Generate a training set and test set from data
    Parameters:
    - features: The feature set to train on. In scikit-learn this is often refered to as X.
    - outcomes: The labeled outcomes we are trying to predict. In scikit-learn this often refered to as y.
    - output_dir: The directory that TPOT can write to. This will be where the cache and intermitent save are written.
    Output:
    The TPOT object returned by fitting TPOTRegressor https://epistasislab.github.io/tpot/api/
    """
    # Fall back to the lighter built-in operator set for very large datasets.
    if (features.shape[0] * features.shape[1] > (50000 * 250)):
        config_dict = 'TPOT light'
    else:
        config_dict = None

    pipeline_optimizer = TPOTRegressor(
        generations=GENERATIONS,
        population_size=POPULATION_SIZE,
        verbosity=get_verbosity('tpot'),
        random_state=RANDOM_STATE,
        template='Selector-Transformer-Regressor',
        n_jobs=N_JOBS,
        warm_start=True,
        memory=memory,
        config_dict=config_dict,
        periodic_checkpoint_folder='{DIR}/tpot-intermediate-save/'.format(
            DIR=output_dir))

    pipeline_optimizer.fit(features, outcomes)
    return pipeline_optimizer
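A minimal usage sketch for the helper above (hedged: GENERATIONS, POPULATION_SIZE, RANDOM_STATE, N_JOBS, and get_verbosity are module-level names assumed to be defined elsewhere in the project):

import numpy as np

features = np.random.rand(200, 5)
outcomes = features @ np.array([1.0, -2.0, 0.5, 0.0, 3.0])
optimizer = generate_optimal_pipeline(features, outcomes, output_dir='/tmp/tpot-run')
print(optimizer.score(features, outcomes))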
Example #2
def train_tpot(l=None):
    # can also be run directly from the command line
    if l is None:
        l = get_data()
    model = TPOTRegressor(
        config_dict=None,
        crossover_rate=0.1,
        cv=5,
        disable_update_check=False,
        early_stop=None,
        generations=100,
        max_eval_time_mins=5,
        max_time_mins=None,
        memory=_tpot_cache,
        mutation_rate=0.9,
        n_jobs=-1,
        offspring_size=None,
        periodic_checkpoint_folder='tpot_periodic_checkpoint',
        population_size=100,
        random_state=None,
        scoring=None,
        subsample=1.0,
        use_dask=False,
        verbosity=1,
        warm_start=False)
    model.fit(l.X_train.copy(), l.y_train.copy())
    # to be safe:
    model.export('tpot_exported_pipeline.py')
    return attributedict_from_locals('model')
Example #3
    def run_example(self):

        train = pd.read_csv("./data/churn-train.csv")
        categorical_feature_mask = train.dtypes == object
        categorical_cols = train.columns[categorical_feature_mask].tolist()

        # Fit one LabelEncoder per categorical column on the training data so
        # the same mapping can be reused (via transform) on the test data.
        encoders = {col: LabelEncoder().fit(train[col])
                    for col in categorical_cols}
        train[categorical_cols] = train[categorical_cols].apply(
            lambda col: encoders[col.name].transform(col))
        X_train = train.drop(columns=['churn_probability']).to_numpy()
        y_train = train["churn_probability"].to_numpy()

        test = pd.read_csv("./data/churn-test.csv")
        test[categorical_cols] = test[categorical_cols].apply(
            lambda col: encoders[col.name].transform(col))
        X_test = test.drop(columns=['churn_probability']).to_numpy()
        y_test = test["churn_probability"].to_numpy()

        tpot = TPOTRegressor(generations=5,
                             population_size=50,
                             verbosity=2,
                             random_state=42,
                             scoring='neg_mean_absolute_error',
                             cv=5)
        tpot.fit(X_train, y_train)
        print(tpot.score(X_test, y_test))
        tpot.export('tpot_churn_pipeline.py')

        return tpot.score(X_test, y_test)
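A hedged alternative to the per-column LabelEncoder handling above: scikit-learn's OrdinalEncoder is designed for feature matrices, fits all columns at once, and can map unseen test categories to a sentinel value.

from sklearn.preprocessing import OrdinalEncoder

enc = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
train[categorical_cols] = enc.fit_transform(train[categorical_cols])
test[categorical_cols] = enc.transform(test[categorical_cols])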
Example #4
def train_tpot(name, X, y, gen, cores):

    test_name = 'gen_' + str(gen) + name + '_' + time.strftime('%y%m%d')

    print('Training with TPOT .... ', test_name)
    t1 = time.time()

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.75,
                                                        test_size=0.25)
    tpot = TPOTRegressor(generations=gen,
                         population_size=50,
                         verbosity=2,
                         n_jobs=cores)
    tpot.fit(X_train, y_train.reshape(-1, ))

    print(tpot.score(X_test, y_test))
    t2 = time.time()
    delta_time = t2 - t1
    print('Time to train...:', delta_time)

    print('Saving the model ...')
    tpot.export('trained_models/' + test_name + '.py')
    joblib.dump(tpot.fitted_pipeline_, 'trained_models/' + test_name + '.pkl')
    print(test_name, ' saved ... ')
Example #5
def works():
    config_dict = {
        'sklearn.gaussian_process.GaussianProcessRegressor': {
            'alpha': [1e1, 1, 1e-1]
        }
    }
    model = TPOTRegressor(config_dict=config_dict,
                          crossover_rate=0.1,
                          cv=5,
                          disable_update_check=False,
                          early_stop=None,
                          generations=10,
                          max_eval_time_mins=5,
                          max_time_mins=None,
                          mutation_rate=0.9,
                          n_jobs=-1,
                          offspring_size=None,
                          population_size=100,
                          random_state=None,
                          scoring=None,
                          subsample=1.0,
                          use_dask=False,
                          verbosity=3,
                          warm_start=False)
    model.fit(X, y)
Example #6
def tpot_test(conf):
    from tpot import TPOTRegressor
    from sklearn.model_selection import train_test_split
    from sklearn.model_selection import TimeSeriesSplit

    p.load_config(conf)
    ds = dl.load_price_data()
    ds = add_features(ds)

    X = ds[p.feature_list][:-1]
    y = ds['DR'].shift(-1)[:-1]

    # Split Train and Test
    # Keep chronological order: a shuffled split would leak future prices
    # into training, and the CV below is time-series aware.
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.8,
                                                        test_size=0.2,
                                                        shuffle=False)

    tpot = TPOTRegressor(n_jobs=-1,
                         verbosity=2,
                         max_time_mins=60,
                         cv=TimeSeriesSplit(n_splits=3))

    tpot.fit(X_train, y_train)
    print(tpot.score(X_test, y_test))
    tpot.export('./tpot_out.py')
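For reference, TimeSeriesSplit(n_splits=3) as used above produces expanding-window folds, so no future prices leak into validation (a small illustration):

import numpy as np
from sklearn.model_selection import TimeSeriesSplit

for train_idx, valid_idx in TimeSeriesSplit(n_splits=3).split(np.arange(8)):
    print(train_idx, valid_idx)
# [0 1] [2 3]
# [0 1 2 3] [4 5]
# [0 1 2 3 4 5] [6 7]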
Example #7
def fails():
    config_dict = {
        'example_tpot.MyGP': {
            'alpha': [1e1, 1, 1e-1],
            # 'mu_x': np.logspace(-2, 4, 10),
            # 'mu_y': np.logspace(-2, 4, 10),
        }
    }
    model = TPOTRegressor(config_dict=config_dict,
                          crossover_rate=0.1,
                          cv=5,
                          disable_update_check=False,
                          early_stop=None,
                          generations=10,
                          max_eval_time_mins=5,
                          max_time_mins=None,
                          mutation_rate=0.9,
                          n_jobs=-1,
                          offspring_size=None,
                          population_size=100,
                          random_state=None,
                          scoring=None,
                          subsample=1.0,
                          use_dask=False,
                          verbosity=3,
                          warm_start=False)
    model.fit(X, y)
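Examples #5 and #7 differ only in whether config_dict points at a scikit-learn class or a custom one. A plausible reason #7 fails (an assumption, not confirmed by the source): every config_dict key must be importable in the worker processes TPOT spawns, so a custom class such as example_tpot.MyGP has to live in a module on sys.path and follow the scikit-learn estimator API. A minimal sketch of such a class:

from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.gaussian_process import GaussianProcessRegressor


class MyGP(BaseEstimator, RegressorMixin):
    """Thin sklearn-compatible wrapper so TPOT can clone and fit it."""

    def __init__(self, alpha=1.0):
        self.alpha = alpha

    def fit(self, X, y):
        self.gp_ = GaussianProcessRegressor(alpha=self.alpha).fit(X, y)
        return self

    def predict(self, X):
        return self.gp_.predict(X)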
Example #8
def tpot(use_dask=True):
    # TODO: Add some documentation...
    # TODO: Investigate why tpot crashes when using Dask (probably a RAM problem).
    if use_dask:
        client = Client("tcp://192.168.1.94:8786")
        print(client)
    tpot_reg = TPOTRegressor(generations=TPOT_GENERATIONS,
                             population_size=TPOT_POPULATION_SIZE,
                             random_state=SEED,
                             cv=CV,
                             use_dask=use_dask,
                             verbosity=2,
                             memory="auto")
    df = pd.read_csv("elo/data/augmented_train.csv")
    print(df.sample(5))
    # TODO: Find a better way to impute inf and missing values.
    df = df.replace([np.inf, -np.inf], np.nan)
    df = df.fillna(df.median())
    X = df.drop(FEATS_EXCLUDED, axis=1, errors='ignore').values
    y = df.loc[:, "target"].values

    if use_dask:
        with ProgressBar() as pbar, Profiler() as prof:
            tpot_reg.fit(X, y)
    else:
        tpot_reg.fit(X, y)
    export_path = str(
        Path('elo/data/tpot_few_generations_augmented_dataset.py').absolute())
    tpot_reg.export(export_path)
    return tpot_reg
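A hedged variation on the Dask setup above: replacing the hardcoded scheduler address with a LocalCluster and an explicit per-worker memory limit makes the suspected RAM problem easier to observe (worker count and limit are illustrative):

from dask.distributed import Client, LocalCluster

cluster = LocalCluster(n_workers=4, memory_limit="4GB")  # cap RAM per worker
client = Client(cluster)
print(client)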
Example #9
def train_and_pickle_best_model(target, X, y, val_X, val_y):
    print('AutoML Search for good model for {}'.format(target))
    pipeline_optimizer = TPOTRegressor(
        generations=10,
        population_size=150,
        cv=3,
        random_state=0xDEADBEEF,
        verbosity=3,
        scoring='r2',
        n_jobs=-1,
        early_stop=5,
        periodic_checkpoint_folder='tpot_checkpoint')
    pipeline_optimizer.fit(X, y)
    new_preds = pipeline_optimizer.predict(val_X)
    mae = mean_absolute_error(val_y, new_preds)
    rmse = sqrt(mean_squared_error(val_y, new_preds))
    r2 = r2_score(val_y, new_preds)
    print("TPOT mae:", mae)
    print("TPOT rmse:", rmse)
    print("TPOT R^2 score:", r2)
    pipeline_optimizer.export(
        'models/tpot_exported_pipeline_{}.py'.format(target))
    dump(pipeline_optimizer.fitted_pipeline_,
         'models/{}-best-model-automl.joblib'.format(target))
    return r2, mae, rmse
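Reloading the persisted pipeline later (a sketch; new_X stands for fresh feature data and is hypothetical):

from joblib import load

model = load('models/{}-best-model-automl.joblib'.format(target))
preds = model.predict(new_X)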
Example #10
    def tpotregressor(self, i):
        print("TPOTRegressor")
        tpot = TPOTRegressor(generations=50, population_size=50, verbosity=2, random_state=42)
        tpot.fit(self.X, self.y)
        #print(tpot.score(X, y_test2))
        tpot.export('tpot_covid_pipeline_day_' + str(i) + '.py')
        print("\n")
        return None
Example #11
def auto_ml(X_train, X_test, y_train, y_test):
    tpot = TPOTRegressor(generations=30,
                         population_size=200,
                         verbosity=2,
                         periodic_checkpoint_folder="tpot_checkpoint/")
    tpot.fit(X_train, y_train)
    print(tpot.score(X_test, y_test))
    tpot.export('tpot_pipeline.py')
Example #12
def tpotRegressor(train_data, target_value):
    regressor = TPOTRegressor()
    # Drop the target from the feature matrix so it cannot leak into training.
    X_train, X_test, y_train, y_test = train_test_split(
        train_data.drop(columns=[target_value]), train_data[target_value],
        train_size=0.75, test_size=0.25)
    regressor.fit(X_train, y_train)
    score = regressor.score(X_test, y_test)
    regressor.export('my_pipeline.py')
    return regressor, score
Example #13
def tpot_regression(x_calib,
                    y_calib,
                    x_prod,
                    y_prod,
                    results_direct,
                    cv_folds,
                    error_metric,
                    num_jobs,
                    gens,
                    pop,
                    mins,
                    mins_per_pipeline,
                    verbose,
                    early_stop_generations,
                    tpot_config_dict,
                    model_name='tpot_best'):

    checkpoint_folder = results_direct + 'checkpoint_folder/'
    if not Path(checkpoint_folder).is_dir():
        os.mkdir(checkpoint_folder)

    ml_model = TPOTRegressor(generations=gens,
                             population_size=pop,
                             scoring=error_metric,
                             max_time_mins=mins,
                             cv=cv_folds,
                             verbosity=verbose,
                             n_jobs=num_jobs,
                             early_stop=early_stop_generations,
                             max_eval_time_mins=mins_per_pipeline,
                             config_dict=tpot_config_dict,
                             periodic_checkpoint_folder=checkpoint_folder)

    ml_model.fit(x_calib, y_calib)

    # save entire pipeline
    ml_model.export(results_direct + model_name + '.py')
    joblib.dump(ml_model.fitted_pipeline_,
                results_direct + model_name + '.sav')

    # for cross-validation errors see the exported model py file

    # production - results and errors
    y_prod_predict = ml_model.predict(x_prod)
    np.save(results_direct + model_name + '_prod_predicted.npy',
            y_prod_predict)

    df_prod_errors = pd.DataFrame(index=[
        'Mean Squared Error', 'Median Absolute Error',
        'Correlation Coefficient', 'R2'
    ])
    df_prod_errors['TPOT Best'] = [
        mean_squared_error(y_prod, y_prod_predict),
        median_absolute_error(y_prod, y_prod_predict),
        np.corrcoef(y_prod, y_prod_predict)[0][-1],
        r2_score(y_prod, y_prod_predict)
    ]
    df_prod_errors.to_csv(results_direct + model_name + '_prod_errors.csv')
Example #14
def build_regressor(data, name):
	X, y = data
	config = make_tpot_pmml_config(regressor_config_dict)
	del config["sklearn.neighbors.KNeighborsRegressor"]
	regressor = TPOTRegressor(generations = 3, population_size = 3, random_state = 13, config_dict = config, verbosity = 2)
	regressor.fit(X, y)
	pipeline = make_pmml_pipeline(regressor.fitted_pipeline_, active_fields = X.columns.values, target_fields = [y.name])
	print(repr(pipeline))
	store_pkl(pipeline, name)
	result = DataFrame(regressor.predict(X), columns = [y.name])
	store_csv(result, name)
Example #15
def go_tpot():
    from tpot import TPOTRegressor
    import datetime
    # 'mean_absolute_error' is no longer accepted; newer scikit-learn/TPOT
    # releases expect the 'neg_' prefixed scorer name.
    tpot = TPOTRegressor(generations=5,
                         population_size=20,
                         verbosity=3,
                         scoring='neg_mean_absolute_error')
    tpot.fit(X_train, y_train)
    print(tpot.score(X_test, y_test))
    tpot.export('../models/tpot_pipeline_' +
                datetime.datetime.now().strftime('%Y.%m.%d_%H%M%S') + '.py')
Example #16
def tpotting():
    """deprecated"""
    from tpot import TPOTRegressor
    for target in range(4):  # replace with real targets
        tpot = TPOTRegressor(
            verbosity=2,
            cv=5,
            random_state=2017,
            n_jobs=4,
            periodic_checkpoint_folder='out/out_{}'.format(target))
        tpot.fit(tra_df['x_cols'], tra_df[target])
        tpot.export('out/tpotted_{}.py'.format(target))
Example #17
def fit_model0_adsorption_energies(adsorbate):
    '''
    Create and save a modeling pipeline to predict adsorption energies.

    Arg:
        adsorbate   String indicating which adsorbate you want to fit the model
                    for
    Saves:
        pipeline    An `sklearn.pipeline.Pipeline` object that is fit to our
                    data and can be used to make predictions on adsorption
                    energies.  The pipeline is automatically saved to our GASdb
                    cache location, which is specified as 'gasdb_path' in the
                    `gaspyrc.json` file.
    '''
    model_name = 'model0'

    print('[%s] Making %s pipeline/regression for %s...' %
          (datetime.utcnow(), model_name, adsorbate))

    # Fit the transformers and models
    docs = get_adsorption_docs(adsorbate=adsorbate)
    energies_dft = np.array([doc['energy'] for doc in docs])
    inner_fingerprinter = fingerprinters.InnerShellFingerprinter()
    outer_fingerprinter = fingerprinters.OuterShellFingerprinter()
    fingerprinter = fingerprinters.StackedFingerprinter(
        inner_fingerprinter, outer_fingerprinter)
    scaler = StandardScaler()
    pca = PCA()
    preprocessing_pipeline = Pipeline([('fingerprinter', fingerprinter),
                                       ('scaler', scaler), ('pca', pca)])
    features = preprocessing_pipeline.fit_transform(docs)
    tpot = TPOTRegressor(generations=2,
                         population_size=32,
                         offspring_size=32,
                         verbosity=2,
                         scoring='neg_median_absolute_error',
                         n_jobs=16)
    tpot.fit(features, energies_dft)

    # Make the pipeline
    steps = [('fingerprinter', fingerprinter), ('scaler', scaler),
             ('pca', pca)]
    for step in tpot.fitted_pipeline_.steps:
        steps.append(step)
    pipeline = Pipeline(steps)

    # Save the pipeline
    file_name = GASDB_LOCATION + '/pipeline_%s_%s.pkl' % (adsorbate,
                                                          model_name)
    with open(file_name, 'wb') as file_handle:
        pickle.dump(pipeline, file_handle)
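Using the saved pipeline afterwards (a sketch; new_docs stands for fresh adsorption documents in the same format as docs and is hypothetical):

import pickle

with open(file_name, 'rb') as file_handle:
    pipeline = pickle.load(file_handle)
predictions = pipeline.predict(new_docs)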
Example #18
def regression():
    housing = load_boston()
    X_train, X_test, y_train, y_test = train_test_split(housing.data,
                                                        housing.target,
                                                        train_size=0.75,
                                                        test_size=0.25,
                                                        random_state=42)
    tpot = TPOTRegressor(generations=5,
                         population_size=50,
                         verbosity=2,
                         random_state=42)
    tpot.fit(X_train, y_train)
    print(tpot.score(X_test, y_test))
    tpot.export('tpot_boston_pipeline.py')
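Note that load_boston was removed in scikit-learn 1.2; a drop-in sketch of the same flow on the California housing data:

from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()
# housing.data and housing.target plug into the same train_test_split call above.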
Example #19
def rolling_forecasts(data, target):
    """
    Fits the rolling forecast model
    :param data: feature DataFrame
    :param target: variable to be forecasted
    :return: the fitted TPOT model
    """
    model = TPOTRegressor(generations=5, population_size=50, verbosity=2)
    model.fit(data.values, target)
    # for i in range(0, ldf.shape[0] - window):
    #     model.fit(ldf.values[i:i + window, :], ldf['target'].values[i:i + window])

    return model
Example #20
    def regression(self, timeMax=60):
        def rmse_scorer(y_true, y_pred):
            return mean_squared_error(y_true, y_pred, squared=False)

        my_custom_scorer = make_scorer(rmse_scorer, greater_is_better=False)

        print(f"Starting regression with {self.modelName}")
        X_train, X_test, y_train, y_test = self.dataFunction(
            preprocessed=self.preprocessed,
            specifics="TPOT",
            trainSize=self.trainSize,
            nDataPoints=self.nDataPoints)

        # Change dict for prediction model
        config_copy = regressor_config.copy()
        config_copy.update(self.model)

        # TPOT automated feature engineering
        start_time = time.time()
        tpot = TPOTRegressor(generations=self.generations,
                             population_size=self.popSize,
                             verbosity=2,
                             config_dict=config_copy,
                             max_time_mins=timeMax,
                             max_eval_time_mins=30,
                             cv=4,
                             scoring=my_custom_scorer)

        tpot.fit(X_train, y_train)
        total_time = int(divmod(time.time() - start_time, 60)[0])
        print(tpot.evaluated_individuals_)
        print(f"Time: {total_time}")

        # prediction score
        predictionScore = int(-tpot.score(X_test, y_test))
        print(f"Final MSE prediction score: {predictionScore}")

        # Export model
        tpot.export(
            f'{self.savePath}/time{total_time}_score{predictionScore}_trainSize{self.trainSize}_PIPE.py'
        )
        # Export History
        with open(f'{self.savePath}/performance_history.pkl', "wb") as handle:
            pickle.dump(tpot.evaluated_individuals_, handle)
        # Export pareto front
        with open(f'{self.savePath}/PARETO.pkl', "wb") as handle:
            pickle.dump(tpot.pareto_front_fitted_pipelines_, handle)
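A quick check of the sign convention above (illustrative): make_scorer(..., greater_is_better=False) stores the metric negated, so tpot.score() returns -RMSE and the code flips the sign back.

import numpy as np
from sklearn.dummy import DummyRegressor
from sklearn.metrics import make_scorer, mean_squared_error

rmse = lambda yt, yp: mean_squared_error(yt, yp, squared=False)
scorer = make_scorer(rmse, greater_is_better=False)
X_demo, y_demo = np.arange(10).reshape(-1, 1), np.arange(10.0)
est = DummyRegressor().fit(X_demo, y_demo)
print(scorer(est, X_demo, y_demo))  # negative RMSE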
Example #21
def model_selection_and_HPO(dataframe, target="job_performance", test_size=0.25, r_seed=123):
    """ Pass in the dataframe that has gone through feature selection
    Uses the TPOT regressor module from TPOT to perform MS and HPO. As this modeling uses some element
    of stochasticity, it may provide different results every time. The longer you run this,
    the more similar the final models will look in the end.

    Finally outputs a .py file with the selected model and its hyperparameters, which we can then import.
    """
    from sklearn.model_selection import train_test_split
    import timeit
    from tpot import TPOTRegressor

    # train test split
    X_train, X_test, y_train, y_test = train_test_split(
        dataframe.loc[:, dataframe.columns != target].values,
        dataframe[target].values.ravel(),
        test_size=test_size,
        random_state=r_seed)
    
    y_train = y_train.ravel()
    y_test = y_test.ravel()

    # model selection and hyperparameter optimization with TPOT Regressor
    tpot_regressor = TPOTRegressor(generations=20, 
                                   population_size=50, 
                                   cv=10,
                                   random_state=r_seed, 
                                   verbosity=2, 
                                   memory='auto')
    
    start_time = timeit.default_timer()
    tpot_regressor.fit(X_train, y_train)
    y_pred = tpot_regressor.predict(X_test)
    end_time = timeit.default_timer()

    print(f"Total runtime for the Employee dataset: {end_time-start_time}s")
    print("TPOT Score: {}".format(tpot_regressor.score(X_test, y_test)))

    tpot_regressor.export('tpot_exported_pipeline.py')
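For reference, the file written by export() above is a standalone script rather than an importable module; it is roughly of this shape (illustrative: TPOT itself emits the placeholder path and separator, and the model line depends on what the search found):

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split

# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1)
training_features, testing_features, training_target, testing_target = \
    train_test_split(features, tpot_data['target'], random_state=None)

exported_pipeline = RandomForestRegressor()  # stands in for the winning pipeline

exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)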
Example #22
def tpot_fit_pred(X_train, y_train, X_test, id_test, name_dataset, id_name,
                  target_name):
    tp = TPOTRegressor(verbosity=2)
    start_time = timer(None)
    tp.fit(X_train, y_train)
    tp.export('tpot_pipeline_dont_overfit.py')
    elapsed = timer(start_time)
    preds = tp.predict(X_test)

    # Record how long TPOT took on this dataset.
    with open(name_dataset + '_' + 'tpot', "w") as time_out:
        time_out.write(elapsed)

    submission = pd.DataFrame({id_name: id_test, target_name: preds})

    submission.to_csv('submission_' + name_dataset + '_' + 'tpot.csv',
                      index=False)
Example #23
    def fit_single_output(row):
        tpot = TPOTRegressor(generations=generations,
                             population_size=population_size,
                             verbosity=2,
                             n_jobs=1,
                             config_dict='TPOT light')
        fit_model = tpot.fit(X, row).fitted_pipeline_
        print(tpot.score(X, row))
        return fit_model
Example #24
    def fit(self):
        X_train, X_test, y_train, y_test = train_test_split(
            self.X, self.y, train_size=self.train_size, random_state=0)

        tpot = TPOTRegressor(generations=self.generation,
                             population_size=self.generation,
                             verbosity=3,
                             warm_start=True,
                             config_dict=self.config_dict)
        startTime = datetime.datetime.now()
        tpot.fit(X_train, y_train)

        endTime = datetime.datetime.now()

        predict_score = tpot.score(X_test, y_test)
        cost_time = endTime - startTime

        return predict_score, cost_time
Example #25
def functionRegression(sparkDF, listOfFeatures, label):
    sparkDF.persist(pyspark.StorageLevel.MEMORY_AND_DISK)
    df = sparkDF.toPandas()
    # Keep only the requested feature columns plus the label; the bare
    # intersection call discarded its result.
    cols = df.columns.intersection(listOfFeatures).tolist()
    df = df[cols + [label]]
    X = df.drop(label, axis=1).values
    y = df[label].values
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        random_state=1,
                                                        test_size=0.2)
    tpotModel = TPOTRegressor(verbosity=3,
                              generations=10,
                              max_time_mins=15,
                              n_jobs=-1,
                              random_state=25,
                              population_size=15)
    tpotModel.fit(X_train, y_train)
    print(tpotModel.score(X_test, y_test))
Example #26
def train_tpot_regressor(dat, target):
    # print(target)
    df = dat.rename(columns={target: 'class'})
    X = df.drop(columns='class')
    y = df['class'].copy()
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.5,
                                                        random_state=42)
    my_custom_scorer = make_scorer(my_custom_accuracy, greater_is_better=True)

    tpot = TPOTRegressor(generations=1,
                         population_size=20,
                         verbosity=2,
                         scoring=my_custom_scorer)
    tpot.fit(X_train, y_train)
    #tpot.export('tpot_mnist_pipeline_'+tname+"_"+ str(target) + '.py')
    with open('tpot_pkl_' + tname + "_" + str(target) + '.pkl', 'wb') as f:
        pickle.dump(tpot, f)
Example #27
def train_gpr_tpot(l=None):
    # with auto tuning
    if l is None:
        l = get_data()
    config_dict = {
        'sklearn.gaussian_process.GaussianProcessRegressor': {
            'alpha': np.logspace(-10, 1, 12),
        },
        'sklearn.pipeline.FeatureUnion': {},
        'sklearn.preprocessing.QuantileTransformer': {},
        'sklearn.preprocessing.MinMaxScaler': {},
        # 'competitions.MyGP': {
        #     'alpha':np.logspace(-10, 1, 12),
        #     'mu_x': np.logspace(-1, 2, 4),
        #     'mu_y': np.logspace(-1, 2, 4),
        #     }
    }
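    # An empty parameter dict (as for FeatureUnion, QuantileTransformer, and
    # MinMaxScaler above) tells TPOT to include the operator without tuning
    # any of its hyperparameters.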
    model = TPOTRegressor(
        config_dict=config_dict,
        crossover_rate=0.1,
        cv=5,
        disable_update_check=False,
        early_stop=None,
        generations=10,
        max_eval_time_mins=5,
        max_time_mins=None,
        # memory=os.path.join(_mydir, 'tpot_cache'),
        mutation_rate=0.9,
        n_jobs=-1,
        offspring_size=None,
        # periodic_checkpoint_folder='periodic_checkpoint_gpr_tpot',
        population_size=100,
        random_state=None,
        scoring=None,
        subsample=1.0,
        use_dask=False,
        verbosity=3,
        warm_start=False)
    model.fit(l.X_train.copy(), l.y_train.copy().squeeze())
    model.export('tpot_gpr.py')
    return attributedict_from_locals('model')
Example #28
def run_tpot_regressor(ATM):
    # Renamed from `TPOTRegressor`: the original name shadowed the TPOT class
    # it instantiates below.
    X = ATM.inputs["X"]
    y = ATM.inputs["y"]
    tpot = TPOTRegressor(generations=ATM.props["generations"],
                         population_size=ATM.props["population_size"],
                         verbosity=ATM.props["verbosity"],
                         random_state=ATM.props["random_state"])
    tpot.fit(X, y)
    ATM.report({
        'name': "stats",
        'stats': {
            # The original referenced undefined `payload.X_test`/`y_test`;
            # scoring on the training data is the closest well-defined call.
            'score': tpot.score(X, y)
        }
    })
    ATM.report({
        'name': "log",
        'payload': {
            'model': tpot.export()
        }
    })
    ATM.save("model.tpot", tpot.export())
Example #29
def show_data(dataset_train, classifier_name, params):
    st.write("Training dataset:", dataset_train)
    X = dataset_train.values[:, 1:]
    y = dataset_train.values[:, 0]
    st.write('Shape of dataset:', X.shape, '=> ', X.shape[0], 'rows and ',
             X.shape[1], 'columns of dataset')
    st.write(f'Classifier = {classifier_name}',
             '=> model to train the dataset')

    generation = params['2.1 Tune parameter: Generation (Epoch)']
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.75,
                                                        test_size=0.25,
                                                        random_state=42)

    tpot = TPOTRegressor(generations=generation,
                         population_size=50,
                         verbosity=2,
                         random_state=42)  #generations=5
    tpot.fit(X_train, y_train)
    #st.write('Info for reference only:', tpot.fit(X_train, y_train))
    #print(tpot.score(X_test, y_test))

    tpot.export('tpot_boston_pipeline.py')
    #tpot.log('tpot_progress_content.txt')
    MSE = abs(tpot.score(X_test, y_test))
    st.write("MSE (Mean Squared Error):", MSE.round(2))

    #st.write(tpot.evaluated_individuals_)

    # save the model to disk
    #model=tpot
    #pickle.dump(model, open(filename, 'wb'))

    #from joblib import dump, load
    #dump(tpot, 'filename.joblib')

    #https://github.com/EpistasisLab/tpot/issues/11#issuecomment-341421022
    pickle.dump(tpot.fitted_pipeline_, open(filename, 'wb'))
Example #30
def ensemble_tpot(city, state, target, horizon, lookback):
    with open('../analysis/clusters_{}.pkl'.format(state), 'rb') as fp:
        clusters = pickle.load(fp)
        data, group = get_cluster_data(city,
                                       clusters=clusters,
                                       data_types=DATA_TYPES,
                                       cols=PREDICTORS)

    casos_est_columns = ['casos_est_{}'.format(i) for i in group]
    casos_columns = ['casos_{}'.format(i) for i in group]

    data = data.drop(casos_columns, axis=1)
    data_lag = build_lagged_features(data, lookback)
    data_lag = data_lag.dropna()

    X_data = data_lag.drop(casos_est_columns, axis=1)
    X_train, X_test, y_train, y_test = train_test_split(X_data,
                                                        data_lag[target],
                                                        train_size=0.7,
                                                        test_size=0.3,
                                                        shuffle=False)

    tgt_full = data_lag[target].shift(-(horizon - 1))[:-(horizon - 1)]
    tgt = tgt_full[:len(X_train)]
    tgtt = tgt_full[len(X_train):]

    model = TPOTRegressor(generations=20,
                          population_size=100,
                          verbosity=2,
                          n_jobs=32)
    model.fit(X_train, target=tgt)
    model.export('tpot_{}_pipeline.py'.format(city))
    print(model.score(X_test[:len(tgtt)], tgtt))

    pred = plot_prediction(X_data[:len(tgt_full)], tgt_full, model,
                           'Out_of_Sample_{}_{}'.format(horizon,
                                                        city), horizon)
    plt.show()
    return pred
Example #31
test = combi[train.shape[0]:].copy()  # copy to avoid pandas chained-assignment warnings
test.drop('Item_Outlet_Sales',axis=1,inplace=True)

## removing id variables 
tpot_train = train.drop(['Outlet_Identifier','Item_Type','Item_Identifier'],axis=1)
tpot_test = test.drop(['Outlet_Identifier','Item_Type','Item_Identifier'],axis=1)
target = tpot_train['Item_Outlet_Sales']
tpot_train.drop('Item_Outlet_Sales',axis=1,inplace=True)

# finally building model using tpot library
from tpot import TPOTRegressor

X_train, X_test, y_train, y_test = train_test_split(tpot_train, target,train_size=0.75, test_size=0.25)

tpot = TPOTRegressor(generations=5, population_size=50, verbosity=2)
tpot.fit(X_train, y_train)
print(tpot.score(X_test, y_test))

tpot.export(data+'tpot_boston_pipeline.py')

## predicting using tpot optimised pipeline
tpot_pred = tpot.predict(tpot_test)
sub1 = pd.DataFrame(data=tpot_pred)

#sub1.index = np.arange(0, len(test)+1)
sub1 = sub1.rename(columns={0: 'Item_Outlet_Sales'})  # the column key is the integer 0
sub1['Item_Identifier'] = test['Item_Identifier']
sub1['Outlet_Identifier'] = test['Outlet_Identifier']
sub1.columns = ['Item_Outlet_Sales','Item_Identifier','Outlet_Identifier']
sub1 = sub1[['Item_Identifier','Outlet_Identifier','Item_Outlet_Sales']]
sub1.to_csv('tpot.csv',index=False)
Example #32
def model_dev(train_set,matchups,spreads):
	""" Create the testing set for the algo creation """
	# Create a sample set to pass into the machine learning algorithm
	X = train_set[['rush_attempt_diff', 'turn_diff', 'yards_diff', 'third_diff', 'sack_diff', 'sack_ydiff', 'poss_diff', 'p_attempt_diff']].copy()
	# X = df[['poss_diff', 'third_diff', 'turn_diff', 'pass_diff', 'rush_diff']].copy()

	# Create the results vector (the spread of the game result)
	train_set.rename(columns={'result_spread':'class'},inplace=True)
	y = train_set['class']

	""" Train, test, and predict the algorithm """
	# Scale the sample data
	scaler = preprocessing.StandardScaler().fit(X)
	X = scaler.transform(X)

	# Delete the dataframe to clear memory
	del train_set

	# Split out training and testing data sets
	X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y,test_size=0.25,random_state=0)

	# alphas = [0.1, 0.3, 0.9, 1.0, 1.3, 1.9, 2.0, 2.3, 2.9]
	# for alpha in alphas:
	# 	reg = linear_model.Ridge(alpha = alpha)
	# 	reg.fit(X_train,y_train)
	# 	print 'alpha = ',alpha,', score = ',reg.score(X_test,y_test)
	# input()
	pipeline_optimizer = TPOTRegressor(generations = 5, population_size = 10, random_state = 42, cv = 5, verbosity = 2, n_jobs = 3)#, scoring = 'f1')
	pipeline_optimizer.fit(X_train,y_train)
	print(pipeline_optimizer.score(X_test,y_test))
	pipeline_optimizer.export('NFL_ML_TPOT_Regressor.py')

	# Remove the 'week' 'home_team' and 'away_team' columns from matchups as they are not used in the algorithm
	matchups.drop(['week', 'home_team', 'away_team'], axis=1, inplace=True)


	"""
	for feat in range(1,len(matchups.columns)):
		for c in C_vec:
			# Create the classifier and check the score
			# clf = LogisticRegression()
			clf = linear_model.LogisticRegression(C=c,random_state=42)
			selector = RFE(clf)
			selector = selector.fit(X_train,y_train)

			# Calculate probabilities using the predict_proba method for logistic regression
			probabilities = selector.predict_proba(scaler.transform(matchups))

			# Vectorize the spread_conversion function and apply the function to the probabilities result vector
			vfunc = np.vectorize(spread_conversion)
			predicted_spreads = np.apply_along_axis(vfunc,0,probabilities[:,0])

			# If the actual line for the home team is lower than the predicted line then you would take the away team, otherwise take the home team
			bet_vector = np.array(np.where(predicted_spreads > spreads,0,1))

			# Create the actual result vector where a tie counts as a loss for the home team
			game_result = np.array(np.where(home_score.ix[:,0] + predicted_spreads[:] > away_score.ix[:,0], 1, 0))

			# Check to see where the bet_vector equals the actual game result with the spread included
			result = np.array(np.where(bet_vector == game_result,1,0))

			prob_result = float(np.sum(result)) / len(result)

			# print 'Number of features =', feat, 'C =',c,'  Percent correct =',prob_result

			if prob_result > prob_val:
				prob_val = prob_result
				C_val = c
				feat_val = feat

	print 'Score =',selector.score(X_test,y_test)
	# print prob_val, C_val, feat

	clf = linear_model.LogisticRegression(C=C_val,random_state=42)
	clf = clf.fit(X_train,y_train)
	probabilities = clf.predict_proba(scaler.transform(matchups))
	vfunc = np.vectorize(spread_conversion)
	predicted_spreads = np.apply_along_axis(vfunc,0,probabilities[:,0])
	"""

	predicted_spreads = pd.DataFrame(pipeline_optimizer.predict(scaler.transform(matchups)),columns = ['results'])
	bet_vector = np.array(np.where(predicted_spreads > spreads,0,1))
	print(spreads)
	print(predicted_spreads)
	print(bet_vector)