import operator
import os

import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn import metrics, svm
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV


class model_tunning_params():
    """Wrap a regressor ('linear', 'svm', 'xgb' or 'rf') and tune its parameters."""

    # build the base estimator for the requested model_name
    def __init__(self, model_name, random_seed=None, params_list=None):
        self.model_name = model_name
        if model_name == 'xgb':
            self.model = xgb.XGBRegressor(learning_rate=0.1,
                                          n_estimators=1000,
                                          max_depth=7,
                                          min_child_weight=1,
                                          gamma=0,
                                          subsample=0.8,
                                          colsample_bytree=0.8,
                                          objective='reg:squarederror',
                                          n_jobs=4,
                                          scale_pos_weight=1,
                                          random_state=26)
        elif model_name == 'rf':
            self.model = RandomForestRegressor(n_estimators=100,
                                               criterion='squared_error',
                                               max_features='sqrt',
                                               max_depth=None,
                                               n_jobs=-1,
                                               verbose=3,
                                               random_state=26)
        elif model_name == 'linear':
            self.model = LinearRegression(n_jobs=-1)

        elif model_name == 'svm':
            self.model = svm.SVR(C=1.0, kernel='rbf', gamma='auto', verbose=3)
        else:
            # fail fast instead of leaving self.model undefined
            raise ValueError(
                "not a valid model for parameter tuning\nplease try one of the following:\n"
                + '-' * 20 + "\n linear \n svm \n xgb \n rf \n" + '-' * 20)

        self.params_list = params_list
        # np.random.seed() returns None, so keep the seed value itself and
        # honor the random_seed argument (defaulting to 26)
        self.random_seed = random_seed if random_seed is not None else 26
        np.random.seed(self.random_seed)

    # search for best n_estimators and return the updated model
    def modelfit_xgb(self,
                     dtrain,
                     predictors,
                     useTrainCV=True,
                     cv_folds=5,
                     early_stopping_rounds=50,
                     metric='rmse',
                     obt='reg:squarederror'):
        target = "Response"
        if useTrainCV:
            xgb_param = self.model.get_xgb_params()
            xgb_param['objective'] = obt
            if xgb_param['objective'] == 'multi:softmax':
                xgb_param['num_class'] = 8
                metric = 'merror'
                # shift labels from 1..8 down to 0..7 for softmax
                xgtrain = xgb.DMatrix(dtrain[predictors].values,
                                      label=(dtrain[target] - 1).values)
            else:
                xgtrain = xgb.DMatrix(dtrain[predictors].values,
                                      label=dtrain[target].values)
            cvresult = xgb.cv(
                xgb_param,
                xgtrain,
                num_boost_round=self.model.get_params()['n_estimators'],
                nfold=cv_folds,
                metrics=metric,
                early_stopping_rounds=early_stopping_rounds,
                verbose_eval=3)
            self.model.set_params(n_estimators=cvresult.shape[0])

        # Fit the algorithm on the data; set eval_metric on the estimator
        # instead of passing it to fit(), which newer xgboost versions reject
        self.model.set_params(eval_metric=metric)
        self.model.fit(dtrain[predictors], dtrain[target])

        # Predict training set:
        dtrain_predictions = self.model.predict(dtrain[predictors])

        if self.model._estimator_type == 'regressor':
            # clip to the valid label range 1..8, then round to integer classes
            dtrain_predictions = np.round(np.clip(dtrain_predictions, 1,
                                                  8)).astype(int)

        # print model report:
        print("\nModel Report")
        print(
            "Accuracy : %.4g" %
            metrics.accuracy_score(dtrain[target].values, dtrain_predictions))

        importance = self.model.get_booster().get_fscore()
        importance = sorted(importance.items(), key=operator.itemgetter(1))
        # plt.figure()
        df = pd.DataFrame(importance, columns=['feature', 'score'])
        df['score'] = df['score'] / df['score'].sum()
        # df.plot()
        df.plot(kind='barh',
                x='feature',
                y='score',
                legend=False,
                figsize=(6, 10))
        plt.title('XGBoost Feature Importance')
        plt.xlabel('importance value')
        plt.show()
        # return the model with the optimal n_estimators for this learning_rate

        return self.model

    def grid_search(self, data, predictors, param_test):
        print('Grid search for ' + self.model_name)
        # param_test is a dict, so join its keys instead of concatenating it
        print('Parameters to be tuned: ' + ', '.join(param_test))
        print('This could take a long time, depending on your machine '
              'resources.')
        while True:
            user_enter = input('Continue? [y/n] ')
            if user_enter == 'y':
                break
            elif user_enter == 'n':
                print('exit parameter grid search for ' + self.model_name +
                      ' model')
                return self.model, None
            else:
                print('not a valid input, please enter [y/n]')

        target = 'Response'
        gsearch = GridSearchCV(estimator=self.model,
                               param_grid=param_test,
                               cv=5,
                               scoring=myscorer,  # custom scorer assumed to be defined elsewhere in this module
                               n_jobs=-1,
                               verbose=3)

        gsearch.fit(data[predictors], data[target])
        print('\n mean test scores: ', gsearch.cv_results_['mean_test_score'])
        print('\n best parameters: ', gsearch.best_params_)
        print('\n best score: ', gsearch.best_score_)

        # update the estimator with the best parameters found
        self.model.set_params(**gsearch.best_params_)

        return self.model, gsearch
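
    # A hypothetical param_test grid for grid_search on the 'xgb' model,
    # e.g. tuning tree depth and child weight:
    #   param_test = {'max_depth': range(3, 10, 2),
    #                 'min_child_weight': range(1, 6, 2)}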

    def model_store(self):
        print('store updated model for reproducible results')
        os.makedirs('models', exist_ok=True)  # ensure the target folder exists
        joblib.dump(self.model, 'models/%s.pkl' % self.model_name)
        print('-----finished-----')
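

# Minimal usage sketch, under assumptions: 'data/train.csv' is a hypothetical
# path, the DataFrame carries integer 'Response' labels in 1..8 (matching the
# hard-coded target above), and myscorer is defined elsewhere in this module.
if __name__ == '__main__':
    train = pd.read_csv('data/train.csv')  # hypothetical input file
    predictors = [col for col in train.columns if col != 'Response']

    tuner = model_tunning_params('xgb')
    # pick n_estimators via early-stopped CV, fit, and plot feature importances
    tuner.modelfit_xgb(train, predictors)
    # then tune tree complexity around that baseline
    param_test = {'max_depth': range(3, 10, 2),
                  'min_child_weight': range(1, 6, 2)}
    model, gsearch = tuner.grid_search(train, predictors, param_test)
    tuner.model_store()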