Code example #1
def smac_train_test(trainable, X_train, y_train):
    try:
        cv_score, logloss, execution_time = cross_val_score_track_trials(
            trainable,
            X_train,
            y_train,
            cv=self.cv,
            scoring=self.scoring)
        logger.debug("Successful trial of SMAC")
    except BaseException as e:
        # If there is any error in cross validation, use the score based on a random train-test split as the evaluation criterion
        if self.handle_cv_failure:
            X_train_part, X_validation, y_train_part, y_validation = train_test_split(
                X_train, y_train, test_size=0.20)
            start = time.time()
            trained = trainable.fit(X_train_part, y_train_part)
            scorer = check_scoring(trainable, scoring=self.scoring)
            cv_score = scorer(trained, X_validation, y_validation)
            execution_time = time.time() - start
            y_pred_proba = trained.predict_proba(X_validation)
            try:
                logloss = log_loss(y_true=y_validation,
                                   y_pred=y_pred_proba)
            except BaseException:
                logloss = 0
                logger.debug("Warning, log loss cannot be computed")
        else:
            logger.debug("Error {} with pipeline:{}".format(
                e, trainable.to_json()))
            raise e
    return cv_score, logloss, execution_time
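All five snippets share one pattern: run cross validation, and if it raises, optionally fall back to scoring a single random 80/20 holdout split. They are nested helpers in the Lale source, which is why self is available without being a parameter. Below is a minimal self-contained sketch of that pattern using plain scikit-learn, with cross_val_score standing in for Lale's internal cross_val_score_track_trials; the function name and defaults are illustrative, not Lale's actual code:

import time
import numpy as np
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import check_scoring

def cv_score_with_fallback(trainable, X_train, y_train,
                           cv=5, scoring="accuracy", handle_cv_failure=True):
    try:
        # Preferred path: k-fold cross validation, timed as a whole
        start = time.time()
        cv_score = np.mean(cross_val_score(trainable, X_train, y_train,
                                           cv=cv, scoring=scoring))
        execution_time = time.time() - start
    except Exception:
        if not handle_cv_failure:
            raise
        # Fallback path: score a single random 80/20 train-test split
        X_tr, X_val, y_tr, y_val = train_test_split(X_train, y_train,
                                                    test_size=0.20)
        start = time.time()
        trained = trainable.fit(X_tr, y_tr)
        scorer = check_scoring(trainable, scoring=scoring)
        cv_score = scorer(trained, X_val, y_val)
        execution_time = time.time() - start
    return cv_score, execution_time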
Code example #2
File: hyperopt_regressor.py Project: yutarochan/lale
        def hyperopt_train_test(params, X_train, y_train):
            warnings.filterwarnings("ignore")

            reg = create_instance_from_hyperopt_search_space(
                self.estimator, params)
            try:
                cv_score, _, execution_time = cross_val_score_track_trials(
                    reg,
                    X_train,
                    y_train,
                    cv=KFold(self.cv),
                    scoring=self.scoring)
                logger.debug("Successful trial of hyperopt")
            except BaseException as e:
                # If there is any error in cross validation, use the score based on a random train-test split as the evaluation criterion
                if self.handle_cv_failure:
                    X_train_part, X_validation, y_train_part, y_validation = train_test_split(
                        X_train, y_train, test_size=0.20)
                    start = time.time()
                    reg_trained = reg.fit(X_train_part, y_train_part)
                    scorer = check_scoring(reg, scoring=self.scoring)
                    cv_score = scorer(reg_trained, X_validation, y_validation)
                    execution_time = time.time() - start
                else:
                    logger.debug(e)
                    logger.debug("Error {} with pipeline:{}".format(
                        e, reg.to_json()))
                    raise e

            return cv_score, execution_time
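For context, an objective like hyperopt_train_test is typically wrapped and handed to hyperopt's fmin, which minimizes a loss. The wrapper and search space below are an illustrative sketch, not Lale's actual code:

from hyperopt import STATUS_OK, Trials, fmin, hp, tpe

def objective(params):
    # X_train and y_train are captured from the enclosing scope,
    # just as in the snippets above
    cv_score, execution_time = hyperopt_train_test(params, X_train, y_train)
    # fmin minimizes, so negate the score to maximize it
    return {"loss": -cv_score, "status": STATUS_OK, "time": execution_time}

space = {"alpha": hp.loguniform("alpha", -5, 0)}  # hypothetical search space
trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, max_evals=50, trials=trials)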
Code example #3
File: hyperopt_classifier.py Project: kant/lale
        def hyperopt_train_test(params, X_train, y_train):
            warnings.filterwarnings("ignore")

            clf = create_instance_from_hyperopt_search_space(
                self.model, params)
            try:
                cv_score, logloss, execution_time = cross_val_score_track_trials(
                    clf, X_train, y_train, cv=self.cv)
                logger.debug("Successful trial of hyperopt")
            except BaseException as e:
                # If there is any error in cross validation, use the accuracy based on a random train-test split as the evaluation criterion
                if self.handle_cv_failure:
                    X_train_part, X_validation, y_train_part, y_validation = train_test_split(
                        X_train, y_train, test_size=0.20)
                    start = time.time()
                    clf_trained = clf.fit(X_train_part, y_train_part)
                    predictions = clf_trained.predict(X_validation)
                    execution_time = time.time() - start
                    y_pred_proba = clf_trained.predict_proba(X_validation)
                    try:
                        logloss = log_loss(y_true=y_validation,
                                           y_pred=y_pred_proba)
                    except BaseException:
                        logloss = 0
                        logger.debug("Warning, log loss cannot be computed")
                    cv_score = accuracy_score(
                        y_validation, [round(pred) for pred in predictions])
                else:
                    logger.debug(e)
                    logger.debug("Error {} with pipeline:{}".format(
                        e, clf.to_json()))
                    raise e
            #print("TRIALS")
            #print(json.dumps(self.get_trials().trials, default = myconverter, indent=4))
            return cv_score, logloss, execution_time
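The classifier variants also report log loss, which requires class probabilities from predict_proba; when those cannot be produced, log_loss raises and the snippets fall back to logloss = 0. A small standalone illustration of the computation:

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.20, random_state=0)
trained = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
# log_loss expects per-class probabilities, not hard predictions
print(log_loss(y_true=y_val, y_pred=trained.predict_proba(X_val)))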
Code example #4
        def hyperopt_train_test(params, X_train, y_train):
            warnings.filterwarnings("ignore")

            clf = create_instance_from_hyperopt_search_space(
                self.estimator, params)
            try:
                cv_score, logloss, execution_time = cross_val_score_track_trials(
                    clf, X_train, y_train, cv=self.cv, scoring=self.scoring)
                logger.debug("Successful trial of hyperopt")
            except BaseException as e:
                # If there is any error in cross validation, use the score based on a random train-test split as the evaluation criterion
                if self.handle_cv_failure:
                    X_train_part, X_validation, y_train_part, y_validation = train_test_split(
                        X_train, y_train, test_size=0.20)
                    start = time.time()
                    clf_trained = clf.fit(X_train_part, y_train_part)
                    scorer = check_scoring(clf, scoring=self.scoring)
                    cv_score = scorer(clf_trained, X_validation, y_validation)
                    execution_time = time.time() - start
                    y_pred_proba = clf_trained.predict_proba(X_validation)
                    try:
                        logloss = log_loss(y_true=y_validation,
                                           y_pred=y_pred_proba)
                    except BaseException:
                        logloss = 0
                        logger.debug("Warning, log loss cannot be computed")
                else:
                    logger.debug(e)
                    logger.debug("Error {} with pipeline:{}".format(
                        e, clf.to_json()))
                    raise e
            return cv_score, logloss, execution_time
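check_scoring, used in the fallback branch of examples #1, #2, #4, and #5, turns a scoring string (or None) into a scorer callable with the signature scorer(estimator, X, y). A quick demonstration:

from sklearn.datasets import load_iris
from sklearn.metrics import check_scoring
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(random_state=0).fit(X, y)
scorer = check_scoring(clf, scoring="accuracy")
print(scorer(clf, X, y))  # equivalent to clf.score(X, y) for accuracy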
Code example #5
        def hyperopt_train_test(params, X_train, y_train):
            warnings.filterwarnings("ignore")

            trainable = create_instance_from_hyperopt_search_space(
                self.estimator, params
            )
            try:
                cv_score, logloss, execution_time = cross_val_score_track_trials(
                    trainable,
                    X_train,
                    y_train,
                    cv=self.cv,
                    scoring=self.scoring,
                    args_to_scorer=self.args_to_scorer,
                )
                logger.debug(
                    "Successful trial of hyperopt with hyperparameters:{}".format(
                        params
                    )
                )
            except BaseException as e:
                # If there is any error in cross validation, use the score based on a random train-test split as the evaluation criterion
                if self.handle_cv_failure and trainable is not None:
                    (
                        X_train_part,
                        X_validation,
                        y_train_part,
                        y_validation,
                    ) = train_test_split(X_train, y_train, test_size=0.20)
                    start = time.time()
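                    # NOTE: fit_params is not defined in this snippet; in the
                    # Lale source it is presumably captured from the enclosing scope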
                    trained = trainable.fit(X_train_part, y_train_part, **fit_params)
                    scorer = check_scoring(trainable, scoring=self.scoring)
                    cv_score = scorer(
                        trained, X_validation, y_validation, **self.args_to_scorer
                    )
                    execution_time = time.time() - start
                    y_pred_proba = trained.predict_proba(X_validation)
                    try:
                        logloss = log_loss(y_true=y_validation, y_pred=y_pred_proba)
                    except BaseException:
                        logloss = 0
                        logger.debug("Warning, log loss cannot be computed")
                else:
                    logger.debug(e)
                    if trainable is None:
                        logger.debug(
                            "Error {} with uncreatable pipeline with parameters:{}".format(
                                e, lale.pretty_print.hyperparams_to_string(params)
                            )
                        )
                    else:
                        logger.debug(
                            "Error {} with pipeline:{}".format(e, trainable.to_json())
                        )
                    raise e
            return cv_score, logloss, execution_time
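Example #5 differs from #4 mainly in that it threads self.args_to_scorer through both cross validation and the fallback scorer call as extra keyword arguments. A plausible use is passing sample weights to the underlying metric; the names below are hypothetical, not from Lale:

# Hypothetical setup: weight validation samples when scoring
args_to_scorer = {"sample_weight": validation_weights}  # validation_weights assumed
scorer = check_scoring(trainable, scoring="accuracy")
cv_score = scorer(trained, X_validation, y_validation, **args_to_scorer)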