def tune_linsvr():
    """Advance the linear-SVR 'length' meta-model by one checkpointed stage.

    ``stage_meta_init`` reports the stage to resume from; the stage's outcome
    is persisted via ``_save_meta_model`` before returning.  Relies on the
    module-level ``X``, ``Y`` and ``meta_dimension``.
    """
    name = 'LinSVR'
    dimension = 'length'
    stage, model, scale, features, best_score = stage_meta_init(
        meta_dimension, name, dimension)

    def checkpoint(is_final):
        # Persist the current model/scaler/score/feature set for this stage.
        _save_meta_model(meta_dimension, stage, dimension, name, model,
                         scale, best_score, features, final=is_final)

    if stage == 0:
        model = SVR(kernel='linear')
        best_score = -np.inf
        features = init_feat_selection(X, Y, model)
        scale, best_score = test_scaler(model, X[features], Y)
        checkpoint(False)
    elif stage in (1, 5):
        # Feature selection; the stage-5 pass is the final checkpoint.
        best_score, features = feat_selection(
            X[features], Y, scale, model, best_score, 10, -1, False)
        checkpoint(stage == 5)
    elif stage in (2, 4):
        scale, best_score = test_scaler(model, X[features], Y)
        checkpoint(False)
    elif stage == 3:
        model, best_score = C_parameter_tuning(
            X[features], Y, model, scale, best_score)
        checkpoint(False)
def tune_lasso():
    """Advance the Lasso 'length' meta-model by one checkpointed stage.

    ``stage_meta_init`` reports the stage to resume from; each stage persists
    its result via ``_save_meta_model`` so repeated calls walk the pipeline:
    scaler search -> feature selection -> alpha tuning -> re-selection ->
    final scaler search.  Relies on module-level ``X``, ``Y`` and
    ``meta_dimension``.
    """
    name = 'LassoRegression'
    dimension = 'length'
    stage, lasso_reg, scale, features, lasso_checkpoint_score = stage_meta_init(
        meta_dimension, name, dimension)
    if stage == 0:
        lasso_reg = Lasso(max_iter=1000, random_state=1108)
        lasso_checkpoint_score = -np.inf
        scale, lasso_checkpoint_score = test_scaler(lasso_reg, X, Y)
        _save_meta_model(meta_dimension, stage, dimension, name, lasso_reg,
                         scale, lasso_checkpoint_score, list(X), final=False)
    elif stage == 1:
        lasso_checkpoint_score, features = feat_selection(
            X[features], Y, scale, lasso_reg, lasso_checkpoint_score,
            24, -1, False)
        _save_meta_model(meta_dimension, stage, dimension, name, lasso_reg,
                         scale, lasso_checkpoint_score, features, final=False)
    elif stage == 2:
        # Fixed: evaluate the scaler on the selected feature subset (was the
        # full X), consistent with stages 1/3/4 and the sibling tuners.
        scale, lasso_checkpoint_score = test_scaler(lasso_reg, X[features], Y)
        _save_meta_model(meta_dimension, stage, dimension, name, lasso_reg,
                         scale, lasso_checkpoint_score, features, final=False)
    elif stage == 3:
        lasso_reg, lasso_checkpoint_score = alpha_parameter_tuning(
            X[features], Y, lasso_reg, scale, lasso_checkpoint_score)
        _save_meta_model(meta_dimension, stage, dimension, name, lasso_reg,
                         scale, lasso_checkpoint_score, features, final=False)
    elif stage == 4:
        lasso_checkpoint_score, features = feat_selection(
            X[features], Y, scale, lasso_reg, lasso_checkpoint_score,
            24, -1, False)
        _save_meta_model(meta_dimension, stage, dimension, name, lasso_reg,
                         scale, lasso_checkpoint_score, features, final=False)
    elif stage == 5:
        # Fixed: X[features] (was X) so the final scaler choice reflects the
        # feature set that is persisted with the model.
        scale, lasso_checkpoint_score = test_scaler(lasso_reg, X[features], Y)
        _save_meta_model(meta_dimension, stage, dimension, name, lasso_reg,
                         scale, lasso_checkpoint_score, features, final=True)
def tune_rf():
    """Advance the random-forest 'length' meta-model by one checkpointed stage.

    ``stage_meta_init`` reports the stage to resume from; each stage persists
    its result via ``_save_meta_model``: scaler search -> feature selection ->
    forest-parameter tuning -> re-selection -> tree-count tuning.  Relies on
    module-level ``X``, ``Y`` and ``meta_dimension``.
    """
    name = 'RFreg'
    dimension = 'length'
    stage, rf_reg, scale, features, rf_checkpoint_score = stage_meta_init(
        meta_dimension, name, dimension)
    if stage == 0:
        rf_reg = RandomForestRegressor(random_state=1108, n_estimators=100)
        rf_checkpoint_score = -np.inf
        scale, rf_checkpoint_score = test_scaler(rf_reg, X, Y)
        _save_meta_model(meta_dimension, stage, dimension, name, rf_reg,
                         scale, rf_checkpoint_score, list(X), final=False)
    elif stage == 1:
        rf_checkpoint_score, features = feat_selection_2(
            X[features], Y, scale, rf_reg, rf_checkpoint_score, 24, -1, False)
        _save_meta_model(meta_dimension, stage, dimension, name, rf_reg,
                         scale, rf_checkpoint_score, features, final=False)
    elif stage == 2:
        scale, rf_checkpoint_score = test_scaler(rf_reg, X[features], Y)
        _save_meta_model(meta_dimension, stage, dimension, name, rf_reg,
                         scale, rf_checkpoint_score, features, final=False)
    elif stage == 3:
        rf_reg, rf_checkpoint_score = forest_params(
            X[features], Y, rf_reg, scale, rf_checkpoint_score, iter_=1000)
        _save_meta_model(meta_dimension, stage, dimension, name, rf_reg,
                         scale, rf_checkpoint_score, features, final=False)
    elif stage == 4:
        scale, rf_checkpoint_score = test_scaler(rf_reg, X[features], Y)
        _save_meta_model(meta_dimension, stage, dimension, name, rf_reg,
                         scale, rf_checkpoint_score, features, final=False)
    elif stage == 5:
        rf_checkpoint_score, features = feat_selection(
            X[features], Y, scale, rf_reg, rf_checkpoint_score, 24, -1, False)
        _save_meta_model(meta_dimension, stage, dimension, name, rf_reg,
                         scale, rf_checkpoint_score, features, final=False)
    elif stage == 6:
        # Fixed: tune tree count on the selected subset X[features] (was the
        # full X), consistent with every other stage of this tuner.
        rf_reg, rf_checkpoint_score = rf_trees(
            X[features], Y, scale, rf_reg, rf_checkpoint_score)
        _save_meta_model(meta_dimension, stage, dimension, name, rf_reg,
                         scale, rf_checkpoint_score, features, final=True)
def tune_dartr():
    """Advance the DART-GBM 'length' meta-model by one checkpointed stage.

    ``stage_meta_init`` reports the stage to resume from; each stage persists
    its result via ``_save_meta_model``.  Relies on module-level ``X``, ``Y``
    and ``meta_dimension``.

    NOTE(review): a second ``tune_dartr`` is defined later in this file and
    shadows this one at module level — confirm which is intended.
    """
    name = 'DartrGBM'
    dimension = 'length'
    stage, model, scale, features, best_score = stage_meta_init(
        meta_dimension, name, dimension)

    def checkpoint(feats, is_final):
        # Persist the current model/scaler/score/feature set for this stage.
        _save_meta_model(meta_dimension, stage, dimension, name, model,
                         scale, best_score, feats, final=is_final)

    if stage == 0:
        model = lgb.LGBMRegressor(random_state=1108, n_estimators=100,
                                  subsample=.8, verbose=-1)
        best_score = -np.inf
        scale, best_score = test_scaler(model, X, Y)
        checkpoint(list(X), False)
    elif stage in (1, 5, 8):
        best_score, features = feat_selection(
            X[features], Y, scale, model, best_score, 24, -1, False)
        checkpoint(features, False)
    elif stage in (2, 4, 7):
        scale, best_score = test_scaler(model, X[features], Y)
        checkpoint(features, False)
    elif stage == 3:
        model, best_score = lgb_find_lr(model, X[features], Y,
                                        scale, best_score)
        checkpoint(features, False)
    elif stage == 6:
        model, best_score = lgb_tree_params(X[features], Y, model,
                                            scale, best_score, iter_=1000)
        checkpoint(features, False)
    elif stage == 9:
        model, best_score = lgb_drop_lr(model, X[features], Y,
                                        scale, best_score)
        checkpoint(features, True)
def tune_dartr():
    """Advance the DART-GBM 'length' model by one checkpointed stage.

    ``stage_init`` reports the stage to resume from; each stage persists its
    result via ``_save_model``.  Relies on module-level ``X`` and ``Y``.

    NOTE(review): this redefines ``tune_dartr`` and shadows the earlier
    definition at module level — confirm which is intended.
    """
    name = 'DartrGBM'
    dimension = 'length'
    stage, model, scale, features, best_score = stage_init(name, dimension)

    def checkpoint(feats, is_final):
        # Persist the current model/scaler/score/feature set for this stage.
        _save_model(stage, dimension, name, model, scale, best_score,
                    feats, final=is_final)

    if stage == 0:
        model = lgb.LGBMRegressor(random_state=1108, n_estimators=100,
                                  subsample=.8, verbose=-1)
        best_score = -np.inf
        scale, best_score = test_scaler(model, X, Y)
        checkpoint(list(X), False)
    elif stage == 1:
        best_score, features = feat_selection_2(
            X[features], Y, scale, model, best_score)
        checkpoint(features, False)
    elif stage in (2, 4, 7):
        scale, best_score = test_scaler(model, X[features], Y)
        checkpoint(features, False)
    elif stage == 3:
        model, best_score = lgb_find_lr(model, X[features], Y,
                                        scale, best_score)
        checkpoint(features, False)
    elif stage == 5:
        best_score, features = feat_selection(
            X[features], Y, scale, model, best_score, _iter=25)
        checkpoint(features, False)
    elif stage == 6:
        model, best_score = lgb_tree_params(X[features], Y, model,
                                            scale, best_score)
        checkpoint(features, False)
    elif stage == 8:
        best_score, features = feat_selection(
            X[features], Y, scale, model, best_score)
        checkpoint(features, False)
    elif stage == 9:
        model, best_score = lgb_drop_lr(model, X[features], Y,
                                        scale, best_score)
        checkpoint(features, True)
def tune_lgr():
    """Advance the LightGBM 'length' regressor by one checkpointed stage.

    ``stage_init`` reports the stage to resume from; each stage persists its
    result via ``_save_model``.  Relies on module-level ``X`` and ``Y``.
    """
    name = 'LightGBR'
    dimension = 'length'
    stage, model, scale, features, best_score = stage_init(name, dimension)

    def checkpoint(feats, is_final):
        # Persist the current model/scaler/score/feature set for this stage.
        _save_model(stage, dimension, name, model, scale, best_score,
                    feats, final=is_final)

    if stage == 0:
        model = lgb.LGBMRegressor(random_state=1108, n_estimators=100,
                                  subsample=.8, verbose=-1)
        best_score = -np.inf
        scale, best_score = test_scaler(model, X, Y)
        checkpoint(list(X), False)
    elif stage == 1:
        best_score, features = feat_selection_2(
            X[features], Y, scale, model, best_score)
        checkpoint(features, False)
    elif stage in (2, 4, 7):
        scale, best_score = test_scaler(model, X[features], Y)
        checkpoint(features, False)
    elif stage == 3:
        model, best_score = lgb_find_lr(model, X[features], Y,
                                        scale, best_score)
        checkpoint(features, False)
    elif stage == 5:
        best_score, features = feat_selection(
            X[features], Y, scale, model, best_score)
        checkpoint(features, False)
    elif stage == 6:
        model, best_score = lgb_tree_params(X[features], Y, model,
                                            scale, best_score)
        checkpoint(features, False)
    elif stage == 8:
        best_score, features = feat_selection(
            X[features], Y, scale, model, best_score, _iter=25)
        checkpoint(features, False)
    elif stage == 9:
        model, best_score = lgb_drop_lr(model, X[features], Y,
                                        scale, best_score)
        checkpoint(features, True)
def tune_dart():
    """Advance the DART-GBM 'winner' classifier by one checkpointed stage.

    ``stage_init`` reports the stage to resume from; each stage persists its
    result via ``_save_model``.  Relies on module-level ``X``, ``Y`` and
    ``EXTENSION``.
    """
    name = 'DartGBM'
    dimension = 'winner'
    stage, clf, best_score = stage_init(name, dimension, extension=EXTENSION)

    def checkpoint(is_final):
        # Persist the current classifier and score for this stage.
        _save_model(stage, 'winner', name, clf, best_score,
                    final=is_final, extension=EXTENSION)

    if stage == 0:
        clf = lgb.LGBMClassifier(random_state=1108, n_estimators=100,
                                 subsample=.8, verbose=-1, is_unbalance=True)
        clf, best_score = pipe_init(X, Y, clf)
        checkpoint(False)
    elif stage in (1, 3, 6, 10, 14):
        clf, best_score = test_scaler(clf, best_score, X, Y)
        checkpoint(False)
    elif stage in (2, 5, 9, 13):
        clf, best_score = feat_selection(X, Y, clf, best_score)
        checkpoint(False)
    elif stage == 4:
        clf, best_score = pca_tune(X, Y, clf, best_score)
        checkpoint(False)
    elif stage in (8, 12):
        clf, best_score = pca_tune(X, Y, clf, best_score, iter_=10)
        checkpoint(False)
    elif stage == 7:
        clf, best_score = lgb_find_lr(clf, X, Y, best_score)
        checkpoint(False)
    elif stage == 11:
        clf, best_score = lgb_tree_params(X, Y, clf, best_score)
        checkpoint(False)
    elif stage == 15:
        clf, best_score = lgb_drop_lr(clf, X, Y, best_score)
        checkpoint(True)
def tune_log():
    """Advance the logistic-regression 'winner' model by one checkpointed stage.

    ``stage_init`` reports the stage to resume from; each stage persists its
    result via ``_save_model``.  Relies on module-level ``X``, ``Y`` and
    ``EXTENSION``.
    """
    name = 'LogRegression'
    dimension = 'winner'
    stage, clf, best_score = stage_init(name, dimension, extension=EXTENSION)

    def checkpoint(is_final):
        # Persist the current classifier and score for this stage.
        _save_model(stage, 'winner', name, clf, best_score,
                    final=is_final, extension=EXTENSION)

    if stage == 0:
        clf = LogisticRegression(max_iter=1000, random_state=1108,
                                 class_weight='balanced', solver='lbfgs')
        clf, best_score = pipe_init(X, Y, clf)
        checkpoint(False)
    elif stage in (1, 3, 6, 10):
        clf, best_score = test_scaler(clf, best_score, X, Y)
        checkpoint(False)
    elif stage in (2, 5, 9, 13):
        clf, best_score = feat_selection(X, Y, clf, best_score)
        checkpoint(False)
    elif stage == 4:
        clf, best_score = pca_tune(X, Y, clf, best_score)
        checkpoint(False)
    elif stage in (8, 12):
        clf, best_score = pca_tune(X, Y, clf, best_score, iter_=10)
        checkpoint(False)
    elif stage == 7:
        clf, best_score = C_parameter_tuning(X, Y, clf, best_score)
        checkpoint(False)
    elif stage == 11:
        clf, best_score = test_solver(X, Y, clf, best_score)
        checkpoint(False)
    elif stage == 14:
        # Last stage: one more scaler pass, then mark the model final.
        clf, best_score = test_scaler(clf, best_score, X, Y)
        checkpoint(True)
def tune_linsvc():
    """Advance the linear-SVC 'winner' model by one checkpointed stage.

    ``stage_init`` reports the stage to resume from; each stage persists its
    result via ``_save_model``.  Relies on module-level ``X``, ``Y`` and
    ``EXTENSION``.
    """
    name = 'LinSVC'
    dimension = 'winner'
    stage, clf, best_score = stage_init(name, dimension, extension=EXTENSION)

    def checkpoint(is_final):
        # Persist the current classifier and score for this stage.
        _save_model(stage, 'winner', name, clf, best_score,
                    final=is_final, extension=EXTENSION)

    if stage == 0:
        clf = SVC(random_state=1108, class_weight='balanced',
                  kernel='linear', probability=True)
        clf, best_score = pipe_init(X, Y, clf)
        checkpoint(False)
    elif stage in (1, 3, 6):
        clf, best_score = test_scaler(clf, best_score, X, Y)
        checkpoint(False)
    elif stage in (2, 5, 9):
        clf, best_score = feat_selection(X, Y, clf, best_score)
        checkpoint(False)
    elif stage == 4:
        clf, best_score = pca_tune(X, Y, clf, best_score)
        checkpoint(False)
    elif stage == 7:
        clf, best_score = C_parameter_tuning(X, Y, clf, best_score)
        checkpoint(False)
    elif stage == 8:
        clf, best_score = pca_tune(X, Y, clf, best_score, iter_=10)
        checkpoint(False)
    elif stage == 10:
        # Last stage: one more scaler pass, then mark the model final.
        clf, best_score = test_scaler(clf, best_score, X, Y)
        checkpoint(True)
def tune_rf():
    """Advance the random-forest 'winner' classifier by one checkpointed stage.

    ``stage_init`` reports the stage to resume from; each stage persists its
    result via ``_save_model``.  Relies on module-level ``X``, ``Y`` and
    ``EXTENSION``.

    NOTE(review): a ``tune_rf`` regressor tuner is also defined earlier in
    this file; this definition shadows it at module level — confirm which is
    intended.
    """
    name = 'RFclass'
    dimension = 'winner'
    stage, clf, best_score = stage_init(name, dimension, extension=EXTENSION)

    def checkpoint(is_final):
        # Persist the current classifier and score for this stage.
        _save_model(stage, 'winner', name, clf, best_score,
                    final=is_final, extension=EXTENSION)

    if stage == 0:
        clf = RandomForestClassifier(random_state=1108, n_estimators=100)
        clf, best_score = pipe_init(X, Y, clf)
        checkpoint(False)
    elif stage in (1, 3, 6, 10):
        clf, best_score = test_scaler(clf, best_score, X, Y)
        checkpoint(False)
    elif stage in (2, 5, 9):
        clf, best_score = feat_selection(X, Y, clf, best_score)
        checkpoint(False)
    elif stage == 4:
        clf, best_score = pca_tune(X, Y, clf, best_score)
        checkpoint(False)
    elif stage == 7:
        clf, best_score = forest_params(X, Y, clf, best_score)
        checkpoint(False)
    elif stage == 8:
        clf, best_score = pca_tune(X, Y, clf, best_score, iter_=10)
        checkpoint(False)
    elif stage == 11:
        clf, best_score = rf_trees(X, Y, clf, best_score)
        checkpoint(True)