def main():
    """Train and evaluate classifiers for next-visit NP3BRADY prediction.

    NOTE(review): this module defines ``main`` twice; this first definition
    is shadowed by the later ``def main`` when the file is executed — confirm
    which one is intended as the entry point.

    Loads data/all_visits_practice_2.csv, keeps "off" state (ONOFF == 0)
    rows, generates longitudinal features via generate_features(), and
    reports metrics for several classifiers plus a soft-voting ensemble.
    """
    # Seed NumPy's global RNG so training/CV splits are reproducible
    np.random.seed(0)

    # Create the training/test set(s) from file(s)
    train = pd.read_csv("data/all_visits_practice_2.csv")

    # Preliminary data diagnostics
    mL.describe_data(data=train, describe=True, info=True, value_counts=["ONOFF", "NP3BRADY"],
                     description="PRELIMINARY DATA DIAGNOSTICS:")

    # Encode EVENT_ID visit codes to numeric month offsets
    mL.clean_data(data=train, encode_man={"EVENT_ID": {"SC": 0, "V04": 4, "V06": 6, "V10": 10}})

    # Keep only "off" medication-state observations
    train = train[train["ONOFF"] == 0]

    # Remove the class with only a single sample (breaks stratified splits)
    train = train[train.NP3BRADY != 4]

    # Predictors for the model
    predictors = ["TIME_PASSED", "VISIT_NOW", "CAUDATE_R", "CAUDATE_L", "PUTAMEN_R", "PUTAMEN_L",
                  "SCORE_NOW"]

    # Target for the model
    target = "SCORE_NEXT"

    # Generate new features
    train = generate_features(data=train, predictors=predictors, target=target, id_name="PATNO", score_name="NP3BRADY",
                              visit_name="EVENT_ID")

    # Value counts for key columns after feature generation
    mL.describe_data(data=train, info=True, describe=True, value_counts=["VISIT_NOW", "SCORE_NEXT"],
                     description="AFTER FEATURE GENERATION:")

    # Univariate feature selection
    mL.describe_data(data=train, univariate_feature_selection=[predictors, target])

    # Algs for model
    algs = [RandomForestClassifier(n_estimators=1000, min_samples_split=50, min_samples_leaf=2, oob_score=True),
            LogisticRegression(),
            SVC(probability=True),
            GaussianNB(),
            MultinomialNB(),
            BernoulliNB(),
            KNeighborsClassifier(n_neighbors=25),
            GradientBoostingClassifier(n_estimators=10, max_depth=3)]

    # Alg names for model (parallel to algs)
    alg_names = ["Random Forest",
                 "Logistic Regression",
                 "SVM",
                 "Gaussian Naive Bayes",
                 "Multinomial Naive Bayes",
                 "Bernoulli Naive Bayes",
                 "kNN",
                 "Gradient Boosting"]

    # NOTE(review): a grid_search_params list was defined here but never
    # passed to mL.metrics; removed as dead code.

    # Soft-voting ensemble of the six algs flagged True in in_ensemble
    # (weights are parallel to the included algs only)
    ens = mL.ensemble(algs=algs, alg_names=alg_names,
                      ensemble_name="Weighted ensemble of RF, LR, SVM, GNB, KNN, and GB",
                      in_ensemble=[True, True, True, True, False, False, True, True], weights=[3, 2, 1, 3, 1, 3],
                      voting="soft")

    # Add ensemble to algs and alg_names
    algs.append(ens["alg"])
    alg_names.append(ens["name"])

    # Display metrics; the per-alg boolean lists have nine entries because the
    # ensemble was appended, and the final True entries target the ensemble
    mL.metrics(data=train, predictors=predictors, target=target, algs=algs, alg_names=alg_names,
               feature_importances=[True], base_score=[True], oob_score=[True],
               cross_val=[True, True, True, True, True, True, True, True, True],
               split_accuracy=[True, True, True, True, True, True, True, True, True],
               split_classification_report=[False, False, False, False, False, False, False, False, True],
               split_confusion_matrix=[False, False, False, False, False, False, False, False, True])
def main():
    """Score UPDRS subcomponents as time-until-milestone regression targets.

    NOTE(review): this module defines ``main`` twice; this second definition
    shadows the first at load time.

    For each UPDRS subcomponent flagged usable at severity level ``i``
    (columns use0..use3) in itemizedDistributionOfUPDRSMeaning_Use.csv,
    generates milestone features and records cross-validated r2 and RMSE for
    predicting TIME_UNTIL_MILESTONE, then writes the augmented table to
    data/updrs_subcomponents_scores.csv.
    """
    # Seed NumPy's global RNG so CV splits are reproducible
    np.random.seed(0)

    # Create the data frames from files
    all_patients = pd.read_csv("data/all_pats.csv")
    all_visits = pd.read_csv("data/all_visits.csv")
    all_updrs = pd.read_csv("data/all_updrs.csv")
    all_updrs_subcomponents = pd.read_csv("data/itemizedDistributionOfUPDRSMeaning_Use.csv")

    # Enrolled PD / Control patients
    pd_control_patients = all_patients.loc[
        ((all_patients["DIAGNOSIS"] == "PD") | (all_patients["DIAGNOSIS"] == "Control")) & (
            all_patients["ENROLL_STATUS"] == "Enrolled"), "PATNO"].unique()

    # Visit data for these patients
    pd_control_data = all_visits[all_visits["PATNO"].isin(pd_control_patients)]

    # Merge with UPDRS total scores
    pd_control_data = pd_control_data.merge(all_updrs[["PATNO", "EVENT_ID", "TOTAL"]], on=["PATNO", "EVENT_ID"],
                                            how="left")

    # Get rid of nulls for UPDRS
    pd_control_data = pd_control_data[pd_control_data["TOTAL"].notnull()]

    # Merge with patient info
    pd_control_data = pd_control_data.merge(all_patients, on="PATNO", how="left")

    # TODO: Merge patient's SC features onto baseline if times are close
    # Only include baseline and subsequent visits
    pd_control_data = pd_control_data[~pd_control_data["EVENT_ID"].isin(["ST", "U01", "PW", "SC"])]

    # Encode categoricals to numeric (visit IDs become month offsets)
    mL.clean_data(data=pd_control_data, encode_auto=["GENDER.x", "DIAGNOSIS", "HANDED"], encode_man={
        "EVENT_ID": {"BL": 0, "V01": 1, "V02": 2, "V03": 3, "V04": 4, "V05": 5, "V06": 6, "V07": 7, "V08": 8,
                     "V09": 9, "V10": 10, "V11": 11, "V12": 12}})

    # TODO: Optimize flexibility with NAs
    # Eliminate features with more than 20% NAs. isnull().mean() avoids the
    # ZeroDivisionError the original len()/len() ratio hit on empty columns,
    # and drop(columns=...) replaces the positional axis argument that was
    # removed in pandas 2.0.
    for feature in list(pd_control_data.columns):
        if pd_control_data[feature].isnull().mean() > 0.2:
            pd_control_data = pd_control_data.drop(columns=feature)

    # TODO: Rethink this
    # Eliminate features with more than 30% NA at baseline (EVENT_ID == 0);
    # mean() of an empty selection is NaN, so the comparison is safely False
    baseline_mask = pd_control_data["EVENT_ID"] == 0
    for feature in list(pd_control_data.columns):
        if pd_control_data.loc[baseline_mask, feature].isnull().mean() > 0.3:
            pd_control_data = pd_control_data.drop(columns=feature)

    # TODO: Imputation
    # Drop rows with NAs
    pd_control_data = pd_control_data.dropna()

    # Drop duplicates (keep first, delete others)
    pd_control_data = pd_control_data.drop_duplicates(subset=["PATNO", "EVENT_ID"])

    # Drop patients without baseline data (vectorized; the original looped
    # per patient, re-scanning the frame each time — O(n^2))
    bl_patients = pd_control_data.loc[pd_control_data["EVENT_ID"] == 0, "PATNO"].unique()
    pd_control_data = pd_control_data[pd_control_data["PATNO"].isin(bl_patients)]

    # Select all features in the data set
    all_data_features = list(pd_control_data.columns.values)

    for updrs_subcomponent in all_updrs_subcomponents["colname"].tolist():
        print(updrs_subcomponent)
        for i in range(4):
            # Skip subcomponents not flagged usable at severity level i
            if all_updrs_subcomponents.loc[
                    all_updrs_subcomponents["colname"] == updrs_subcomponent, "use{}".format(i)].min() != 1:
                continue

            # Generate features (and update all features list).
            # NOTE(review): the run() pipeline passes this argument as
            # filename= — confirm generate_features() accepts file= here.
            train = generate_features(data=pd_control_data, features=all_data_features, file="data/PPMI_train.csv",
                                      action=True, updrs_subsets=True, time=True, future=False, milestones=True,
                                      slopes=False, score_name=updrs_subcomponent,
                                      milestone_feature=updrs_subcomponent, milestone_value=i)

            # Initialize predictors as all features
            predictors = list(train.columns.values)

            # Features to exclude from predictors (IDs, dates, targets, ...)
            drop_predictors = ["PATNO", "EVENT_ID", "INFODT", "INFODT.x", "ORIG_ENTRY", "LAST_UPDATE",
                               "PAG_UPDRS3", "PRIMDIAG", "COMPLT", "INITMDDT", "INITMDVS", "RECRUITMENT_CAT",
                               "IMAGING_CAT", "ENROLL_DATE", "ENROLL_CAT", "ENROLL_STATUS", "BIRTHDT.x",
                               "GENDER.y", "APPRDX", "GENDER", "CNO", "TIME_FUTURE", "TIME_NOW", "SCORE_FUTURE",
                               "SCORE_SLOPE", "TIME_OF_MILESTONE", "TIME_UNTIL_MILESTONE", "BIRTHDT.y",
                               "TIME_SINCE_DIAGNOSIS", "TIME_SINCE_FIRST_SYMPTOM", "TIME_FROM_BL"]

            # List of UPDRS components (toggle: extend drop_predictors with
            # these to exclude all raw subcomponent columns from predictors)
            updrs_components = ["NP1COG", "NP1HALL", "NP1DPRS", "NP1ANXS", "NP1APAT", "NP1DDS", "NP1SLPN",
                                "NP1SLPD", "NP1PAIN", "NP1URIN", "NP1CNST", "NP1LTHD", "NP1FATG", "NP2SPCH",
                                "NP2SALV", "NP2SWAL", "NP2EAT", "NP2DRES", "NP2HYGN", "NP2HWRT", "NP2HOBB",
                                "NP2TURN", "NP2TRMR", "NP2RISE", "NP2WALK", "NP2FREZ", "PAG_UPDRS3", "NP3SPCH",
                                "NP3FACXP", "NP3RIGN", "NP3RIGRU", "NP3RIGLU", "PN3RIGRL", "NP3RIGLL",
                                "NP3FTAPR", "NP3FTAPL", "NP3HMOVR", "NP3HMOVL", "NP3PRSPR", "NP3PRSPL",
                                "NP3TTAPR", "NP3TTAPL", "NP3LGAGR", "NP3LGAGL", "NP3RISNG", "NP3GAIT",
                                "NP3FRZGT", "NP3PSTBL", "NP3POSTR", "NP3BRADY", "NP3PTRMR", "NP3PTRML",
                                "NP3KTRMR", "NP3KTRML", "NP3RTARU", "NP3RTALU", "NP3RTARL", "NP3RTALL",
                                "NP3RTALJ", "NP3RTCON"]

            # Drop UPDRS components
            # drop_predictors.extend(updrs_components)

            # Drop unwanted features from predictors list
            predictors = [feature for feature in predictors if feature not in drop_predictors]

            # Target for the model
            target = "TIME_UNTIL_MILESTONE"

            # Algs for model. NOTE(review): only the first alg's metrics are
            # harvested below; the classifier entries are evaluated against a
            # continuous target — confirm mL.metrics tolerates this.
            # Historical grid-search winners:
            #   futures: n_estimators=50, min_samples_split=75, min_samples_leaf=50
            #   slopes: min_samples_split=75, n_estimators=50, min_samples_leaf=25
            algs = [
                RandomForestRegressor(n_estimators=150, min_samples_split=100, min_samples_leaf=25, oob_score=True),
                LogisticRegression(),
                SVC(probability=True),
                GaussianNB(),
                MultinomialNB(),
                BernoulliNB(),
                KNeighborsClassifier(n_neighbors=25),
                GradientBoostingClassifier(n_estimators=10, max_depth=3)]

            # Alg names for model (parallel to algs)
            alg_names = ["Random Forest",
                         "Logistic Regression",
                         "SVM",
                         "Gaussian Naive Bayes",
                         "Multinomial Naive Bayes",
                         "Bernoulli Naive Bayes",
                         "kNN",
                         "Gradient Boosting"]

            # TODO: Configure ensemble
            # Ensemble (built but deliberately not appended; see below)
            ens = mL.ensemble(algs=algs, alg_names=alg_names,
                              ensemble_name="Weighted ensemble of RF, LR, SVM, GNB, KNN, and GB",
                              in_ensemble=[True, True, True, True, False, False, True, True],
                              weights=[3, 2, 1, 3, 1, 3],
                              voting="soft")

            # Add ensemble to algs and alg_names
            # algs.append(ens["alg"])
            # alg_names.append(ens["name"])

            # NOTE(review): an unused grid_search_params list was removed here.

            # Cross-validated r2
            metrics1 = mL.metrics(data=train, predictors=predictors, target=target, algs=algs,
                                  alg_names=alg_names, cross_val=[True], scoring="r2")

            # NOTE(review): run() reads keys like "Cross Validation r2 Random
            # Forest"; confirm mL.metrics also exposes the bare key used here.
            all_updrs_subcomponents.loc[
                all_updrs_subcomponents["colname"] == updrs_subcomponent, "over{}_r2".format(i)] = \
                metrics1["Cross Validation r2"]

            # Cross-validated RMSE
            metrics2 = mL.metrics(data=train, predictors=predictors, target=target, algs=algs,
                                  alg_names=alg_names, cross_val=[True], scoring="root_mean_squared_error")

            all_updrs_subcomponents.loc[
                all_updrs_subcomponents["colname"] == updrs_subcomponent, "over{}_rmse".format(i)] = \
                metrics2["Cross Validation root_mean_squared_error"]

    all_updrs_subcomponents.to_csv("data/updrs_subcomponents_scores.csv")
def run(preprocess_data, cohorts, target, score_name, feature_elimination_n, gen_filename, gen_action,
        gen_updrs_subsets, gen_time, gen_future, gen_milestones, gen_milestone_features_values, gen_slopes,
        predictors_filename, predictors_action, feature_importance_n, grid_search_action, grid_search_results,
        print_results, results_filename, prediction_range, range_target, range_target_description, add_predictors,
        drop_predictors):
    """Run the PPMI modeling pipeline: preprocess, select features, train, report.

    Two-phase flow: with ``predictors_action`` True, predictors are chosen by
    random-forest feature importance (threshold ``feature_importance_n``),
    written to ``predictors_filename``, and the function recurses with
    ``predictors_action`` False to train/evaluate on the chosen predictors.
    With ``print_results`` False, a summary row plus per-feature importance
    rows are appended to ``results_filename``; otherwise metrics are printed.

    ``cohorts``/``target``/``score_name`` select the data and prediction
    target; ``gen_*`` arguments are forwarded to generate_features();
    ``add_predictors`` / ``drop_predictors`` force-include / exclude feature
    names; ``feature_elimination_n`` is the NA-ratio threshold for
    feature_row_selection() (auto-tuned when None).
    """
    # Normalize add_predictors; copy so the caller's list is never mutated by
    # the extend() in the predictors_action branch below
    add_predictors = [] if add_predictors is None else list(add_predictors)

    # Columns that must survive predictor filtering (IDs, dates, target)
    data_keys = ["PATNO", "EVENT_ID", "INFODT", "PDDXDT", "SXDT", "BIRTHDT.x", "HAS_PD", target]

    # Columns needed to build the target during feature generation
    target_keys = [score_name] if gen_future or gen_slopes else [
        x[0] for x in gen_milestone_features_values] if gen_milestones else []

    # Add target keys to data keys
    data_keys.extend(target_keys)

    # TODO: Create data_preprocessing() function for all of this data preprocessing
    if preprocess_data:
        # Create the data frames from files, suppressing read warnings.
        # NOTE: np.warnings (used originally) was an accidental re-export
        # removed in NumPy >= 1.25; use the stdlib warnings module instead.
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            all_patients = pd.read_csv("data/all_pats.csv")
            all_visits = pd.read_csv("data/all_visits.csv")
            all_updrs = pd.read_csv("data/all_updrs.csv")

        # Enrolled patients of the requested cohorts (OR across cohort masks)
        pd_control_patients = all_patients.loc[
            (np.bitwise_or.reduce(np.array([(all_patients["APPRDX"] == cohort) for cohort in cohorts]))) & (
                all_patients["ENROLL_STATUS"] == "Enrolled"), "PATNO"].unique()

        # Visit data for these patients, merged with UPDRS totals and
        # patient info (suffixes favor the patient-table column names)
        pd_control_data = all_visits[all_visits["PATNO"].isin(pd_control_patients)].merge(
                all_updrs[["PATNO", "EVENT_ID", "TOTAL"]], on=["PATNO", "EVENT_ID"], how="left").merge(
                all_patients, on="PATNO", how="left", suffixes=["_x", ""])

        # Only include "off" data
        pd_control_data = pd_control_data[pd_control_data["PAG_UPDRS3"] == "NUPDRS3"]

        # NOTE(review): two commented-out prototypes for merging screening
        # (SC) data into baseline (BL) rows were removed from here; recover
        # them from version control if that merge is revived.

        # Remove SC rows
        pd_control_data = pd_control_data[pd_control_data["EVENT_ID"] != "SC"]

        # Drop duplicates based on PATNO and EVENT_ID, keep only first
        pd_control_data = pd_control_data.drop_duplicates(subset=["PATNO", "EVENT_ID"], keep="first")

        # Encode categoricals to numeric (visit IDs become month offsets)
        mL.clean_data(data=pd_control_data, encode_auto=["HANDED", "PAG_UPDRS3"], encode_man={
            "EVENT_ID": {"BL": 0, "V01": 1, "V02": 2, "V03": 3, "V04": 4, "V05": 5, "V06": 6, "V07": 7, "V08": 8,
                         "V09": 9, "V10": 10, "V11": 11, "V12": 12}})

        # Create HAS_PD flag: 1 for any PD cohort (PD/GRPD/GCPD), else 0
        pd_control_data["HAS_PD"] = 0
        pd_control_data.loc[(pd_control_data["APPRDX"] == "PD") | (pd_control_data["APPRDX"] == "GRPD") | (
            pd_control_data["APPRDX"] == "GCPD"), "HAS_PD"] = 1

        # Convert remaining categorical data to binary columns
        numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
        dummy_features = [item for item in pd_control_data.columns.values if item not in list(
                pd_control_data.select_dtypes(include=numerics).columns.values) + drop_predictors]
        pd_control_data = pd.get_dummies(pd_control_data, columns=dummy_features)

        # Controls have missing PDDXDT and SXDT, set to arbitrary date
        pd_control_data.loc[pd_control_data["HAS_PD"] == 0, "PDDXDT"] = pd.to_datetime("1/1/1800")
        pd_control_data.loc[pd_control_data["HAS_PD"] == 0, "SXDT"] = pd.to_datetime("1/1/1800")

        # Cache the cleaned data for preprocess_data=False runs
        pd_control_data.to_csv("data/PPMI_Clean_Data.csv", index=False)
    else:
        # Use preprocessed data
        pd_control_data = pd.read_csv("data/PPMI_Clean_Data.csv")

        # Convert key columns back to numeric dtypes
        pd_control_data[["PATNO", "EVENT_ID"]] = pd_control_data[["PATNO", "EVENT_ID"]].apply(pd.to_numeric,
                                                                                              errors="coerce")

    if predictors_action:
        if print_results:
            print("Optimizing Predictors . . .")

        # Drop unused columns. drop(columns=...) replaces the positional
        # axis argument removed in pandas 2.0.
        for column in pd_control_data.keys():
            if (column in drop_predictors) and (column not in data_keys):
                pd_control_data = pd_control_data.drop(columns=column)
    else:
        # Keep only requested predictors plus required keys
        pd_control_data = pd_control_data[list(
                set(add_predictors + data_keys) & set(
                        pd_control_data.columns.values.tolist()))]

        if print_results:
            # Print number patients and features before feature elimination
            print("BEFORE FEATURE ELIMINATION: Patients: {}, Features: {}".format(
                    len(pd_control_data[pd_control_data["EVENT_ID"] == 0]),
                    len(pd_control_data.keys())))

    # TODO(review): debug dump left in — remove before production
    pd_control_data.to_csv("TEST.csv")

    # Auto-tune the NA-ratio threshold: pick the n in (0.025..0.975 step
    # 0.025) that maximizes feature_row_selection's retained-data score
    if feature_elimination_n is None:
        feature_elimination_n = max([x / 1000 for x in range(25, 1000, 25)],
                                    key=lambda n: feature_row_selection(pd_control_data, n, data_keys, target_keys,
                                                                        True, True))
        if print_results:
            print("\rFeature Elimination N: {}\n".format(feature_elimination_n))

    # Feature/row elimination
    pd_control_data = feature_row_selection(pd_control_data, feature_elimination_n, data_keys, target_keys)

    if (not predictors_action) and print_results:
        # Print number patients and features after feature elimination
        print("AFTER FEATURE ELIMINATION: Patients: {}, Features: {}".format(
                len(pd_control_data[pd_control_data["EVENT_ID"] == 0]),
                len(pd_control_data.keys())))

    # Select all features in the data set
    all_data_features = list(pd_control_data.columns.values)

    # TODO(review): debug dump left in — remove before production
    pd_control_data.to_csv("testttttt.csv")

    # Generate features (and update all features list)
    train = generate_features(data=pd_control_data, features=all_data_features, filename=gen_filename,
                              action=gen_action, updrs_subsets=gen_updrs_subsets,
                              time=gen_time, future=gen_future, milestones=gen_milestones, slopes=gen_slopes,
                              score_name=score_name, milestone_features_values=gen_milestone_features_values,
                              progress=(not predictors_action) and print_results)

    if (not predictors_action) and print_results:
        # Data diagnostics after feature generation
        mL.describe_data(data=train, describe=True, description="AFTER FEATURE GENERATION:")

    # Parameters for grid search
    grid_search_params = [{"n_estimators": [50, 150, 300, 500, 750, 1000],
                           "min_samples_split": [4, 8, 25, 50, 75, 100],
                           "min_samples_leaf": [2, 8, 15, 25, 50, 75, 100]}]

    # Algs for model: a regressor for continuous targets, a classifier for
    # SCORE_SLOPE classes. Historical grid-search winners:
    #   futures: n_estimators=50, min_samples_split=75, min_samples_leaf=50
    #   newest futures: n_estimators=500, min_samples_leaf=2, min_samples_split=4
    #   TRMR: n_estimators=150, min_samples_leaf=2, min_samples_split=8
    #   slopes: n_estimators=500, min_samples_split=25, min_samples_leaf=2
    algs = [
        RandomForestRegressor(n_estimators=500, min_samples_split=4, min_samples_leaf=2,
                              oob_score=True) if target != "SCORE_SLOPE" else RandomForestClassifier(
            n_estimators=500, min_samples_split=25, min_samples_leaf=2, oob_score=True),
        LogisticRegression(),
        SVC(probability=True),
        GaussianNB(),
        MultinomialNB(),
        BernoulliNB(),
        KNeighborsClassifier(n_neighbors=25),
        GradientBoostingClassifier(n_estimators=10, max_depth=3)]

    # Alg names for model (parallel to algs; "Random Forest" keys the
    # metrics dictionaries read below)
    alg_names = ["Random Forest",
                 "Logistic Regression",
                 "SVM",
                 "Gaussian Naive Bayes",
                 "Multinomial Naive Bayes",
                 "Bernoulli Naive Bayes",
                 "kNN",
                 "Gradient Boosting"]

    # TODO: Configure ensemble
    # Ensemble (built but deliberately not appended; see commented lines)
    ens = mL.ensemble(algs=algs, alg_names=alg_names,
                      ensemble_name="Weighted ensemble of RF, LR, SVM, GNB, KNN, and GB",
                      in_ensemble=[True, True, True, True, False, False, True, True],
                      weights=[3, 2, 1, 3, 1, 3],
                      voting="soft")

    # Add ensemble to algs and alg_names
    # algs.append(ens["alg"])
    # alg_names.append(ens["name"])

    if predictors_action:
        # Initialize predictors as all numeric features
        numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
        predictors = list(train.select_dtypes(include=numerics).columns.values)

        # Drop unwanted features from predictors list
        for feature in drop_predictors:
            if feature in predictors:
                predictors.remove(feature)

        # If grid search action, use grid search estimator
        if grid_search_action:
            algs[0] = mL.metrics(data=train, predictors=predictors, target=target, algs=algs, alg_names=alg_names,
                                 scoring="r2" if target != "SCORE_SLOPE" else "accuracy",
                                 grid_search_params=grid_search_params,
                                 output=True)["Grid Search Random Forest"].best_estimator_

        # TODO(review): debug dump left in — remove before production
        train[predictors + ["PATNO"]].to_csv("test_yay_delete.csv")

        # Get feature importances from the random forest
        feature_importances = mL.metrics(data=train, predictors=predictors, target=target, algs=algs,
                                         alg_names=alg_names, feature_importances=[True], output=True,
                                         description=None)["Feature Importances Random Forest"]

        # Keep only features at or above the importance threshold
        predictors = [x for x, y in feature_importances if y >= feature_importance_n]

        # Use predictors plus added predictors
        add_predictors.extend(predictors)

        # Output predictors to file
        pd.DataFrame({"predictors": predictors}).to_csv(predictors_filename, index=False)

        # Recurse to train/evaluate on the selected predictors
        run(False, cohorts, target, score_name, feature_elimination_n, gen_filename, gen_action,
            gen_updrs_subsets, gen_time, gen_future, gen_milestones, gen_milestone_features_values, gen_slopes,
            predictors_filename, False, feature_importance_n, grid_search_action, grid_search_results, print_results,
            results_filename, prediction_range, range_target, range_target_description, add_predictors, drop_predictors)
    else:
        # Get predictors from the caller (selected in the first phase)
        predictors = add_predictors

        # Create file of training data
        train[predictors].to_csv("data/PPMI_train.csv")

        # Grid search
        if grid_search_action or grid_search_results:
            # Compute grid search
            grid_search = mL.metrics(data=train, predictors=predictors, target=target, algs=algs, alg_names=alg_names,
                                     scoring="r2" if target != "SCORE_SLOPE" else "accuracy",
                                     grid_search_params=grid_search_params, output=True)

            # If grid search action, use grid search estimator
            if grid_search_action:
                algs[0] = grid_search["Grid Search Random Forest"].best_estimator_

        # Univariate feature selection
        # mL.describe_data(data=train, univariate_feature_selection=[predictors, target])

        # Display metrics, including r2 score
        metrics = mL.metrics(data=train, predictors=predictors, target=target, algs=algs, alg_names=alg_names,
                             feature_importances=[True], base_score=[True], oob_score=[True], cross_val=[True],
                             scoring="r2", output=not print_results)
        # feature_dictionary=[data_dictionary, "FEATURE", "DSCR"])

        # Display mean absolute error score
        metrics.update(mL.metrics(data=train, predictors=predictors, target=target, algs=algs, alg_names=alg_names,
                                  cross_val=[True], scoring="mean_absolute_error", description=None,
                                  output=not print_results))

        # Display root mean squared error score
        metrics.update(mL.metrics(data=train, predictors=predictors, target=target, algs=algs, alg_names=alg_names,
                                  cross_val=[True],
                                  scoring="root_mean_squared_error", description=None,
                                  output=not print_results))

        # Placeholder so the results row below always has an accuracy entry
        metrics["Cross Validation accuracy Random Forest"] = None

        # Metrics for classification
        if target == "SCORE_SLOPE":
            # Display classification accuracy
            metrics.update(mL.metrics(data=train, predictors=predictors, target=target, algs=algs, alg_names=alg_names,
                                      cross_val=[True], scoring="accuracy", description=None, output=not print_results))

            # Display confusion matrix
            mL.metrics(data=train, predictors=predictors, target=target, algs=algs, alg_names=alg_names,
                       split_confusion_matrix=[True], description=None, output=not print_results)

        # If grid search results, print results
        if grid_search_results:
            print(grid_search["Grid Search String Random Forest"])

        if not print_results:
            # Append one summary row plus one row per feature importance.
            # NOTE(review): "mes" looks like a typo for "mae" but is kept to
            # preserve the output file schema.
            results = pd.DataFrame(
                    columns=[prediction_range, "description", "base", "oob", "r2", "mes", "rmse", "accuracy",
                             "features",
                             "importances"])
            results.loc[0, prediction_range] = range_target
            results.loc[0, "description"] = range_target_description
            results.loc[0, "base"] = metrics["Base Score Random Forest"]
            results.loc[0, "oob"] = metrics["OOB Score Random Forest"]
            results.loc[0, "r2"] = metrics["Cross Validation r2 Random Forest"]
            results.loc[0, "mes"] = metrics["Cross Validation mean_absolute_error Random Forest"]
            results.loc[0, "rmse"] = metrics["Cross Validation root_mean_squared_error Random Forest"]
            results.loc[0, "accuracy"] = metrics["Cross Validation accuracy Random Forest"]
            feature_importances = list(metrics["Feature Importances Random Forest"])
            results.loc[0, "features"] = feature_importances[0][0]
            results.loc[0, "importances"] = feature_importances[0][1]
            for feature, importance in feature_importances[1:]:
                index = results.index.max() + 1
                results.loc[index, "features"] = feature
                results.loc[index, "importances"] = importance
            results.to_csv(results_filename, mode="a", header=False, index=False)