def tune_bayesian_ridge_regression_hyperparameters():
    #hyperparameters
    alpha1 = [1e-08, 1e-07, 1e-06, 1e-05, 1e-04]#1e-8 to 1e-4 in powers of 10
    lambda1 = [1e-08, 1e-07, 1e-06, 1e-05, 1e-04]#1e-8 to 1e-4 in powers of 10
    alpha2 = [1e-08, 1e-07, 1e-06, 1e-05, 1e-04]#1e-8 to 1e-4 in powers of 10
    lambda2 = [1e-08, 1e-07, 1e-06, 1e-05, 1e-04]#1e-8 to 1e-4 in powers of 10
    n_iter = list(range(200, 401, 50))#n_iter goes from 200 to 400 in steps of 50

    
    #trackers for best model and its scores
    best_model = None
    best_score = None
    best_hyperparameters = []

    run_once_flag = False
    for alpha1_element in alpha1:
        for lambda1_element in lambda1:
            for alpha2_element in alpha2:
                for lambda2_element in lambda2:
                    for n_iter_element in n_iter:
                        
                        myModel = BayesianRidge(alpha_1=alpha1_element, lambda_1=lambda1_element, alpha_2=alpha2_element, lambda_2=lambda2_element, n_iter=n_iter_element)#should we set the 'normalize' parameter to True? default is False
                        
                        #we get back a list of the scores
                        myScores = evaluate_model(myModel, 'Bayesian Ridge Regression') # Least squares with Gaussian (L2-like) priors on the weights
                        
                        #on the first iteration just record this model and its scores, because otherwise we'd have nothing to compare against
                        if not run_once_flag:
                            best_model = myModel
                            best_score = myScores
                            best_hyperparameters.append(alpha1_element)
                            best_hyperparameters.append(lambda1_element)
                            best_hyperparameters.append(alpha2_element)
                            best_hyperparameters.append(lambda2_element)
                            best_hyperparameters.append(n_iter_element)
                            run_once_flag = True


                        #check if we have a better model based on validation MSE, and update if we do
                        if myScores[1] < best_score[1]: #we want the validation MSE
                            best_model = myModel
                            best_score = myScores
                            best_hyperparameters =[]#clear any old ones
                            best_hyperparameters.append(alpha1_element)
                            best_hyperparameters.append(lambda1_element)
                            best_hyperparameters.append(alpha2_element)
                            best_hyperparameters.append(lambda2_element)
                            best_hyperparameters.append(n_iter_element)

    #now that we've gone through all combinations of hyperparameters, store everything in a BestModelObject and return it
    return BestModelObject(best_model, best_score, best_hyperparameters)#return best model with best hyperparameters
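#A minimal sketch (not called anywhere) of how the five nested loops above could be
#flattened with itertools.product; the search itself is assumed to be unchanged, this
#only removes the nesting. BayesianRidge and evaluate_model come from this module.
def _bayesian_ridge_grid_sketch(alpha1, lambda1, alpha2, lambda2, n_iter):
    from itertools import product
    for a1, l1, a2, l2, n in product(alpha1, lambda1, alpha2, lambda2, n_iter):
        candidate = BayesianRidge(alpha_1=a1, lambda_1=l1, alpha_2=a2, lambda_2=l2, n_iter=n)
        #each combination is scored exactly as in the loops above
        yield candidate, evaluate_model(candidate, 'Bayesian Ridge Regression')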
def tune_linear_regression_hyperparameters():
    #hyperparameters --> none
    
    myModel = LinearRegression()#should we set the 'normalize' parameter to True? default is False

    #we get back a list of the scores
    myScores = evaluate_model(myModel, 'Linear Regression') # Ordinary least squares, no regularization

    #trackers for best model and its scores
    best_model = myModel
    best_score = myScores
    best_hyperparameters =[]

    #there are no hyperparameter combinations to search here, so just store everything in a BestModelObject and return it
    return BestModelObject(best_model, best_score, best_hyperparameters)#return best model with best hyperparameters
def tune_huber_regression_hyperparameters():
    #hyperparameters
    alpha = [1e-15, 1e-10, 1e-8, 1e-4, 1e-3,1e-2, 1, 5, 10, 20]
    #epsilon: the parameter epsilon controls the number of samples that should be classified as outliers. The smaller the epsilon, the more robust it is to outliers
    #epsilon = list(range(1.0,6.0,0.5))#float, greater than 1.0, default 1.35 (do 1 to 5)
    epsilon = np.append(np.linspace(1,5,9),[1.35])
    tol = [0.01, .001, 0.0001, .00001]

    #trackers for best model and its scores
    best_model = None
    best_score = None
    best_hyperparameters = []

    run_once_flag = False
    for alpha_element in alpha:
        for epsilon_element in epsilon:
            for tol_element in tol:
                
                myModel = HuberRegressor(alpha=alpha_element, epsilon=epsilon_element, tol=tol_element)
                
                #we get back a list of the scores
                myScores = evaluate_model(myModel, 'Huber Regression') # Huber loss with L2 reg.
                
                #on the first iteration just record this model and its scores, because otherwise we'd have nothing to compare against
                if not run_once_flag:
                    best_model = myModel
                    best_score = myScores
                    best_hyperparameters.append(alpha_element)
                    best_hyperparameters.append(epsilon_element)
                    best_hyperparameters.append(tol_element)
                    run_once_flag = True


                #check if we have a better model based on validation MSE, and update if we do
                if myScores[1] < best_score[1]: #we want the validation MSE
                    best_model = myModel
                    best_score = myScores
                    best_hyperparameters =[]#clear any old ones
                    best_hyperparameters.append(alpha_element)
                    best_hyperparameters.append(epsilon_element)
                    best_hyperparameters.append(tol_element)

    #now that we've gone through all combinations of hyperparameters, store everything in a BestModelObject and return it; BestModelObject is just a custom class that acts as a container
    return BestModelObject(best_model, best_score, best_hyperparameters)#return best model with best hyperparameters
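#Illustration only: a hypothetical NumPy sketch of the Huber loss that the epsilon grid
#above is tuning. Residuals with |r| <= epsilon are penalised quadratically and larger
#residuals only linearly, which is why a smaller epsilon is more robust to outliers
#(scikit-learn applies the loss to residuals scaled by an estimated sigma). Not used by the tuner.
def _huber_loss_sketch(residuals, epsilon=1.35):
    r = np.abs(np.asarray(residuals, dtype=float))
    quadratic = 0.5 * r ** 2
    linear = epsilon * r - 0.5 * epsilon ** 2
    return np.where(r <= epsilon, quadratic, linear)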
def tune_random_forest_regression_hyperparameters():
    #hyperparameters
    #bootstrap sampling is enabled by default, so each tree gets its own sample
    n_estimators = list(range(1,26,1))#The number of trees in the forest.
    max_depth = list(range(5,26,1))#Max depth of the tree
    max_features = list(range(1,d+1,1))#d is assumed to be a module-level variable holding the number of input features

    #trackers for best model and its scores
    best_model = None
    best_score = None
    best_hyperparameters = []

    run_once_flag = False
    for n_estimators_element in n_estimators:
        #for max_depth_element in max_depth:
        for max_features_element in max_features:
            
            myModel = RandomForestRegressor(n_estimators=n_estimators_element, max_features=max_features_element)
            
            #we get back a list of the scores
            myScores = evaluate_model(myModel, 'Random Forest Regression') # Ensemble of regression trees, squared-error split criterion
            
            #on the first iteration just record this model and its scores, because otherwise we'd have nothing to compare against
            if not run_once_flag:
                best_model = myModel
                best_score = myScores
                best_hyperparameters.append(n_estimators_element)
                #best_hyperparameters.append(max_depth_element)
                best_hyperparameters.append(max_features_element)
                run_once_flag = True


            #check if we have a better model based on validation MSE, and update if we do
            if myScores[1] < best_score[1]: #we want the validation MSE
                best_model = myModel
                best_score = myScores
                best_hyperparameters =[]#clear any old ones
                best_hyperparameters.append(n_estimators_element)
                #best_hyperparameters.append(max_depth_element)
                best_hyperparameters.append(max_features_element)

    #now that we've gone through all combinations of hyperparameters, store everything in a BestModelObject and return it
    return BestModelObject(best_model, best_score, best_hyperparameters)#return best model with best hyperparameters
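#The max_features grid above relies on a module-level 'd' that is assumed to hold the
#number of input features. A minimal, uncalled sketch of one way it could be derived;
#the name and shape of the training matrix are assumptions, not taken from this file.
def _infer_feature_count_sketch(X):
    #X is assumed to be a 2-D array-like of shape (n_samples, n_features)
    return np.asarray(X).shape[1]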
def tune_lasso_regression_hyperparameters():
    #hyperparameters
    #depending on what gets picked as the best alpha, maybe we should consider another pass with a smaller window around the best value
    alpha = [1e-15, 1e-10, 1e-8, 1e-5, 1e-4, 1e-3, 1e-2, 1, 5, 10]#range from 0 to infinity, don't use 0 - use plain linear regression if this is the case
    #alpha2 = []#if make a new set of values centered around the best from above?
    tol = [0.01, .001, 0.0001, .00001]
    positive = [True, False]

    #trackers for best model and its scores
    best_model = None
    best_score = None
    best_hyperparameters = []

    run_once_flag = False
    for alpha_element in alpha:
        for tol_element in tol:
            for positive_element in positive:
                myModel = Lasso(alpha=alpha_element, tol=tol_element, positive=positive_element)#should we set the 'normalize' parameter to True? default is False
                
                #we get back a list of the scores
                myScores = evaluate_model(myModel, 'Lasso Regression') # Least squares loss with L1 reg.
                
                #on the first iteration just record this model and its scores, because otherwise we'd have nothing to compare against
                if not run_once_flag:
                    best_model = myModel
                    best_score = myScores
                    best_hyperparameters.append(alpha_element)
                    best_hyperparameters.append(tol_element)
                    best_hyperparameters.append(positive_element)
                    run_once_flag = True


                #check if we have a better model based on validation MSE, and update if we do
                if myScores[1] < best_score[1]: #we want to check the validation MSE
                    best_model = myModel
                    best_score = myScores
                    best_hyperparameters =[]#clear any old ones
                    best_hyperparameters.append(alpha_element)
                    best_hyperparameters.append(tol_element)
                    best_hyperparameters.append(positive_element)

    #now that we've gone through all combinations of hyperparameters, store everything in a BestModelObject and return it
    return BestModelObject(best_model, best_score, best_hyperparameters)#return best model with best hyperparameters
def tune_ridge_regression_hyperparameters():
    #hyperparameters
    alpha = [1e-15, 1e-10, 1e-8, 1e-4, 1e-3, 1e-2, 1, 5, 10, 20]
    tol = [0.01, .001, 0.0001, .00001]
    solver = ['auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga']

    #trackers for best model and its scores
    best_model = None
    best_score = None
    best_hyperparameters = []

    run_once_flag = False
    for alpha_element in alpha:
        for tol_element in tol:
            for solver_element in solver:
                
                myModel = Ridge(alpha=alpha_element, tol=tol_element, solver=solver_element)#should we set the 'normalize' parameter to True? default is False
                
                #we get back a list of the scores
                myScores = evaluate_model(myModel, 'Ridge Regression') # Least squares loss with L2 reg.
                
                #on the first iteration just record this model and its scores, because otherwise we'd have nothing to compare against
                if not run_once_flag:
                    best_model = myModel
                    best_score = myScores
                    best_hyperparameters.append(alpha_element)
                    best_hyperparameters.append(tol_element)
                    best_hyperparameters.append(solver_element)
                    run_once_flag = True


                #check if we have a better model based on validation MSE, and update if we do
                if myScores[1] < best_score[1]: #we want the validation MSE
                    best_model = myModel
                    best_score = myScores
                    best_hyperparameters =[]#clear any old ones
                    best_hyperparameters.append(alpha_element)
                    best_hyperparameters.append(tol_element)
                    best_hyperparameters.append(solver_element)

    #now that we've gone through all combinations of hyperparameters, store everything in a BestModelObject and return it
    return BestModelObject(best_model, best_score, best_hyperparameters)#return best model with best hyperparameters
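#For comparison, a minimal sketch of the same Ridge grid expressed with scikit-learn's
#GridSearchCV. Note it scores candidates with k-fold cross-validation rather than the
#single validation split evaluate_model is assumed to use, so the winner may differ.
#X_train and y_train are placeholder names, not variables defined in this file.
def _ridge_gridsearchcv_sketch(X_train, y_train):
    from sklearn.model_selection import GridSearchCV
    param_grid = {
        'alpha': [1e-15, 1e-10, 1e-8, 1e-4, 1e-3, 1e-2, 1, 5, 10, 20],
        'tol': [0.01, 0.001, 0.0001, 0.00001],
        'solver': ['auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'],
    }
    search = GridSearchCV(Ridge(), param_grid, scoring='neg_mean_squared_error', cv=5)
    search.fit(X_train, y_train)
    return search.best_estimator_, search.best_params_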
def tune_SVR_hyperparameters():
    #hyperparameters
    tol = [0.01, .001, 0.0001, .00001]
    C = [0.001, 0.01, 0.1, 0.5, 1, 1.5, 2.0, 2.5, 3]#C from 0.5 to 3 in steps of 0.5, plus a few smaller values
    kernel = ['linear', 'poly', 'sigmoid', 'rbf']
    degree = [2, 3, 4]#this will only be used if kernel is poly
    


    #trackers for best model and its scores
    best_model = None
    best_score = None
    best_hyperparameters = []

    run_once_flag = False
    for tol_element in tol:
        for c_element in C:
            for kernel_element in kernel:
                if kernel_element == 'poly':#use the degree
                    for degree_element in degree:

                        myModel = SVR(tol=tol_element, C=c_element, kernel=kernel_element, degree=degree_element)
                        
                        #we get back a list of the scores
                        myScores = evaluate_model(myModel, 'Support Vector Regression') # Epsilon-insensitive loss with L2 reg.
                        
                        #on the first iteration just record this model and its scores, because otherwise we'd have nothing to compare against
                        if not run_once_flag:
                            best_model = myModel
                            best_score = myScores
                            best_hyperparameters.append(tol_element)
                            best_hyperparameters.append(c_element)
                            best_hyperparameters.append(kernel_element)
                            best_hyperparameters.append(degree_element)
                            run_once_flag = True


                        #check if we have a better model based on validation MSE, and update if we do
                        if myScores[1] < best_score[1]: #we want the validation MSE
                            best_model = myModel
                            best_score = myScores
                            best_hyperparameters =[]#clear any old ones
                            best_hyperparameters.append(tol_element)
                            best_hyperparameters.append(c_element)
                            best_hyperparameters.append(kernel_element)
                            best_hyperparameters.append(degree_element)
                else:
                    myModel = SVR(tol=tol_element, C=c_element, kernel=kernel_element)
                        
                    #we get back a list of the scores
                    myScores = evaluate_model(myModel, 'Support Vector Regression') # Epsilon-insensitive loss with L2 reg.
                    
                    #on the first iteration just record this model and its scores, because otherwise we'd have nothing to compare against
                    if not run_once_flag:
                        best_model = myModel
                        best_score = myScores
                        best_hyperparameters.append(tol_element)
                        best_hyperparameters.append(c_element)
                        best_hyperparameters.append(kernel_element)
                        run_once_flag = True


                    #check if we have a better model based on validation MSE, and update if we do
                    if myScores[1] < best_score[1]: #we want the validation MSE
                        best_model = myModel
                        best_score = myScores
                        best_hyperparameters =[]#clear any old ones
                        best_hyperparameters.append(tol_element)
                        best_hyperparameters.append(c_element)
                        best_hyperparameters.append(kernel_element)

    #now that we've gone through all combinations of hyperparameters, store everything in a BestModelObject and return it
    return BestModelObject(best_model, best_score, best_hyperparameters)#return best model with best hyperparameters
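#Note: scikit-learn's SVR ignores 'degree' for every kernel except 'poly', so the if/else
#above could be collapsed into a single loop. A minimal, uncalled sketch of that idea:
def _svr_grid_sketch(tol, C, kernel, degree):
    for tol_element in tol:
        for c_element in C:
            for kernel_element in kernel:
                #only expand the degree grid for the poly kernel; otherwise keep sklearn's default of 3
                for degree_element in (degree if kernel_element == 'poly' else [3]):
                    candidate = SVR(tol=tol_element, C=c_element, kernel=kernel_element, degree=degree_element)
                    yield candidate, evaluate_model(candidate, 'Support Vector Regression')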
def tune_kernel_ridge_regression_hyperparameters():
    #hyperparameters
    alpha = [1e-15, 1e-10, 1e-8, 1e-4, 1e-3, 1e-2, 1, 5, 10, 20]
    kernel = ['linear', 'poly', 'sigmoid', 'rbf', 'laplacian']
    degree = [2, 3, 4]#this will only be used if kernel is poly
    

    #trackers for best model and its scores
    best_model = None
    best_score = None
    best_hyperparameters = []

    run_once_flag = False
    for alpha_element in alpha:
        for kernel_element in kernel:
            if kernel_element == 'poly':#use the degree
                for degree_element in degree:
                    myModel = KernelRidge(alpha=alpha_element, kernel=kernel_element, degree=degree_element)
                    
                    #we get back a list of the scores
                    myScores = evaluate_model(myModel, 'Kernel Ridge Regression') # Least squares loss with L2 reg.
                    
                    #on the first iteration just record this model and its scores, because otherwise we'd have nothing to compare against
                    if not run_once_flag:
                        best_model = myModel
                        best_score = myScores
                        best_hyperparameters.append(alpha_element)
                        best_hyperparameters.append(kernel_element)
                        best_hyperparameters.append(degree_element)
                        run_once_flag = True


                    #check if we have a better model based on validation MSE, and update if we do
                    if myScores[1] < best_score[1]: #we want the validation MSE
                        best_model = myModel
                        best_score = myScores
                        best_hyperparameters =[]#clear any old ones
                        best_hyperparameters.append(alpha_element)
                        best_hyperparameters.append(kernel_element)
                        best_hyperparameters.append(degree_element)
            else:
                myModel = KernelRidge(alpha=alpha_element, kernel=kernel_element)
                
                #we get back a list of the scores
                myScores = evaluate_model(myModel, 'Kernel Ridge Regression') # Least squares loss with L2 reg.
                
                #on the first iteration just record this model and its scores, because otherwise we'd have nothing to compare against
                if not run_once_flag:
                    best_model = myModel
                    best_score = myScores
                    best_hyperparameters.append(alpha_element)
                    best_hyperparameters.append(kernel_element)
                    run_once_flag = True


                #check if we have a better model based on validation MSE, and update if we do
                if myScores[1] < best_score[1]: #we want the validation MSE
                    best_model = myModel
                    best_score = myScores
                    best_hyperparameters =[]#clear any old ones
                    best_hyperparameters.append(alpha_element)
                    best_hyperparameters.append(kernel_element)


    #now that we've gone through all combinations of hyperparameters, store everything in a BestModelObject and return it
    return BestModelObject(best_model, best_score, best_hyperparameters)#return best model with best hyperparameters
def tune_neural_network_regression_hyperparameters():
    #hyperparameters
    hidden_layer_sizes = []
    for i in range(1,2,1):#currently only a single hidden layer; use range(1,4,1) if we really want 1 to 3 layers
        for j in range(10,101,10):#layer width from 10 to 100 in steps of 10
            myList = [j] * i
            
            myTuple = tuple(myList)
            hidden_layer_sizes.append(myTuple)
    
    activation = ['identity', 'logistic', 'tanh', 'relu']
    solver = ['lbfgs']
    alpha = [1e-15, 1e-10, 1e-8, 1e-4, 1e-3, 1e-2, 1, 5, 10, 20]
    learning_rate = ['constant', 'invscaling', 'adaptive']
    #learning_rate_init = [0.001] only relevant if we use sgd as our solver, but we aren't doing that for such a small dataset
    tol = [0.01, .001, 0.0001, .00001]

    #trackers for best model and its scores
    best_model = None
    best_score = None
    best_hyperparameters = []

    run_once_flag = False
    for activation_element in activation:
        for solver_element in solver:
            for alpha_element in alpha:
                for learning_rate_element in learning_rate:
                    for tol_element in tol:
                        for hidden_element in hidden_layer_sizes:
                            #default is 1 hidden layer
                            myModel = MLPRegressor(activation=activation_element, solver=solver_element, alpha=alpha_element, learning_rate=learning_rate_element, tol=tol_element, hidden_layer_sizes=hidden_element)
                            
                            #we get back a list of the scores
                            myScores = evaluate_model(myModel, 'Neural Network Regression') # Least squares loss with L2 reg.
                            
                            #on the first iteration just record this model and its scores, because otherwise we'd have nothing to compare against
                            if not run_once_flag:
                                best_model = myModel
                                best_score = myScores
                                best_hyperparameters.append(activation_element)
                                best_hyperparameters.append(solver_element)
                                best_hyperparameters.append(alpha_element)
                                best_hyperparameters.append(learning_rate_element)
                                best_hyperparameters.append(tol_element)
                                best_hyperparameters.append(hidden_element)
                                run_once_flag = True


                            #check if we have a better model based on validation MSE, and update if we do
                            if myScores[1] < best_score[1]: #we want the validation MSE
                                best_model = myModel
                                best_score = myScores
                                best_hyperparameters =[]#clear any old ones
                                best_hyperparameters.append(activation_element)
                                best_hyperparameters.append(solver_element)
                                best_hyperparameters.append(alpha_element)
                                best_hyperparameters.append(learning_rate_element)
                                best_hyperparameters.append(tol_element)
                                best_hyperparameters.append(hidden_element)

    #now that we've gone through all combinations of hyperparameters, store everything in a BestModelObject and return it
    return BestModelObject(best_model, best_score, best_hyperparameters)#return best model with best hyperparameters
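#The hidden_layer_sizes loop above currently yields only single-layer tuples because
#range(1,2,1) produces just 1. If the intent really is 1 to 3 hidden layers, a compact,
#uncalled sketch of the full grid would be:
def _hidden_layer_grid_sketch():
    #tuples such as (10,), (20,), ..., (10, 10), ..., (100, 100, 100)
    return [(width,) * depth for depth in range(1, 4) for width in range(10, 101, 10)]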