Example #1
def run_nn_opt():
    kfold = KFold(n_splits=5, shuffle=True, random_state=1)
    processor = P1()
    datasets = [Diabetes()]
    # 'random_hill_climb', 'simulated_annealing', 'genetic_alg'
    estimators = [
        Config(name='NN_%s' % title('random_hill_climb'),
               estimator=mlrose.NeuralNetwork(
                   algorithm='random_hill_climb',
                   random_state=1,
                   max_iters=200,
                   hidden_nodes=[64],
                   early_stopping=True,
               ),
               cv=kfold,
               params={'restarts': [0, 10, 20, 30, 40, 50]}),
        Config(name='NN_%s' % title('simulated_annealing'),
               estimator=mlrose.NeuralNetwork(
                   algorithm='simulated_annealing',
                   random_state=1,
                   max_iters=200,
                   hidden_nodes=[64],
                   early_stopping=True,
               ),
               cv=kfold,
               params={'max_iters': [200]}),
        Config(name='NN_%s' % title('genetic_alg'),
               estimator=mlrose.NeuralNetwork(
                   algorithm='genetic_alg',
                   random_state=1,
                   max_iters=200,
                   hidden_nodes=[64],
                   early_stopping=True,
               ),
               cv=kfold,
               params={
                   'pop_size': [100, 200, 300, 400, 500, 600, 700, 800, 900],
                    'mutation_prob': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
               }),
    ]

    for dataset in datasets:
        for estimator in estimators:
            estimator = processor.get_default_model(dataset=dataset,
                                                    estimator=estimator)
            processor.process_validations(dataset=dataset, estimator=estimator)
            processor.plot_validation()

    for dataset in datasets:
        for estimator in estimators:
            estimator = processor.get_default_model(dataset=dataset,
                                                    estimator=estimator)
            processor.param_selection(dataset=dataset, estimator=estimator)
            processor.print_best_params()

    for dataset in datasets:
        for estimator in estimators:
            processor.process(dataset=dataset, estimator=estimator)
            processor.plot_learning_curves()
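Config, P1, Diabetes and title above are project-specific helpers that are not shown. A minimal self-contained sketch of the same restart sweep, assuming the hiive fork (mlrose_hiive), whose NeuralNetwork subclasses scikit-learn's BaseEstimator and can therefore be cloned by GridSearchCV:

import mlrose_hiive
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import GridSearchCV, KFold

X, y = load_breast_cancer(return_X_y=True)
nn = mlrose_hiive.NeuralNetwork(algorithm='random_hill_climb', hidden_nodes=[64],
                                max_iters=200, early_stopping=True, random_state=1)
search = GridSearchCV(nn, param_grid={'restarts': [0, 10, 20, 30, 40, 50]},
                      scoring='accuracy',
                      cv=KFold(n_splits=5, shuffle=True, random_state=1))
search.fit(X, y)
print(search.best_params_)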
Example #2
def neural_network_final_results():
    x_train, y_train = load_data('dataset1', 'train')
    x_test, y_test = load_data('dataset1', 'test')

    algorithm = 'random_hill_climb'
    rhc = mlr.NeuralNetwork(hidden_nodes=[10, 10], activation='relu',
                            algorithm=algorithm, max_iters=5000, restarts=5,
                            bias=True, is_classifier=True, learning_rate=0.1,
                            early_stopping=True, clip_max=1, max_attempts=100,
                            random_state=RANDOM_STATE, curve=True)

    print('Random Hill Climbing')
    start_time = time.time()
    rhc.fit(x_train, y_train)
    end_time = time.time()
    total_time = end_time - start_time
    print("Fit Time", total_time)

    y_pred = rhc.predict(x_test)
    test_accuracy = accuracy_score(y_test, y_pred)
    print('Accuracy', test_accuracy)

    algorithm = 'simulated_annealing'
    sa = mlr.NeuralNetwork(hidden_nodes=[10, 10], activation='relu',
                           algorithm=algorithm, max_iters=5000,
                           bias=True, is_classifier=True, learning_rate=0.1,
                           early_stopping=True, clip_max=1, max_attempts=100,
                           random_state=RANDOM_STATE, curve=True)

    print('Simulated Annealing')
    start_time = time.time()
    sa.fit(x_train, y_train)
    end_time = time.time()
    total_time = end_time - start_time
    print("Fit Time", total_time)

    y_pred = sa.predict(x_test)
    test_accuracy = accuracy_score(y_test, y_pred)
    print('Accuracy', test_accuracy)

    algorithm = 'genetic_alg'
    ga = mlr.NeuralNetwork(hidden_nodes=[10, 10], activation='relu',
                           algorithm=algorithm, max_iters=1000, pop_size=500,
                           bias=True, is_classifier=True, learning_rate=0.1,
                           early_stopping=True, clip_max=1, max_attempts=100,
                           random_state=RANDOM_STATE, curve=True)

    print('Genetic Algorithm')
    start_time = time.time()
    ga.fit(x_train, y_train)
    end_time = time.time()
    total_time = end_time - start_time
    print("Fit Time", total_time)

    y_pred = ga.predict(x_test)
    test_accuracy = accuracy_score(y_test, y_pred)
    print('Accuracy', test_accuracy)
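The fit/time/score block above repeats verbatim for each optimizer; a small helper (hypothetical, not part of the original code) collapses one run into a single call:

import time
from sklearn.metrics import accuracy_score

def fit_and_score(name, model, x_train, y_train, x_test, y_test):
    print(name)
    start_time = time.time()
    model.fit(x_train, y_train)
    print("Fit Time", time.time() - start_time)
    test_accuracy = accuracy_score(y_test, model.predict(x_test))
    print('Accuracy', test_accuracy)
    return test_accuracy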
Example #3
def NN_SA(file_name, classifier_col):
    X_train, X_test, y_train, y_test = util.data_load(file_name, classifier_col)

    # SA HPs
    nodes = [128, 128, 128, 128]

    act = 'relu'
    seed = 1
    sa_algo = 'simulated_annealing'
    sa_lr = 10
    sa_iter = 10000
    sa_temp = 10000
    sa_decay = 0.92
    sa_ma = 50
    sa_clip = 10

    temperature = [0.1, 1, 10, 100, 1000, 10000]
    plt.figure()
    for t in temperature:
        print('temperature', t)
        sa_nn_model = mlrose.NeuralNetwork(hidden_nodes=nodes, activation=act, random_state=seed, bias=True,
                                           is_classifier=True, early_stopping=True, curve=True, algorithm=sa_algo,
                                           max_iters=sa_iter, learning_rate=sa_lr, clip_max=sa_clip, max_attempts=sa_ma,
                                           schedule=mlrose.GeomDecay(init_temp=t, decay=sa_decay))
        sa_nn_model.fit(X_train, y_train)
        plt.plot(sa_nn_model.fitness_curve, label='temp =' + str(t))

    plt.title("NN SA - Temperature")
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.grid(True)
    plt.legend()
    plt.savefig("Images\\NN - SA - Temperature")
    plt.xscale('log')
    plt.savefig("Images\\NN - SA - Temperature - log")
    plt.show()

    decay_rates = [0.1, 0.2, 0.3, 0.4, 0.5, 0.8, 0.92]
    plt.figure()
    for dr in decay_rates:
        print('decay', dr)
        sa_nn_model = mlrose.NeuralNetwork(hidden_nodes=nodes, activation=act, random_state=seed, bias=True,
                                           is_classifier=True, early_stopping=True, curve=True, algorithm=sa_algo,
                                           max_iters=sa_iter, learning_rate=sa_lr, clip_max=sa_clip, max_attempts=sa_ma,
                                           schedule=mlrose.GeomDecay(init_temp=sa_temp, decay=dr))
        sa_nn_model.fit(X_train, y_train)
        plt.plot(sa_nn_model.fitness_curve, label='decay rate =' + str(dr))

    plt.title("NN SA - Decay Rate")
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.grid(True)
    plt.legend()
    plt.savefig("Images\\NN - SA - Decay Rate")
    plt.xscale('log')
    plt.savefig("Images\\NN - SA - Decay Rate - log")
    plt.show()
Example #4
    def sa(self):
        iteration = self.noOfiteration
        problem_size_space = self.problem_size
        step = problem_size_space // 2
        # temperature_list=[1, 10, 50, 100, 250, 500, 1000, 2500, 5000, 10000]
        temperature_list = [1, 10, 50]
        # sa_params = { 'hidden_nodes': [(3,), (4,), (5,), (5, 5)],
        sa_params = {
            'hidden_nodes': [(3, ), (5, ), (5, 5), (10, 10, 10), (5, 5, 5)],
            #(10, 10), (5, 5, 5), (10, 10, 10), (20, 20, 20)]
            'max_iters': [x for x in range(0, iteration + 1, 1000)],
            'schedule': [
                mlrose.GeomDecay(init_temp=init_temp)
                for init_temp in temperature_list
            ],
            'learning_rate': [0.001, 0.01, 0.1],
            'activation': ['tanh', 'relu', 'sigmoid']
        }

        sa_model = mlrose.NeuralNetwork(random_state=1,
                                        algorithm='simulated_annealing',
                                        bias=False,
                                        is_classifier=True,
                                        learning_rate=0.001,
                                        early_stopping=True,
                                        clip_max=5,
                                        max_attempts=5000,
                                        curve=True)
        return sa_model, sa_params
Example #5
    def ga(self):
        iteration = self.noOfiteration
        problem_size_space = self.problem_size
        problem_size_space_exp = problem_size_space * 10
        step = problem_size_space_exp // 2
        # ga_params = { 'hidden_nodes': [(3,), (4,), (5,), (5, 5)],
        ga_params = {
            'hidden_nodes': [(3, ), (5, 5), (10, 10, 10)],
            #(10, 10), (5, 5, 5), (10, 10, 10), (20, 20, 20)]
            'max_iters': [x for x in range(0, iteration + 1, 1000)],
            'pop_size': [x for x in range(step, problem_size_space_exp + 1, step)],
            'mutation_prob': np.arange(.1, .7, .2),
            'learning_rate': [0.001, 0.01, 0.1]
        }

        ga_model = mlrose.NeuralNetwork(random_state=1,
                                        algorithm='genetic_alg',
                                        bias=False,
                                        is_classifier=True,
                                        learning_rate=0.001,
                                        early_stopping=True,
                                        clip_max=5,
                                        max_attempts=5000,
                                        curve=True)
        return ga_model, ga_params
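The (model, params) pairs returned by sa() and ga() are presumably consumed by a grid search; a minimal sketch of that wiring (the GridSearchCV settings here are assumptions, not shown in the original):

from sklearn.model_selection import GridSearchCV

def tune(model, params, X_train, y_train):
    # exhaustive search over the returned parameter grid
    search = GridSearchCV(model, params, cv=3, scoring='accuracy', n_jobs=-1)
    search.fit(X_train, y_train)
    return search.best_estimator_, search.best_params_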
Example #6
def neural_network_tune_rhc():
    features, labels = load_data('dataset1', 'train')
    x_train, x_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, random_state=RANDOM_STATE)

    restarts = [10, 20, 30, 40, 50]
    for i in restarts:
        algorithm = 'random_hill_climb'
        rhc = mlr.NeuralNetwork(hidden_nodes=[10, 10], activation='relu',
                                algorithm=algorithm, max_iters=5000, restarts=i,
                                bias=True, is_classifier=True, learning_rate=0.1,
                                early_stopping=True, clip_max=1, max_attempts=100,
                                random_state=RANDOM_STATE, curve=True)

        print('Random Hill Climbing Restarts=', i)
        start_time = time.time()
        rhc.fit(x_train, y_train)
        end_time = time.time()
        total_time = end_time - start_time
        print("Fit Time", total_time)

        y_train_pred = rhc.predict(x_train)
        y_train_accuracy = accuracy_score(y_train, y_train_pred)

        print('Train Score', y_train_accuracy)
        y_test_pred = rhc.predict(x_test)
        y_test_accuracy = accuracy_score(y_test, y_test_pred)
        print('Validation Score:', y_test_accuracy)
Example #7
def eval_nn():
    exp = NNExperiment()
    alg2curve = {}
    max_loss = -1
    for alg in [
            'random_hill_climb', 'simulated_annealing', 'gradient_descent',
            'genetic_alg'
    ]:
        nn = mlrose.NeuralNetwork(
            hidden_nodes=[10],
            algorithm=alg,
            curve=True,
            random_state=42,
            clip_max=5,
            learning_rate=0.0001 if alg == 'gradient_descent' else 0.1,
            early_stopping=True,
            max_iters=1000,
            max_attempts=50,
            pop_size=10)
        exp.reset(alg, nn)
        exp.make_learning_curve()
        curve = exp.make_fitness_curve()
        if alg == 'gradient_descent':
            curve = [[abs(c), float(i)] for i, c in enumerate(curve)]
        for c in curve:
            try:
                if c[0] > max_loss:
                    max_loss = c[0]
            except Exception:
                print(c)
                raise
        alg2curve[alg] = deepcopy(curve)
    agg_evaluate_algorithm('nn', alg2curve, max_loss, True)
Example #8
def neural_network_tune_ga():
    features, labels = load_data('dataset1', 'train')
    x_train, x_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, random_state=RANDOM_STATE)

    population = [50, 100, 200, 500, 1000]

    for i in population:
        algorithm = 'genetic_alg'
        ga = mlr.NeuralNetwork(hidden_nodes=[10, 10], activation='relu',
                               algorithm=algorithm, max_iters=1000, pop_size=i,
                               bias=True, is_classifier=True, learning_rate=0.1,
                               early_stopping=True, clip_max=1, max_attempts=100,
                               random_state=RANDOM_STATE, curve=True)

        print('Genetic Algorithm Population Size =', i)
        start_time = time.time()
        ga.fit(x_train, y_train)
        end_time = time.time()
        total_time = end_time - start_time
        print("Fit Time", total_time)

        y_train_pred = ga.predict(x_train)
        y_train_accuracy = accuracy_score(y_train, y_train_pred)

        print('Train Score', y_train_accuracy)
        y_test_pred = ga.predict(x_test)
        y_test_accuracy = accuracy_score(y_test, y_test_pred)
        print('Validation Score:', y_test_accuracy)
Example #9
def ga(X_train, X_test, y_train, y_test):
  # Initialize neural network object and fit object
  nn_ga = mlrose_hiive.NeuralNetwork(hidden_nodes = [3], activation = 'relu',
                                  algorithm = 'genetic_alg', max_iters = 1000,
                                  is_classifier = True, learning_rate = 0.0001,
                                  clip_max = 5, max_attempts = 100,
                                  random_state = random_seed, curve=True)
  start = datetime.datetime.now()
  nn_ga.fit(X_train, y_train)
  finish = datetime.datetime.now()
  nn_ga_fittime = (finish - start).total_seconds()

  nn_ga_fitness = nn_ga.fitness_curve
  # Predict labels for train set and assess accuracy
  y_train_pred = nn_ga.predict(X_train)
  y_train_accuracy = accuracy_score(y_train, y_train_pred)

  # Predict labels for test set and assess accuracy
  y_test_pred = nn_ga.predict(X_test)
  y_test_accuracy = accuracy_score(y_test, y_test_pred)

  print('----------Neural Networks - Genetic Algorithm------------')
  print('Training score: ', y_train_accuracy)
  print('Test Score: ', y_test_accuracy)
  print('Fit Time: ', nn_ga_fittime)
  return nn_ga_fitness
Example #10
def neural_network_tune_sa():
    features, labels = load_data('dataset1', 'train')
    x_train, x_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, random_state=RANDOM_STATE)

    decay = [
        mlr.GeomDecay(init_temp=1.0, decay=0.99, min_temp=0.001),
        mlr.GeomDecay(init_temp=1.0, decay=0.8, min_temp=0.001),
        mlr.GeomDecay(init_temp=1.0, decay=0.6, min_temp=0.001),
        mlr.GeomDecay(init_temp=1.0, decay=0.4, min_temp=0.001),
        mlr.GeomDecay(init_temp=1.0, decay=0.2, min_temp=0.001)
    ]

    for i in decay:
        algorithm = 'simulated_annealing'
        sa = mlr.NeuralNetwork(hidden_nodes=[10, 10], activation='relu',
                               algorithm=algorithm, max_iters=5000, schedule=i,
                               bias=True, is_classifier=True, learning_rate=0.1,
                               early_stopping=True, clip_max=1, max_attempts=100,
                               random_state=RANDOM_STATE, curve=True)

        print('Simulated Annealing Decay Rate =', i.decay)
        start_time = time.time()
        sa.fit(x_train, y_train)
        end_time = time.time()
        total_time = end_time - start_time
        print("Fit Time", total_time)

        y_train_pred = sa.predict(x_train)
        y_train_accuracy = accuracy_score(y_train, y_train_pred)

        print('Train Score', y_train_accuracy)
        y_test_pred = sa.predict(x_test)
        y_test_accuracy = accuracy_score(y_test, y_test_pred)
        print('Validation Score:', y_test_accuracy)
Example #11
def mlp_gd(max_iters):
    return mlrose_hiive.NeuralNetwork(
        hidden_nodes=[10, 10, 10],
        algorithm='gradient_descent',
        activation='tanh',
        learning_rate=0.001,  # 0.1, 0.01, 0.001, 0.0001, 0.00001
        early_stopping=True,
        random_state=SEED,
        max_iters=max_iters)
Example #12
def mlp_rhc(max_iters):
    return mlrose_hiive.NeuralNetwork(
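        # algorithm is omitted here; mlrose_hiive defaults to 'random_hill_climb'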
        hidden_nodes=[10, 10, 10],
        activation='tanh',
        learning_rate=0.01,  # 0.1, 0.01, 0.001, 0.0001, 0.000001
        max_iters=max_iters,
        early_stopping=True,
        random_state=SEED,
        restarts=30,  # 5, 10, ...
        max_attempts=5  # 5, 10, ...
    )
Example #13
def mlp_ga(max_iters):
    return mlrose_hiive.NeuralNetwork(
        hidden_nodes=[10, 10, 10],
        max_iters=max_iters,
        algorithm='genetic_alg',
        activation='tanh',
        learning_rate=0.001,  # 0.1, 0.01, 0.001, 0.0001, 0.000001
        pop_size=200,  # 100, 150, 200, 300
        mutation_prob=0.1,  # 0.05, 0.1, 0.3
        early_stopping=True,
        random_state=SEED)
Example #14
def generate_nn_model(alg_name, hidden_nodes=[30, 20], seed=None):
    nn_model = None

    ## RHC
    if (alg_name == 'rhc'):
        nn_model = mlrose_hiive.NeuralNetwork(hidden_nodes=hidden_nodes, activation='relu',
                                              algorithm='random_hill_climb',
                                              restarts=50,
                                              bias=True, is_classifier=True,
                                              early_stopping=True, clip_max=5,
                                              random_state=seed,
                                              max_iters=1000,
                                              learning_rate=0.001,
                                              max_attempts=100)
    ## SA
    elif (alg_name == 'sa'):
        nn_model = mlrose_hiive.NeuralNetwork(hidden_nodes=hidden_nodes, activation='relu',
                                              algorithm='simulated_annealing',
                                              schedule=mlrose_hiive.ExpDecay(),
                                              bias=True, is_classifier=True,
                                              early_stopping=True, clip_max=5,
                                              random_state=seed,
                                              max_iters=1000,
                                              learning_rate=0.0001,
                                              max_attempts=100)
    ## GA
    elif (alg_name == 'ga'):
        nn_model = mlrose_hiive.NeuralNetwork(hidden_nodes=hidden_nodes, activation='relu',
                                              algorithm='genetic_alg',
                                              pop_size = 200,
                                              mutation_prob = 0.25,
                                              bias=True, is_classifier=True,
                                              early_stopping=True, clip_max=5,
                                              random_state=seed,
                                              max_iters=1000,
                                              learning_rate=0.0001,
                                              max_attempts=100)
    else:
        print('Algorithm Name Error')

    return nn_model
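Hypothetical usage of the factory, assuming training arrays are in scope:

nn = generate_nn_model('sa', hidden_nodes=[30, 20], seed=42)
nn.fit(X_train, y_train)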
Example #15
def mlp_sa(max_iters):
    return mlrose_hiive.NeuralNetwork(
        hidden_nodes=[10, 10, 10],
        algorithm='simulated_annealing',
        activation='tanh',
        learning_rate=0.1,  # 0.1, 0.01, 0.001, 0.0001, 0.000001
        max_iters=max_iters,
        early_stopping=True,
        schedule=mlrose_hiive.GeomDecay(),  # ArithDecay, ExpDecay, GeomDecay
        random_state=SEED,
        max_attempts=100  # 10, 100, 500
    )
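A hypothetical driver comparing the four mlp_* factories (Examples #11-#13 and #15) at a shared iteration budget; the train/test arrays are assumed to be in scope:

from sklearn.metrics import accuracy_score

for make in (mlp_gd, mlp_rhc, mlp_sa, mlp_ga):
    model = make(max_iters=1000)
    model.fit(X_train, y_train)
    print(make.__name__, accuracy_score(y_test, model.predict(X_test)))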
Example #16
def get_model(algorithm, max_iters):
    activation = "relu"
    print(algorithm)
    print(max_iters)
    if algorithm == "rh":
        return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'random_hill_climb',  \
                                bias = True,  is_classifier = True, early_stopping = True, restarts = 5, max_attempts =10,
                                max_iters = max_iters, clip_max = 10, random_state = randomSeed)
    if algorithm == "ga":
        return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'genetic_alg',  \
                                bias = True,  is_classifier = True, early_stopping = True,  max_attempts =10,
                                max_iters = max_iters, clip_max = 10, mutation_prob = .10, random_state = randomSeed)
    if algorithm == "sa":
        return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'simulated_annealing',  \
                                bias = True,  is_classifier = True, early_stopping = True,  max_attempts =10,
                                max_iters = max_iters, clip_max = 10, schedule = mlrose.GeomDecay(), random_state = randomSeed)

    if algorithm == "gd":
        return mlrose.NeuralNetwork(hidden_nodes = [10], activation = activation, algorithm = 'gradient_descent',  \
                                bias = True,  is_classifier = True, early_stopping = True,  max_attempts =10,
                                max_iters = max_iters, clip_max = 10, random_state = randomSeed)
Example #17
def genetic_algorithm(X_train, X_test, y_train, y_test, verbose=False):
    max_iter_range = np.arange(50, 500, 50)
    kwargs = {
        'hidden_nodes': HIDDEN_NODES,
        'activation': 'relu',
        'learning_rate': LEARNING_RATE,
        'random_state': RANDOM_SEED_VAL,
        'curve': True,

        # algorithm-specific
        'algorithm': 'genetic_alg',

    }

    nn_ga = mlrose.NeuralNetwork(**kwargs)

    nn_ga.fit(X_train, y_train)

    # plot fitness curve
    plot_title = "NN weight opt - GA: fitness vs. iterations"
    plotting_nn.plot_fitness_curves(
        fitness_data=pd.DataFrame(nn_ga.fitness_curve),
        title=plot_title,
    )
    plt.savefig('graphs/nn_ga_fitness_curve.png')
    plt.clf()

    # plot iterative learning curve
    lc_df = nn_iterative_lc(X_train, y_train, max_iter_range, kwargs)
    plotting_nn.plot_iterative_lc(
        lc_df,
        title="Learning Curve (max_iters) for GA",
        max_iter_range=max_iter_range,
    )
    plt.savefig('graphs/nn_ga_lc_iterations.png')
    plt.clf()

    # plot learning curve
    train_sizes = np.linspace(0.1, 0.9, 9)
    data_proc.plot_learning_curve(
        nn_ga,
        title="Learning curve for GA",
        X=X_train,
        y=y_train,
        cv=CV_VAL,
        train_sizes=train_sizes,
    )
    plt.savefig('graphs/nn_ga_lc.png')
    plt.clf()
Example #18
def NN_GD(file_name, classifier_col):
    X_train, X_test, y_train, y_test = util.data_load(file_name,
                                                      classifier_col)

    # GD HPs
    nodes = [128, 128, 128, 128]

    act = 'relu'
    seed = 1
    gd_algo = 'gradient_descent'
    gd_lr = 0.00000009
    gd_iter = 10000
    gd_ma = 50
    gd_clip = 5

    learning_rates = [
        0.00000009, 0.00000001, 0.000000001, 0.0000001, 0.000001, 0.00001,
        0.0001, 0.001, 0.01, 0.1
    ]
    plt.figure()
    for lr in learning_rates:
        print('lr', lr)
        gd_nn_model = mlrose.NeuralNetwork(hidden_nodes=nodes,
                                           activation=act,
                                           random_state=seed,
                                           bias=True,
                                           is_classifier=True,
                                           early_stopping=True,
                                           curve=True,
                                           algorithm=gd_algo,
                                           max_iters=gd_iter,
                                           learning_rate=lr,
                                           clip_max=gd_clip,
                                           max_attempts=gd_ma)
        gd_nn_model.fit(X_train, y_train)
        gd_curve = gd_nn_model.fitness_curve
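        # mlrose reports the gradient-descent fitness curve as negated loss
        # (it maximizes fitness), so flipping the sign recovers the loss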

        inverted_gd_curve = np.array(gd_curve) * -1
        plt.plot(inverted_gd_curve, label='lr =' + str(lr))

    plt.title("NN GD - Learning Rates")
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.grid(True)
    plt.legend()
    plt.savefig("Images\\NN - GD - Learning Rates")
    plt.xscale('log')
    plt.savefig("Images\\NN - GD - Learning Rates - log")
    plt.show()
Example #19
def train_nn(X_train, y_train, **kwargs):
    data_hash = joblib.hash([kwargs, X_train, y_train])
    file_name = "cache/nn/%s.dump" % (data_hash)
    if os.path.exists(file_name):
        print("loading nn from cache for hash=%s, args=%s" %
              (data_hash, kwargs))
        return joblib.load(file_name)

    print("Building nn with %s" % (kwargs))
    nn = mlrose.NeuralNetwork(**kwargs)
    start = time.time()
    nn.fit(X_train, y_train)
    nn.time = time.time() - start
    os.makedirs(os.path.dirname(file_name), exist_ok=True)  # ensure the cache directory exists
    joblib.dump(nn, file_name, compress=3)
    return nn
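A hypothetical call to the cached trainer above; the kwargs mirror the NeuralNetwork parameters used throughout these examples:

nn = train_nn(X_train, y_train,
              algorithm='simulated_annealing', hidden_nodes=[10],
              max_iters=1000, early_stopping=True, random_state=1, curve=True)
print('fit seconds:', nn.time)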
Example #20
def NN_GA(file_name, classifier_col):
    X_train, X_test, y_train, y_test = util.data_load(file_name, classifier_col)
    activation = ['relu']
    learning_rate = [5, 0.01, 0.1, 1, 2, 3, 4, 7, 10]
    algorithim = 'genetic_alg'
    iters = [1000, 10000, 50000, 100000]
    nodes = [128, 128, 128, 128]
    population = [2000, 2100, 2200, 2300]
    mutation = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1]
    outcomes = []
    max_attempts = [10, 50, 100, 200, 500, 1000]
    clips = [5, 10, 100, 1000, 10000, 100000]

    # GA HPs
    nodes = [128, 128, 128, 128]
    act = 'relu'
    seed = 1
    ga_algo = 'genetic_alg'
    ga_lr = 5
    ga_iter = 100
    ga_pop = 1500
    ga_mut = 0.1
    ga_ma = 100
    ga_clip = 5

    Population = [100, 200, 300, 400, 500, 750, 1000, 1240, 1500]
    plt.figure()
    for p in Population:
        print('Population', p)
        ga_nn_model = mlrose.NeuralNetwork(hidden_nodes=nodes, activation=act, max_iters=ga_iter,
                                        algorithm=ga_algo, pop_size=p, mutation_prob=ga_mut,
                                        bias=True, is_classifier=True, learning_rate=ga_lr,
                                        early_stopping=True, clip_max=ga_clip, max_attempts=ga_ma,
                                        random_state=seed, curve=True)
        ga_nn_model.fit(X_train, y_train)
        plt.plot(ga_nn_model.fitness_curve, label='pop =' + str(p))

    plt.title("NN GA - Population")
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.grid(True)
    plt.legend()
    plt.savefig("Images\\NN - GA - Population")
    plt.xscale('log')
    plt.savefig("Images\\NN - GA - Population - log")
    plt.show()
Example #21
def rhc_nn(data):
    results = []
    for restarts in [0, 2, 4, 8]:
        nn = mlrose.NeuralNetwork(hidden_nodes=[8, 8],
                                  activation='relu',
                                  algorithm='random_hill_climb',
                                  max_iters=10000,
                                  learning_rate=0.1,
                                  early_stopping=True,
                                  max_attempts=500,
                                  restarts=restarts,
                                  clip_max=5,
                                  random_state=0,
                                  curve=True)

        results.append(run_nn(*data, nn, restarts=restarts))

    return results
Example #22
def ga_nn(data):
    results = []
    for pop_size in [64, 128, 256, 512]:
        nn = mlrose.NeuralNetwork(hidden_nodes=[8, 8],
                                  activation='relu',
                                  algorithm='genetic_alg',
                                  max_iters=1000,
                                  learning_rate=0.001,
                                  early_stopping=True,
                                  max_attempts=100,
                                  pop_size=pop_size,
                                  clip_max=5,
                                  random_state=0,
                                  curve=True)

        results.append(run_nn(*data, nn, pop_size=pop_size))

    return results
Example #23
def sa_nn(data):
    results = []
    for decay in [0.95, 0.975, 0.99, 0.995]:
        nn = mlrose.NeuralNetwork(hidden_nodes=[8, 8],
                                  activation='relu',
                                  algorithm='simulated_annealing',
                                  max_iters=10000,
                                  learning_rate=0.1,
                                  early_stopping=True,
                                  max_attempts=500,
                                  schedule=mlrose.GeomDecay(decay=decay),
                                  clip_max=5,
                                  random_state=0,
                                  curve=True)

        results.append(run_nn(*data, nn, decay=decay))

    return results
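run_nn is not defined in Examples #21-#23; a plausible minimal version, assuming data unpacks to (x_train, y_train, x_test, y_test):

import time
from sklearn.metrics import accuracy_score

def run_nn(x_train, y_train, x_test, y_test, nn, **swept):
    # time the fit, score on the test split, and record the swept hyperparameter
    start = time.time()
    nn.fit(x_train, y_train)
    fit_time = time.time() - start
    acc = accuracy_score(y_test, nn.predict(x_test))
    return {**swept, 'fit_time': fit_time, 'test_accuracy': acc}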
Example #24
def nn_iterative_lc(X, y, max_iter_range, kwargs, cv=None):
    df = pd.DataFrame(index=max_iter_range, columns=['train', 'cv', 'train_time', 'cv_time'])
    for i in max_iter_range:
        kwargs['max_iters'] = i.item()
        mlr_nn = mlrose.NeuralNetwork(**kwargs)

        # train data
        train_t0 = time()
        mlr_nn.fit(X, y)
        train_time = time() - train_t0
        train_score = mlr_nn.score(X, y)

        # get cv scores
        cv_t0 = time()
        cross_vals = cross_val_score(mlr_nn, X, y, cv=cv if cv is not None else CV_VAL)  # fall back to CV_VAL when cv is not given
        cv_time = time() - cv_t0
        cv_mean = np.mean(cross_vals)

        df.loc[i, 'train'] = train_score
        df.loc[i, 'cv'] = cv_mean
        df.loc[i, 'train_time'] = train_time
        df.loc[i, 'cv_time'] = cv_time

    return df.astype('float64')
Example #25
def gradient_descent_benchmark():
    x_data, y_data = process_dataset.process_census_data()
    x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, train_size=0.6, random_state=86)

    scaler = MinMaxScaler()
    x_train_scaled = scaler.fit_transform(x_train)
    x_test_scaled = scaler.transform(x_test)  # transform only: the scaler must not be refit on test data

    ann = mlrose_hiive.NeuralNetwork(hidden_nodes=[32, 32], activation='relu', algorithm='gradient_descent',
                                     max_iters=1000, bias=True, is_classifier=True, learning_rate=0.00001,
                                     max_attempts=200, early_stopping=True, curve=True, random_state=1)

    ann.fit(x_train_scaled, y_train)

    # predictions = ann.predict(x_test_scaled)
    score = accuracy_score(y_test, ann.predict(x_test_scaled))
    print("accuracy was: " + str(score))

    # figure = evaluate_model_learning_complexity.plot_learning_curve(ann, "DONOTUSE.png", x_train_scaled, y_train_hot)
    # figure.savefig("ANN_Gradient_Descent.png")

    plt.figure()
    plt.plot(ann.fitness_curve)
    plt.show()
Example #26
    def rhc(self):
        iteration = self.noOfiteration
        problem_size_space = self.problem_size
        step = problem_size_space // 2
        # rhc_params = { 'hidden_nodes': [(3,), (4,), (5,), (5, 5)],
        rhc_params = {
            'hidden_nodes': [(3, ), (5, ), (5, 5), (10, 10, 10), (5, 5, 5)],
            #(10, 10), (5, 5, 5), (10, 10, 10), (20, 20, 20)]
            'max_iters': [x for x in range(0, iteration + 1, 1000)],
            'restarts': [x for x in range(0, problem_size_space + 1, step)],
            'activation': ['tanh', 'relu', 'sigmoid'],
            'learning_rate': [0.001, 0.01, 0.1]
        }

        rhc_model = mlrose.NeuralNetwork(random_state=1,
                                         algorithm='random_hill_climb',
                                         bias=False,
                                         is_classifier=True,
                                         learning_rate=0.001,
                                         early_stopping=True,
                                         clip_max=5,
                                         max_attempts=5000,
                                         curve=True)
        return rhc_model, rhc_params
Example #27
    def backprop(self):
        iteration = self.noOfiteration
        problem_size_space = self.problem_size
        problem_size_space_exp = problem_size_space * 10
        step = problem_size_space_exp // 2
        # backprop_params = { 'hidden_nodes': [(3,), (4,), (5,), (5, 5)],
        backprop_params = {
            'hidden_nodes': [(3, ), (5, 5), (10, 10, 10), (5, 5, 5),
                             (10, 10, 10), (20, 20, 20), (5, )],
            #(10, 10), (5, 5, 5), (10, 10, 10), (20, 20, 20)]
            'activation': ['tanh', 'relu', 'sigmoid'],
            'max_iters': [x for x in range(0, iteration + 1, 1000)],
            'learning_rate': [0.001, 0.005, 0.01, 0.1, 0.5]
        }

        backprop_model = mlrose.NeuralNetwork(random_state=1,
                                              algorithm='gradient_descent',
                                              bias=False,
                                              is_classifier=True,
                                              early_stopping=True,
                                              clip_max=5,
                                              max_attempts=5000,
                                              curve=True)
        return backprop_model, backprop_params
Example #28
File: nn.py Project: abagde93/CS7641
def nn_impl():

    #iris_data = fetch_openml('iris')
    #X_whole, y_whole = iris_data['data'], iris_data['target']

    sklearn_data = datasets.load_breast_cancer()
    x, y = sklearn_data.data, sklearn_data.target
    #x = preprocessing.scale(x)

    # Split the initial data
    xtrain, xtest, ytrain, ytest = train_test_split(x,
                                                    y,
                                                    test_size=0.4,
                                                    random_state=42)

    ### Analysis for RHC ###
    train_accuracy_scores = []
    test_accuracy_scores = []
    time_per_iteration_rhc = []

    for i in range(1, 3000, 50):
        print(i)
        rhc_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                            activation='identity',
                                            algorithm='random_hill_climb',
                                            bias=False,
                                            is_classifier=True,
                                            learning_rate=0.6,
                                            clip_max=1,
                                            max_attempts=1000,
                                            max_iters=i)

        start = time.time()
        rhc_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = rhc_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = rhc_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

        time_per_iteration_rhc.append(time.time() - start)

    plt.figure()
    plt.plot(np.arange(1, 3000, 50),
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(np.arange(1, 3000, 50),
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Iterations')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Iterations (RHC)')
    plt.legend()
    plt.savefig('testacc_iter_rhc.png')

    print("Finished RHC")

    ### Analysis for Simulated Annealing ###
    train_accuracy_scores = []
    test_accuracy_scores = []
    time_per_iteration_sa = []

    for i in range(1, 3000, 50):
        print(i)
        sa_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                           activation='identity',
                                           algorithm='simulated_annealing',
                                           bias=False,
                                           is_classifier=True,
                                           learning_rate=0.6,
                                           clip_max=1,
                                           max_attempts=1000,
                                           max_iters=i)

        start = time.time()
        sa_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = sa_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = sa_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

        time_per_iteration_sa.append(time.time() - start)

    plt.figure()
    plt.plot(np.arange(1, 3000, 50),
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(np.arange(1, 3000, 50),
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Iterations')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Iterations (SA)')
    plt.legend()
    plt.savefig('testacc_iter_SA.png')

    print("Finished SA")

    ### Analysis for Genetic Algorithms ###
    train_accuracy_scores = []
    test_accuracy_scores = []
    time_per_iteration_ga = []

    for i in range(1, 3000, 50):
        print(i)
        ga_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                           activation='identity',
                                           algorithm='genetic_alg',
                                           bias=False,
                                           is_classifier=True,
                                           learning_rate=0.6,
                                           clip_max=1,
                                           max_attempts=1000,
                                           max_iters=i)

        start = time.time()
        ga_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = ga_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = ga_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

        time_per_iteration_ga.append(time.time() - start)

    plt.figure()
    plt.plot(np.arange(1, 3000, 50),
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(np.arange(1, 3000, 50),
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Iterations')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Iterations (GA)')
    plt.legend()
    plt.savefig('testacc_iter_GA.png')

    print("Finished GA")

    ### Backpropagation (for comparison) ###
    train_accuracy_scores = []
    test_accuracy_scores = []
    time_per_iteration_bp = []
    print("backprop start")
    for i in range(1, 3000, 50):
        print(i)
        bp_nn = MLPClassifier(hidden_layer_sizes=(50, ),
                              activation='logistic',
                              max_iter=i)
        # bp_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2], activation='identity',
        #                         algorithm='gradient_descent',
        #                         bias=False, is_classifier=True,
        #                         learning_rate = 0.6, clip_max=1,
        #                         max_attempts=1000, max_iters = i)

        start = time.time()
        bp_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = bp_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = bp_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

        time_per_iteration_bp.append(time.time() - start)

    plt.figure()
    plt.plot(np.arange(1, 3000, 50),
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(np.arange(1, 3000, 50),
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Iterations')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Iterations (Backpropagation)')
    plt.legend()
    plt.savefig('testacc_iter_bp.png')

    print("Finished Backprop")

    ### Plot runtimes for above ###
    plt.figure()
    plt.plot(np.arange(1, 3000, 50),
             np.array(time_per_iteration_rhc),
             label='RHC')
    plt.plot(np.arange(1, 3000, 50),
             np.array(time_per_iteration_sa),
             label='SA')
    plt.plot(np.arange(1, 3000, 50),
             np.array(time_per_iteration_ga),
             label='GA')
    plt.plot(np.arange(1, 3000, 50),
             np.array(time_per_iteration_bp),
             label='BP')
    plt.xlabel('Iterations')
    plt.ylabel('Training Time')
    plt.title('Training Time vs Iterations')
    plt.legend()
    plt.savefig('time_vs_iter.png')

    #### Hyperparameter Tuning - RHC ####
    ## Adjusting the number of random restarts ##
    train_accuracy_scores = []
    test_accuracy_scores = []

    for i in range(0, 500, 25):
        print(i)
        rhc_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                            activation='identity',
                                            algorithm='random_hill_climb',
                                            bias=False,
                                            is_classifier=True,
                                            learning_rate=0.6,
                                            clip_max=1,
                                            max_attempts=1000,
                                            restarts=i)

        rhc_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = rhc_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = rhc_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

    plt.figure()
    plt.plot(np.arange(0, 500, 25),
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(np.arange(0, 500, 25),
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Restarts')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Number of Restarts (RHC)')
    plt.legend()
    plt.savefig('rhc_restarts.png')

    print("Finished RHC HP Tuning")

    #### Hyperparameter Tuning - SA ####
    ## Adjusting the type of scheduling ##
    train_accuracy_scores = []
    test_accuracy_scores = []

    # Referencing section 2.2 'Decay Schedules' here:
    # https://readthedocs.org/projects/mlrose/downloads/pdf/stable/

    schedule_types = [
        mlrose_hiive.ExpDecay(),
        mlrose_hiive.ArithDecay(),
        mlrose_hiive.GeomDecay()
    ]

    for st in schedule_types:
        print(st)
        sa_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                           activation='identity',
                                           algorithm='simulated_annealing',
                                           bias=False,
                                           is_classifier=True,
                                           learning_rate=0.6,
                                           clip_max=1,
                                           max_attempts=1000,
                                           schedule=st)

        sa_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = sa_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = sa_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

    plt.figure()
    plt.plot(['ExpDecay', 'ArithDecay', 'GeomDecay'],
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(['ExpDecay', 'ArithDecay', 'GeomDecay'],
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('Schedule Type')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Schedule Type (SA)')
    plt.legend()
    plt.savefig('sa_schedule_type.png')

    print("Finished SA HP Tuning")

    #### Hyperparameter Tuning - GA ####

    ## Adjusting the amount of mutation
    ## Used api as referenced in https://readthedocs.org/projects/mlrose/downloads/pdf/stable/
    train_accuracy_scores = []
    test_accuracy_scores = []

    mutation_prob_array = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    for i in mutation_prob_array:
        ga_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                           activation='relu',
                                           algorithm='genetic_alg',
                                           bias=False,
                                           is_classifier=True,
                                           learning_rate=0.6,
                                           clip_max=1,
                                           max_attempts=1000,
                                           mutation_prob=i)

        ga_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = ga_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = ga_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

    plt.figure()
    plt.plot(mutation_prob_array,
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(mutation_prob_array,
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('mutation_prob')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Iterations (GA - mutation_prob experimentation)')
    plt.legend()
    plt.savefig('ga_mutation.png')

    print("Finished GA mutation experimentation")

    ## Adjusting the population size
    ## Used api as referenced in https://readthedocs.org/projects/mlrose/downloads/pdf/stable/
    train_accuracy_scores = []
    test_accuracy_scores = []

    pop_size_array = [100, 200, 300, 400, 500]
    for i in pop_size_array:
        ga_nn = mlrose_hiive.NeuralNetwork(hidden_nodes=[2],
                                           activation='relu',
                                           algorithm='genetic_alg',
                                           bias=False,
                                           is_classifier=True,
                                           learning_rate=0.6,
                                           clip_max=1,
                                           max_attempts=1000,
                                           pop_size=i)

        ga_nn.fit(xtrain, ytrain)

        # Train set analysis
        predictions_train = ga_nn.predict(xtrain)
        accuracy_score_train = accuracy_score(ytrain, predictions_train)
        train_accuracy_scores.append(accuracy_score_train)

        # Test set analysis
        predictions_test = ga_nn.predict(xtest)
        accuracy_score_test = accuracy_score(ytest, predictions_test)
        test_accuracy_scores.append(accuracy_score_test)

    plt.figure()
    plt.plot(pop_size_array,
             np.array(train_accuracy_scores),
             label='Train Accuracy')
    plt.plot(pop_size_array,
             np.array(test_accuracy_scores),
             label='Test Accuracy')
    plt.xlabel('pop_size')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. Iterations (GA - pop_size experimentation)')
    plt.legend()
    plt.savefig('ga_popsize.png')

    print("Finished GA pop_size experimentation")
Example #29
scaler = preprocessing.StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

for algo in algos:
    print('--------------New Algo ', algo, '----------------------')
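    # All algorithm-specific knobs (restarts, schedule, pop_size, mutation_prob)
    # are passed at once; mlrose only consults the ones relevant to algo.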

    nn_model = mlrose.NeuralNetwork(hidden_nodes=[3],
                                    activation='relu',
                                    algorithm=algo,
                                    max_iters=100,
                                    bias=True,
                                    is_classifier=True,
                                    learning_rate=0.0001,
                                    early_stopping=False,
                                    clip_max=1,
                                    restarts=30,
                                    schedule=mlrose.ExpDecay(1),
                                    pop_size=300,
                                    mutation_prob=0.4,
                                    max_attempts=10,
                                    random_state=4,
                                    curve=False)

    print('Fitting model...')
    nn_model.fit(x_train, y_train)

    y_train_pred = nn_model.predict(x_train)

    print('Training Score: ', accuracy_score(y_train, y_train_pred))
Example #30
def NN_SA(file_name, classifier_col):
    X_train, X_test, y_train, y_test = util.data_load(file_name,
                                                      classifier_col)

    activation = 'relu'
    learning_rate = [0.01, 0.1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    algorithim = 'simulated_annealing'
    iters = [
        100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 2000, 3000, 4000,
        5000, 6000, 7000, 8000, 9000, 10000
    ]
    nodes = [128, 128, 128, 128]
    temperatures = [0.001, 0.01]
    decay_rates = [0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99]
    outcomes = []
    max_attempts = [10, 50, 100, 200, 500, 1000]
    clips = [5, 10, 100, 1000, 10000, 100000]

    csv_file = 'NN_SA-itertests.csv'
    act = 'relu'
    lr = 10
    itera = 10000
    temp = 10000
    dec = 0.92
    ma = 50
    clip = 10

    while True:

        iters_outs = {}

        for iter_test in iters:
            start = time.time()

            print(algorithim, act, lr, iter_test, 'GeomDecay', temp, dec, ma,
                  clip)
            nn_model = mlrose.NeuralNetwork(hidden_nodes=nodes,
                                            activation=act,
                                            max_iters=iter_test,
                                            algorithm=algorithim,
                                            schedule=mlrose.GeomDecay(
                                                init_temp=temp, decay=dec),
                                            bias=True,
                                            is_classifier=True,
                                            learning_rate=lr,
                                            early_stopping=True,
                                            clip_max=clip,
                                            max_attempts=ma,
                                            random_state=1,
                                            curve=True)
            nn_model.fit(X_train, y_train)
            train_time = time.time() - start
            print('Train time', train_time)

            start = time.time()
            y_train_pred = nn_model.predict(X_train)
            y_train_roc = roc_auc_score(y_train,
                                        y_train_pred,
                                        multi_class="ovr",
                                        average="weighted")
            print('y_train_roc', y_train_roc)

            y_train_query_time = time.time() - start
            print('y_train_query_time', y_train_query_time)

            start = time.time()
            y_test_pred = nn_model.predict(X_test)
            y_test_roc = roc_auc_score(y_test,
                                       y_test_pred,
                                       multi_class="ovr",
                                       average="weighted")
            print('y_test_roc', y_test_roc)

            y_test_query_time = time.time() - start
            print('y_test_query_time', y_test_query_time)
            nn_loss = nn_model.loss
            print('loss', nn_loss)
            outcome = {}
            outcome['schedule'] = 'GeomDecay'
            outcome['activation'] = act
            outcome['learning_rate'] = lr
            outcome['max_iters'] = iter_test
            outcome['temperatures'] = temp
            outcome['decay_rates'] = dec
            outcome['max_attempts'] = ma
            outcome['clip'] = clip
            outcome['y_train_roc'] = y_train_roc
            outcome['y_test_roc'] = y_test_roc
            outcome['runtime'] = train_time + y_train_query_time + y_test_query_time
            outcome['Train time'] = train_time
            outcome['y_train_query_time'] = y_train_query_time
            outcome['y_test_query_time'] = y_test_query_time
            outcome['loss'] = nn_loss

            outcomes.append(outcome)
            pd.DataFrame(outcomes).to_csv(csv_file)

            iters_outs[iter_test] = y_test_roc

        old_val = itera
        itera = max(iters_outs, key=iters_outs.get)
        print('best iter', itera, 'old', old_val)

        # NOTE: execution stops here after the iteration sweep; the temperature,
        # decay and clip tuning passes below are unreachable as written
        raise SystemExit(0)

        temp_outs = {}

        for temp_test in temperatures:
            start = time.time()

            print(algorithim, act, lr, itera, 'GeomDecay', temp_test, dec, ma,
                  clip)
            nn_model = mlrose.NeuralNetwork(
                hidden_nodes=nodes,
                activation=act,
                max_iters=itera,
                algorithm=algorithim,
                schedule=mlrose.GeomDecay(init_temp=temp_test, decay=dec),
                bias=True,
                is_classifier=True,
                learning_rate=lr,
                early_stopping=True,
                clip_max=clip,
                max_attempts=ma,
                random_state=1,
                curve=True)
            nn_model.fit(X_train, y_train)
            y_train_pred = nn_model.predict(X_train)
            y_train_roc = roc_auc_score(y_train,
                                        y_train_pred,
                                        multi_class="ovr",
                                        average="weighted")
            print('y_train_roc', y_train_roc)

            y_test_pred = nn_model.predict(X_test)
            y_test_roc = roc_auc_score(y_test,
                                       y_test_pred,
                                       multi_class="ovr",
                                       average="weighted")
            print('y_test_roc', y_test_roc)

            runtime = time.time() - start
            print('curr run time', time.time() - start)

            outcome = {}
            outcome['schedule'] = 'GeomDecay'
            outcome['activation'] = act
            outcome['learning_rate'] = lr
            outcome['max_iters'] = itera
            outcome['temperatures'] = temp_test
            outcome['decay_rates'] = dec
            outcome['max_attempts'] = ma
            outcome['clip'] = clip
            outcome['y_train_roc'] = y_train_roc
            outcome['y_test_roc'] = y_test_roc
            outcome['runtime'] = runtime
            outcomes.append(outcome)
            pd.DataFrame(outcomes).to_csv(csv_file)

            temp_outs[temp_test] = y_test_roc

        old_temp = temp
        temp = max(temp_outs, key=temp_outs.get)
        print('best temp', temp, 'old', old_temp)

        decay_outs = {}

        for decay_test in decay_rates:
            start = time.time()

            print(algorithim, act, lr, itera, 'GeomDecay', temp, decay_test,
                  ma, clip)
            nn_model = mlrose.NeuralNetwork(hidden_nodes=nodes,
                                            activation=act,
                                            max_iters=itera,
                                            algorithm=algorithim,
                                            schedule=mlrose.GeomDecay(
                                                init_temp=temp,
                                                decay=decay_test),
                                            bias=True,
                                            is_classifier=True,
                                            learning_rate=lr,
                                            early_stopping=True,
                                            clip_max=clip,
                                            max_attempts=ma,
                                            random_state=1,
                                            curve=True)
            nn_model.fit(X_train, y_train)
            y_train_pred = nn_model.predict(X_train)
            y_train_roc = roc_auc_score(y_train,
                                        y_train_pred,
                                        multi_class="ovr",
                                        average="weighted")
            print('y_train_roc', y_train_roc)

            y_test_pred = nn_model.predict(X_test)
            y_test_roc = roc_auc_score(y_test,
                                       y_test_pred,
                                       multi_class="ovr",
                                       average="weighted")
            print('y_test_roc', y_test_roc)

            runtime = time.time() - start
            print('curr run time', runtime)

            outcome = {}
            outcome['schedule'] = 'GeomDecay'
            outcome['activation'] = act
            outcome['learning_rate'] = lr
            outcome['max_iters'] = itera
            outcome['temperatures'] = temp
            outcome['decay_rates'] = decay_test
            outcome['max_attempts'] = ma
            outcome['clip'] = clip
            outcome['y_train_roc'] = y_train_roc
            outcome['y_test_roc'] = y_test_roc
            outcome['runtime'] = runtime
            outcomes.append(outcome)
            pd.DataFrame(outcomes).to_csv(csv_file)

            decay_outs[decay_test] = y_test_roc

        old_val = dec
        dec = max(decay_outs, key=decay_outs.get)
        print('best decay', dec, 'old', old_val)

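        # Next, sweep the clip_max weight bound with the best decay rate fixed.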
        clips_outs = {}
        for clip_test in clips:
            start = time.time()

            print(algorithim, act, lr, itera, 'GeomDecay', temp, dec, ma,
                  clip_test)
            nn_model = mlrose.NeuralNetwork(hidden_nodes=nodes,
                                            activation=act,
                                            max_iters=itera,
                                            algorithm=algorithim,
                                            schedule=mlrose.GeomDecay(
                                                init_temp=temp, decay=dec),
                                            bias=True,
                                            is_classifier=True,
                                            learning_rate=lr,
                                            early_stopping=True,
                                            clip_max=clip_test,
                                            max_attempts=ma,
                                            random_state=1,
                                            curve=True)
            nn_model.fit(X_train, y_train)
            y_train_pred = nn_model.predict(X_train)
            y_train_roc = roc_auc_score(y_train,
                                        y_train_pred,
                                        multi_class="ovr",
                                        average="weighted")
            print('y_train_roc', y_train_roc)

            y_test_pred = nn_model.predict(X_test)
            y_test_roc = roc_auc_score(y_test,
                                       y_test_pred,
                                       multi_class="ovr",
                                       average="weighted")
            print('y_test_roc', y_test_roc)

            runtime = time.time() - start
            print('curr run time', runtime)

            outcome = {}
            outcome['schedule'] = 'GeomDecay'
            outcome['activation'] = act
            outcome['learning_rate'] = lr
            outcome['max_iters'] = itera
            outcome['temperatures'] = temp
            outcome['decay_rates'] = dec
            outcome['max_attempts'] = ma
            outcome['clip'] = clip_test
            outcome['y_train_roc'] = y_train_roc
            outcome['y_test_roc'] = y_test_roc
            outcome['runtime'] = runtime
            outcomes.append(outcome)
            pd.DataFrame(outcomes).to_csv(csv_file)
            clips_outs[clip_test] = y_test_roc

        old_val = clip
        clip = max(clips_outs, key=clips_outs.get)
        print('best clip', clip, 'old', old_val)

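        # Next, sweep max_attempts (steps without improvement allowed before
        # stopping) with the best clip_max fixed.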
        maxa_outs = {}

        for maxa_test in max_attempts:
            start = time.time()

            print(algorithim, act, lr, itera, 'GeomDecay', temp, dec,
                  maxa_test, clip)
            nn_model = mlrose.NeuralNetwork(hidden_nodes=nodes,
                                            activation=act,
                                            max_iters=itera,
                                            algorithm=algorithim,
                                            schedule=mlrose.GeomDecay(
                                                init_temp=temp, decay=dec),
                                            bias=True,
                                            is_classifier=True,
                                            learning_rate=lr,
                                            early_stopping=True,
                                            clip_max=clip,
                                            max_attempts=maxa_test,
                                            random_state=1,
                                            curve=True)
            nn_model.fit(X_train, y_train)
            y_train_pred = nn_model.predict(X_train)
            y_train_roc = roc_auc_score(y_train,
                                        y_train_pred,
                                        multi_class="ovr",
                                        average="weighted")
            print('y_train_roc', y_train_roc)

            y_test_pred = nn_model.predict(X_test)
            y_test_roc = roc_auc_score(y_test,
                                       y_test_pred,
                                       multi_class="ovr",
                                       average="weighted")
            print('y_test_roc', y_test_roc)

            runtime = time.time() - start
            print('curr run time', runtime)

            outcome = {}
            outcome['schedule'] = 'GeomDecay'
            outcome['activation'] = act
            outcome['learning_rate'] = lr
            outcome['max_iters'] = itera
            outcome['temperatures'] = temp
            outcome['decay_rates'] = dec
            outcome['max_attempts'] = maxa_test
            outcome['clip'] = clip
            outcome['y_train_roc'] = y_train_roc
            outcome['y_test_roc'] = y_test_roc
            outcome['runtime'] = runtime
            outcomes.append(outcome)
            pd.DataFrame(outcomes).to_csv(csv_file)

            maxa_outs[maxa_test] = y_test_roc

        old_val = ma
        ma = max(maxa_outs, key=maxa_outs.get)
        print('best ma', ma, 'old', old_val)

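        # Next, sweep the learning rate with all of the tuned values fixed.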
        lr_outs = {}

        for lr_test in learning_rate:
            start = time.time()

            print(algorithim, act, lr_test, itera, 'GeomDecay', temp, dec, ma,
                  clip)
            nn_model = mlrose.NeuralNetwork(hidden_nodes=nodes,
                                            activation=act,
                                            max_iters=itera,
                                            algorithm=algorithim,
                                            schedule=mlrose.GeomDecay(
                                                init_temp=temp, decay=dec),
                                            bias=True,
                                            is_classifier=True,
                                            learning_rate=lr_test,
                                            early_stopping=True,
                                            clip_max=clip,
                                            max_attempts=ma,
                                            random_state=1,
                                            curve=True)
            nn_model.fit(X_train, y_train)
            y_train_pred = nn_model.predict(X_train)
            y_train_roc = roc_auc_score(y_train,
                                        y_train_pred,
                                        multi_class="ovr",
                                        average="weighted")
            print('y_train_roc', y_train_roc)

            y_test_pred = nn_model.predict(X_test)
            y_test_roc = roc_auc_score(y_test,
                                       y_test_pred,
                                       multi_class="ovr",
                                       average="weighted")
            print('y_test_roc', y_test_roc)

            runtime = time.time() - start
            print('curr run time', runtime)

            outcome = {}
            outcome['schedule'] = 'GeomDecay'
            outcome['activation'] = act
            outcome['learning_rate'] = lr_test
            outcome['max_iters'] = itera
            outcome['temperatures'] = temp
            outcome['decay_rates'] = dec
            outcome['max_attempts'] = ma
            outcome['clip'] = clip
            outcome['y_train_roc'] = y_train_roc
            outcome['y_test_roc'] = y_test_roc
            outcome['runtime'] = runtime
            outcomes.append(outcome)
            pd.DataFrame(outcomes).to_csv(csv_file)

            lr_outs[lr_test] = y_test_roc

        old_lr = lr
        lr = max(lr_outs, key=lr_outs.get)
        print('best lr', lr, 'old', old_lr)