def test_neural_network_basic():
    runner = Runner('model/experiment/output/neural_network_basic',
                    load_sample_data_frame(), 'violation',
                    neural_network_basic, None)
    runner.run_classification_experiment(sample=sample,
                                         multiclass=True,
                                         record_predict_proba=True)
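These experiment snippets reference module-level names such as sample, iterations, hyper_parameters, fit_increment, and max_iters that each project defines elsewhere. A minimal sketch of the kind of configuration they assume (the concrete values below are hypothetical) is:

# Hypothetical module-level configuration assumed by the experiment snippets;
# the originating projects define these names elsewhere.
sample = 0.1           # fraction of rows used for quick experiments
iterations = 20        # n_iter for randomized hyper-parameter search
fit_increment = 10000  # rows per batch for incrementally fitted estimators
max_iters = 5          # passes over the data when fitting incrementally
hyper_parameters = {}  # search space, filled in per estimator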
Example 2
def test_gaussian_naive_bayes_basic():
    runner = Runner('model/experiment/output/gaussian_naive_bayes_basic',
                    load_sample_data_frame(), 'violation',
                    gaussian_naive_bayes_basic, None)
    runner.run_classification_experiment(sample=sample,
                                         multiclass=True,
                                         record_predict_proba=True)
Example 3
def build_xgboost_model():
    runner = Runner('model/output/xgboost_model', load_data_frame(),
                    'violation', xgboost_pipeline, None)
    runner.run_classification_experiment(sample=sample,
                                         test_size=0.2,
                                         multiclass=True,
                                         record_predict_proba=True)
    joblib.dump(xgboost_pipeline, 'model/output/xgboost_model.joblib')
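The dumped pipeline can later be restored with joblib.load. A minimal sketch, assuming the dumped object is the fitted pipeline and new_data is a DataFrame with the training columns:

import joblib

model = joblib.load('model/output/xgboost_model.joblib')
predictions = model.predict(new_data)          # predicted violation classes
probabilities = model.predict_proba(new_data)  # per-class probabilities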
def test_lightgbm_basic():
    runner = Runner('model/experiment/output/lightgbm_basic',
                    load_sample_data_frame(), 'violation', lightgbm_basic,
                    hyper_parameters)
    runner.run_classification_search_experiment('neg_log_loss',
                                                sample=sample,
                                                n_iter=iterations,
                                                multiclass=True,
                                                record_predict_proba=True)
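run_classification_search_experiment draws n_iter candidate settings from hyper_parameters and scores them with neg_log_loss. A hypothetical search space for a LightGBM pipeline (assuming the estimator step is named 'model') might look like:

# Hypothetical randomized-search space; the 'model__' prefix assumes a
# pipeline step named 'model'.
hyper_parameters = {
    'model__num_leaves': [31, 63, 127],
    'model__learning_rate': [0.01, 0.05, 0.1],
    'model__n_estimators': [100, 300, 500],
    'model__min_child_samples': [10, 20, 50],
}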
Example 5
def run_algorithm(self):
    try:
        max_gen = int(self.max_gen.text())
        algorithm = HillClimbing(max_gen)
        val, t_time = Runner.run_algorithm(algorithm, self.title)
        self.print_results(val, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
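The run_algorithm methods in the following examples are PyQt slot methods: each reads its parameters from QLineEdit widgets, builds an algorithm object, and hands it to Runner.run_algorithm. A sketch of how such a slot is typically wired up in the dialog's __init__, assuming PyQt5-style widgets (the widget names are assumptions):

# Hypothetical wiring inside the dialog's __init__; widget names are assumptions.
self.max_gen = QLineEdit('100')                       # user-supplied generation limit
self.run_button = QPushButton('Run')
self.run_button.clicked.connect(self.run_algorithm)  # invoke the slot on click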
Example 6
def run_algorithm(self):
    try:
        max_gen = int(self.max_gen.text())
        pop_size = int(self.pop_size.text())
        num_children = int(self.num_children.text())
        algorithm = EvolutionStrat(max_gen, pop_size, num_children)
        val, t_time = Runner.run_algorithm(algorithm, self.title)
        self.print_results(val, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
Example 7
def run_algorithm(self):
    try:
        max_gen = int(self.max_gen.text())
        pop_size = int(self.pop_size.text())
        clone_f = float(self.clone_f.text())
        num_rand = int(self.num_rand.text())
        algorithm = ClonalSelection(max_gen, pop_size, clone_f, num_rand)
        val, t_time = Runner.run_algorithm(algorithm, self.title)
        self.print_results(val, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
Example 8
def run_algorithm(self):
    try:
        pop_size = int(self.pop_size.text())
        best_p = int(self.best_pop.text())
        max_gen = int(self.max_gen.text())
        p_m = float(self.p_m.text())
        algorithm = Genetic(pop_size, best_p, max_gen, p_m)
        val, t_time = Runner.run_algorithm(algorithm, self.title)
        self.print_results(val, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
Example 9
def run_algorithm(self):
    try:
        max_gen = int(self.max_gen.text())
        pop_size = int(self.pop_size.text())
        weight_f = float(self.weight_f.text())
        cross_r = float(self.cross_r.text())
        algorithm = DiffEvolution(max_gen, pop_size, weight_f, cross_r)
        val, t_time = Runner.run_algorithm(algorithm, self.title)
        self.print_results(val, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
Example 10
def run_algorithm(self):
    try:
        max_gen = int(self.max_gen.text())
        num_samples = int(self.num_samples.text())
        num_update = int(self.num_update.text())
        learning_r = float(self.learning_r.text())
        algorithm = CrossEntropy(max_gen, num_samples, num_update,
                                 learning_r)
        val, t_time = Runner.run_algorithm(algorithm, self.title)
        self.print_results(val, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
Example 11
def run_algorithm(self):
    try:
        max_gen = int(self.max_gen.text())
        mem_size = int(self.mem_size.text())
        consid_r = float(self.consid_r.text())
        adjust_r = float(self.adjust_r.text())
        rang = float(self.rang.text())
        algorithm = Harmony(max_gen, mem_size, consid_r, adjust_r, rang)
        val, t_time = Runner.run_algorithm(algorithm, self.title)
        self.print_results(val, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
Example 12
def run_algorithm(self):
    try:
        max_gen = int(self.max_gen.text())
        pop_size = int(self.pop_size.text())
        num_clones = int(self.num_clones.text())
        beta = int(self.beta.text())
        num_rand = int(self.num_rand.text())
        algorithm = ImmuneNetwork(max_gen, pop_size, num_clones, beta,
                                  num_rand)
        val, t_time = Runner.run_algorithm(algorithm, self.title)
        self.print_results(val, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
Example 13
def run_algorithm(self):
    try:
        max_gen = int(self.max_gen.text())
        pop_size = int(self.pop_size.text())
        weight_f = float(self.weight_f.text())
        cross_r = float(self.cross_r.text())
        num_threads = int(self.num_threads.text())
        algorithm = DiffEvoAlgorithm(pop_size, max_gen, weight_f, cross_r,
                                     num_threads)
        bests, t_time = Runner.run_algorithm_p(algorithm, self.title)
        self.print_results_p(bests, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
Example 14
def run_algorithm(self):
    try:
        pop_size = int(self.pop_size.text())
        best_p = int(self.best_pop.text())
        max_gen = int(self.max_gen.text())
        p_m = float(self.p_m.text())
        num_threads = int(self.num_threads.text())
        algorithm = GenParallelAlgorithm(pop_size, best_p, max_gen, p_m,
                                         num_threads)
        bests, t_time = Runner.run_algorithm_p(algorithm, self.title)
        self.print_results_p(bests, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
Example 15
def run_algorithm(self):
    try:
        max_gen = int(self.max_gen.text())
        pop_size = int(self.pop_size.text())
        p_cross = float(self.p_cross.text())
        max_local_gens = int(self.max_local_gens.text())
        p_mut = float(self.p_mut.text())
        p_local = float(self.p_local.text())
        algorithm = Memetic(max_gen, pop_size, p_cross, p_mut,
                            max_local_gens, p_local)
        val, t_time = Runner.run_algorithm(algorithm, self.title)
        self.print_results(val, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
def build_xgboost_model():
    runner = Runner('model/output/xgboost_basic', load_clean_data_frame(),
                    'arrest', xgboost_pipeline, hyper_parameters)
    runner.run_classification_search_experiment('roc_auc',
                                                sample=sample,
                                                n_iter=iterations,
                                                record_predict_proba=True)
    joblib.dump(runner.trained_estimator, 'model/output/xgboost_basic.joblib')

    runner = Runner('model/output/xgboost_basic_fs', load_clean_data_frame(),
                    'arrest', xgboost_pipeline_fs, hyper_parameters)
    runner.run_classification_search_experiment('roc_auc',
                                                sample=sample,
                                                n_iter=iterations,
                                                record_predict_proba=True)
    joblib.dump(runner.trained_estimator,
                'model/output/xgboost_basic_fs.joblib')
Example 17
def run_algorithm(self):
    try:
        max_gen = int(self.max_gen.text())
        init_f = float(self.init_f.text())
        s_factor = float(self.s_factor.text())
        l_factor = float(self.l_factor.text())
        iter_mult = int(self.iter_mult.text())
        max_no_impr = int(self.max_no_impr.text())
        algorithm = AdaptativeRandomS(max_gen, init_f, s_factor, l_factor,
                                      iter_mult, max_no_impr)
        val, t_time = Runner.run_algorithm(algorithm, self.title)
        self.print_results(val, t_time)
    except ValueError as error:
        QMessageBox.warning(self, 'Value error',
                            'Invalid input:' + str(error))
Example 18
def build_sgd_huber_loss():
    runner = Runner(
        'model/output/sgd_huber_loss_over_sampled',
        load_clean_data_frame(),
        'arrest',
        sgd
    )
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        sampling=SMOTE(),
        fit_increment=fit_increment,
        max_iters=max_iters,
        n_jobs=1,
        cv=None
    )
    joblib.dump(
        pipeline,
        'model/output/sgd_huber_loss_over_sampled.joblib'
    )

    runner = Runner(
        'model/output/sgd_huber_loss_over_sampled_fs',
        load_clean_data_frame(),
        'arrest',
        sgd_fs
    )
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=mapper,
        sampling=SMOTE(),
        fit_increment=fit_increment,
        max_iters=max_iters,
        n_jobs=1,
        cv=None
    )
    joblib.dump(
        pipeline_fs,
        'model/output/sgd_huber_loss_over_sampled_fs.joblib'
    )
Example 19
def build_neural_network():
    runner = Runner('model/output/neural_network_basic',
                    load_clean_data_frame(), 'arrest', nn)
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        max_iters=max_iters,
        cv=None,
        n_jobs=1)
    joblib.dump(pipeline, 'model/output/neural_network_basic.joblib')

    runner = Runner('model/output/neural_network_basic_fs',
                    load_clean_data_frame(), 'arrest', nn_fs)
    runner.run_classification_experiment(sample=sample,
                                         record_predict_proba=True,
                                         transformer=mapper,
                                         fit_increment=fit_increment,
                                         max_iters=max_iters,
                                         cv=None,
                                         n_jobs=1)
    joblib.dump(pipeline_fs, 'model/output/neural_network_basic_fs.joblib')
def test_decision_tree():
    runner = Runner('model/experiment/output/decision_tree_basic',
                    load_clean_sample_data_frame(),
                    'arrest',
                    decision_tree_basic,
                    hyper_parameters=hyper_parameters)
    runner.run_classification_search_experiment('roc_auc',
                                                sample=sample,
                                                n_iter=iterations,
                                                record_predict_proba=True)

    runner = Runner('model/experiment/output/decision_tree_under_sampled',
                    load_clean_sample_data_frame(),
                    'arrest',
                    decision_tree_basic,
                    hyper_parameters=hyper_parameters)
    runner.run_classification_search_experiment('roc_auc',
                                                sample=sample,
                                                n_iter=iterations,
                                                record_predict_proba=True,
                                                sampling=RandomUnderSampler())

    runner = Runner('model/experiment/output/decision_tree_over_sampled',
                    load_clean_sample_data_frame(),
                    'arrest',
                    decision_tree_basic,
                    hyper_parameters=hyper_parameters)
    runner.run_classification_search_experiment('roc_auc',
                                                sample=sample,
                                                n_iter=iterations,
                                                record_predict_proba=True,
                                                sampling=SMOTE())

    runner = Runner('model/experiment/output/decision_tree_combine_sampled',
                    load_clean_sample_data_frame(),
                    'arrest',
                    decision_tree_basic,
                    hyper_parameters=hyper_parameters)
    runner.run_classification_search_experiment('roc_auc',
                                                sample=sample,
                                                n_iter=iterations,
                                                record_predict_proba=True,
                                                sampling=SMOTEENN())
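The under-, over-, and combined-sampling variants rely on samplers from the imbalanced-learn package; the imports these snippets assume are along these lines:

from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import SMOTE
from imblearn.combine import SMOTEENN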
def test_gaussian_naive_bayes():
    runner = Runner('model/experiment/output/gaussian_naive_bayes_basic',
                    load_clean_sample_data_frame(), 'arrest', GaussianNB())
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        n_jobs=1)

    runner = Runner(
        'model/experiment/output/gaussian_naive_bayes_under_sampled',
        load_clean_sample_data_frame(), 'arrest', GaussianNB())
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        n_jobs=1,
        sampling=RandomUnderSampler())

    runner = Runner(
        'model/experiment/output/gaussian_naive_bayes_over_sampled',
        load_clean_sample_data_frame(), 'arrest', GaussianNB())
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        n_jobs=1,
        sampling=SMOTE())

    runner = Runner(
        'model/experiment/output/gaussian_naive_bayes_combine_sampled',
        load_clean_sample_data_frame(), 'arrest', GaussianNB())
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        n_jobs=1,
        sampling=SMOTEENN())
Example 22
def test_neural_network():
    runner = Runner(
        'model/experiment/output/neural_network_basic',
        load_clean_sample_data_frame(), 'arrest',
        MLPClassifier(hidden_layer_sizes=(
            750,
            125,
        ), verbose=True))
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        max_iters=max_iters,
        n_jobs=1)

    runner = Runner(
        'model/experiment/output/neural_network_under_sampled',
        load_clean_sample_data_frame(), 'arrest',
        MLPClassifier(hidden_layer_sizes=(
            750,
            125,
        ), verbose=True))
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        max_iters=max_iters,
        n_jobs=1,
        sampling=RandomUnderSampler())

    runner = Runner(
        'model/experiment/output/neural_network_over_sampled',
        load_clean_sample_data_frame(), 'arrest',
        MLPClassifier(hidden_layer_sizes=(
            750,
            125,
        ), verbose=True))
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        max_iters=max_iters,
        n_jobs=1,
        sampling=SMOTE())

    runner = Runner(
        'model/experiment/output/neural_network_combine_sampled',
        load_clean_sample_data_frame(), 'arrest',
        MLPClassifier(hidden_layer_sizes=(
            750,
            125,
        ), verbose=True))
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        max_iters=max_iters,
        n_jobs=1,
        sampling=SMOTEENN())
Example 23
def test_sgd_log_loss():
    runner = Runner('model/experiment/output/sgd_log_loss_basic',
                    load_clean_sample_data_frame(), 'arrest',
                    SGDClassifier(loss='log'))
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        max_iters=max_iters,
        n_jobs=1)

    runner = Runner('model/experiment/output/sgd_log_loss_under_sampled',
                    load_clean_sample_data_frame(), 'arrest',
                    SGDClassifier(loss='log'))
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        max_iters=max_iters,
        n_jobs=1,
        sampling=RandomUnderSampler())

    runner = Runner('model/experiment/output/sgd_log_loss_over_sampled',
                    load_clean_sample_data_frame(), 'arrest',
                    SGDClassifier(loss='log'))
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        max_iters=max_iters,
        n_jobs=1,
        sampling=SMOTE())

    runner = Runner('model/experiment/output/sgd_log_loss_combine_sampled',
                    load_clean_sample_data_frame(), 'arrest',
                    SGDClassifier(loss='log'))
    runner.run_classification_experiment(
        sample=sample,
        record_predict_proba=True,
        transformer=binned_geo_one_hot_data_mapper,
        fit_increment=fit_increment,
        max_iters=max_iters,
        n_jobs=1,
        sampling=SMOTEENN())
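Note that loss='log' is the spelling accepted by older scikit-learn releases; from scikit-learn 1.1 onward the logistic loss is spelled 'log_loss', so on current versions the classifier would be constructed as:

SGDClassifier(loss='log_loss')  # scikit-learn >= 1.1 spelling of the logistic loss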