def computing_precise_vs_imprecise(in_path=None,
                                   ell_optimal=0.1,
                                   cv_n_fold=10,
                                   seeds=None,
                                   lib_path_server=None,
                                   model_type_precise='lda',
                                   model_type_imprecise='ilda',
                                   scaling=True):
    """Compare a precise and an imprecise classifier with repeated k-fold CV.

    Only the test instances on which the imprecise model is *cautious*
    (returns a set of more than one class) are scored: for those, count
    whether the ground truth lies in the imprecise prediction set and in
    the precise prediction, then average over folds and repetitions.

    :param in_path: CSV dataset path (last column = labels); when None the
        bundled iris data set is used.
    :param ell_optimal: imprecision parameter forwarded to ``learn``.
    :param cv_n_fold: number of folds, also the number of CV repetitions.
    :param seeds: one random seed per repetition; generated when None.
    :param lib_path_server: MATLAB library path for the imprecise factory.
    :param model_type_precise: key for the precise model factory.
    :param model_type_imprecise: key for the imprecise model factory.
    :param scaling: min-max normalize the features when True.
    :return: ``(avg_imprecise, avg_precise)``, or ``(None, None)`` when the
        imprecise model was never cautious (averages undefined).
    """
    data = export_data_set('iris.data') if in_path is None else pd.read_csv(
        in_path)
    logger = create_logger("computing_precise_vs_imprecise", True)
    logger.info('Training dataset and models (%s, %s, %s, %s)', in_path,
                model_type_precise, model_type_imprecise, ell_optimal)
    X = data.iloc[:, :-1].values
    if scaling: X = normalize_minmax(X)
    y = np.array(data.iloc[:, -1].tolist())
    seeds = generate_seeds(cv_n_fold) if seeds is None else seeds
    model_impr = __factory_model(model_type_imprecise,
                                 init_matlab=True,
                                 add_path_matlab=lib_path_server,
                                 DEBUG=False)
    model_prec = __factory_model_precise(model_type_precise,
                                         store_covariance=True)
    avg_imprecise, avg_precise, n_real_times = 0, 0, 0
    for time in range(cv_n_fold):
        kf = KFold(n_splits=cv_n_fold, random_state=seeds[time], shuffle=True)
        imprecise_mean, precise_mean, n_real_fold = 0, 0, 0
        for idx_train, idx_test in kf.split(y):
            X_cv_train, y_cv_train = X[idx_train], y[idx_train]
            X_cv_test, y_cv_test = X[idx_test], y[idx_test]
            model_impr.learn(X=X_cv_train, y=y_cv_train, ell=ell_optimal)
            model_prec.fit(X_cv_train, y_cv_train)
            n_real_tests, time_precise, time_imprecise = 0, 0, 0
            for i, test in enumerate(X_cv_test):
                evaluate_imp, _ = model_impr.evaluate(test)
                evaluate = model_prec.predict([test])
                # Score only cautious (set-valued) imprecise predictions.
                if len(evaluate_imp) > 1:
                    n_real_tests += 1
                    if y_cv_test[i] in evaluate_imp: time_imprecise += 1
                    if y_cv_test[i] in evaluate: time_precise += 1
                logger.debug(
                    "(time, iTest, ellOptimal, cautious, prediction, ground-truth)(%s, %s, %s, %s, %s, %s)",
                    time, i, ell_optimal, evaluate_imp, evaluate, y_cv_test[i])
            logger.debug(
                "(time, ellOptimal, nRealTests, timeImprecise, timePrecise) (%s, %s, %s, %s, %s)",
                time, ell_optimal, n_real_tests, time_imprecise, time_precise)
            if n_real_tests > 0:
                n_real_fold += 1
                imprecise_mean += time_imprecise / n_real_tests
                precise_mean += time_precise / n_real_tests
        logger.debug("(time, nRealFold, imprecise, precise) (%s, %s, %s, %s)",
                     time, n_real_fold, imprecise_mean, precise_mean)
        if n_real_fold > 0:
            n_real_times += 1
            avg_imprecise += imprecise_mean / n_real_fold
            avg_precise += precise_mean / n_real_fold
    if n_real_times == 0:
        # No cautious prediction was ever produced: the averages are
        # undefined, so avoid the ZeroDivisionError of dividing by zero.
        logger.debug("(dataset, models) (%s, %s, %s) no cautious predictions",
                     in_path, model_type_imprecise, model_type_precise)
        return None, None
    avg_imprecise /= n_real_times
    avg_precise /= n_real_times
    logger.debug("(dataset, models, imprec, prec) (%s, %s, %s, %s, %s)",
                 in_path, model_type_imprecise, model_type_precise,
                 avg_imprecise, avg_precise)
    return avg_imprecise, avg_precise
# Example #2 (original extraction marker: 示例#2 / 0)
def performance_accuracy_noise_corrupted_test_data(in_train_paths=None,
                                                   in_tests_paths=None,
                                                   model_type_precise='lda',
                                                   model_type_imprecise='ilda',
                                                   ell_optimal=0.1,
                                                   scaling=False,
                                                   lib_path_server=None,
                                                   nb_process=10):
    """Benchmark imprecise vs precise accuracy on noise-corrupted test sets.

    For every training file, fit the precise model and (via worker
    processes) evaluate the imprecise model with a fixed ``ell_optimal``
    on each corrupted test file; results are appended to
    ``results_<imprecise>_vs_<precise>_noise_accuracy.csv``.

    :param in_train_paths: list of training CSV paths (required).
    :param in_tests_paths: list of corrupted test CSV paths (required).
    :param model_type_precise: key for the precise model factory.
    :param model_type_imprecise: key for the imprecise model factory.
    :param ell_optimal: imprecision parameter for the imprecise model.
    :param scaling: min-max normalize features when True.
    :param lib_path_server: MATLAB library path for the worker processes.
    :param nb_process: number of parallel worker processes.
    """
    assert isinstance(in_train_paths,
                      list), "Without training data, cannot create the model"
    assert isinstance(
        in_tests_paths,
        list), "Without testing data, cannot compute the accuracy"

    logger = create_logger("performance_accuracy_noise_corrupted_test_data",
                           True)
    logger.info('Training dataset (%s, %s, %s)', in_train_paths,
                model_type_imprecise, ell_optimal)

    manager = ManagerWorkers(nb_process=nb_process)
    manager.executeAsync(model_type_imprecise, lib_path_server)
    versus = model_type_imprecise + "_vs_" + model_type_precise
    model_precise = __factory_model_precise(model_type_precise,
                                            store_covariance=True)
    try:
        # `with` closes the CSV even if an evaluation raises; `finally`
        # below guarantees the worker pool is always shut down.
        with open("results_" + versus + "_noise_accuracy.csv", 'w') as file_csv:
            writer = csv.writer(file_csv)
            for in_train_path in in_train_paths:
                X_train, y_train = dataset_to_Xy(in_train_path, scaling=scaling)
                model_precise.fit(X_train, y_train)
                accuracies = dict({})
                for in_test_path in in_tests_paths:
                    X_test, y_test = dataset_to_Xy(in_test_path,
                                                   scaling=scaling)
                    _u65, _u80, _set = computing_training_testing_step(
                        X_train, y_train, X_test, y_test, ell_optimal, manager,
                        0, 0, 0)
                    evaluate = model_precise.predict(X_test)
                    # Plain precise accuracy: fraction of exact matches.
                    _acc = sum(1 for k, j in zip(evaluate, y_test)
                               if k == j) / len(y_test)
                    logger.debug(
                        "accuracy-in_test_path (%s, %s, %s, %s, %s, %s)",
                        ntpath.basename(in_train_path),
                        ntpath.basename(in_test_path), ell_optimal, _u65, _u80,
                        _acc)
                    accuracies[ntpath.basename(in_test_path)] = [
                        ell_optimal, _u65, _u80, _set, _acc
                    ]
                    writer.writerow([
                        ntpath.basename(in_train_path),
                        ntpath.basename(in_test_path), ell_optimal, _u65, _u80,
                        _set, _acc
                    ])
                    # Flush so partial results survive a crash.
                    file_csv.flush()
                logger.debug(
                    "Partial-finish-accuracy-noise-corrupted_test %s: %s",
                    ntpath.basename(in_train_path), accuracies)
    finally:
        manager.poisonPillTraining()
    logger.debug("Finish-accuracy-noise-corrupted_test")
# Example #3 (original extraction marker: 示例#3 / 0)
def performance_hold_out(in_path=None,
                         out_path=None,
                         model_type='lda',
                         test_pct=0.4,
                         n_times=10,
                         seeds=None,
                         scaling=False):
    """Repeated hold-out evaluation of a precise model with u65/u80 scores.

    Splits the data ``n_times`` into train/test, fits the model, and
    credits the u65/u80 discounted utilities whenever the ground truth is
    contained in the prediction; per-repetition means are appended to
    ``out_path`` as CSV rows.

    :param in_path: CSV dataset path (no header, last column = labels).
    :param out_path: existing CSV output file (overwritten).
    :param model_type: key for the precise model factory.
    :param test_pct: fraction of instances held out for testing.
    :param n_times: number of hold-out repetitions.
    :param seeds: one random seed per repetition; generated when None.
    :param scaling: min-max normalize features when True.
    """
    assert os.path.exists(in_path), "Without training data, not testing"
    assert os.path.exists(out_path), "Without output saving performance"

    logger = create_logger("performance_hold_out", True)
    logger.info('Training data set %s, test percentage %s, model_type %s',
                in_path, test_pct, model_type)

    data = pd.read_csv(in_path, header=None)
    X = data.iloc[:, :-1].values
    if scaling: X = normalize_minmax(X)
    y = data.iloc[:, -1].tolist()

    seeds = generate_seeds(n_times) if seeds is None else seeds
    logger.info('Seeds generated %s', seeds)

    model = __factory_model_precise(model_type, store_covariance=True)
    mean_u65, mean_u80 = np.array([]), np.array([])
    # `with` guarantees the handle is closed even when a repetition raises
    # (the original leaked the file on any exception).
    with open(out_path, 'w') as file_csv:
        writer = csv.writer(file_csv)
        for i in range(0, n_times):
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=test_pct, random_state=seeds[i])
            sum_u65, sum_u80 = 0, 0
            model.fit(X_train, y_train)
            n, _ = X_test.shape
            for j, test in enumerate(X_test):
                evaluate = model.predict([test])
                # Credit the discounted utilities only when the (possibly
                # set-valued) prediction contains the ground truth.
                if y_test[j] in evaluate:
                    sum_u65 += u65(evaluate)
                    sum_u80 += u80(evaluate)
            logger.info("time, u65, u80 (%s, %s, %s)", i, sum_u65 / n,
                        sum_u80 / n)
            mean_u65 = np.append(mean_u65, sum_u65 / n)
            mean_u80 = np.append(mean_u80, sum_u80 / n)
            # -999 is a sentinel first column kept for compatibility with
            # downstream result parsers — TODO confirm against consumers.
            writer.writerow([-999, i, mean_u65[i], mean_u80[i]])
            # Flush so partial results survive a crash.
            file_csv.flush()
    logger.info("[total:data-set:avgResults] (%s, %s)", np.mean(mean_u65),
                np.mean(mean_u80))
# Example #4 (original extraction marker: 示例#4 / 0)
def performance_cv_accuracy(in_path=None,
                            model_type='lda',
                            cv_n_fold=10,
                            seeds=None,
                            scaling=False):
    """Repeated k-fold cross-validated u65/u80 accuracy of a precise model.

    Runs ``cv_n_fold`` repetitions of ``cv_n_fold``-fold CV (one seed per
    repetition), crediting the u65/u80 discounted utilities whenever the
    ground truth lies in the prediction, and logs per-repetition and
    overall averages.

    :param in_path: CSV dataset path (no header, last column = labels).
    :param model_type: key for the precise model factory.
    :param cv_n_fold: number of folds, also the number of repetitions.
    :param seeds: one random seed per repetition; generated when None.
    :param scaling: min-max normalize features when True.
    """
    assert os.path.exists(in_path), "Without training data, not testing"
    data = pd.read_csv(in_path, header=None)
    logger = create_logger("performance_cv_accuracy", True)
    logger.info('Training data set %s, cv_n_fold %s, model_type %s', in_path,
                cv_n_fold, model_type)
    X = data.iloc[:, :-1].values
    if scaling: X = normalize_minmax(X)
    y = np.array(data.iloc[:, -1].tolist())
    avg_u65, avg_u80 = 0, 0
    seeds = generate_seeds(cv_n_fold) if seeds is None else seeds
    logger.info('Seeds generated %s', seeds)
    for repetition in range(cv_n_fold):
        # Fresh random fold assignment per repetition, seeded for
        # reproducibility.
        splitter = KFold(n_splits=cv_n_fold,
                         random_state=seeds[repetition],
                         shuffle=True)
        classifier = __factory_model_precise(model_type, store_covariance=True)
        fold_u65, fold_u80 = 0, 0
        for train_idx, test_idx in splitter.split(y):
            classifier.fit(X[train_idx], y[train_idx])
            X_fold_test, y_fold_test = X[test_idx], y[test_idx]
            n_test = len(test_idx)
            hit_u65, hit_u80 = 0, 0
            for k, sample in enumerate(X_fold_test):
                prediction = classifier.predict([sample])
                logger.debug(
                    "(testing, prediction, ground-truth) (%s, %s, %s)", k,
                    prediction, y_fold_test[k])
                # Credit the discounted utilities only on a containing
                # prediction.
                if y_fold_test[k] in prediction:
                    hit_u65 += u65(prediction)
                    hit_u80 += u80(prediction)
            fold_u65 += hit_u65 / n_test
            fold_u80 += hit_u80 / n_test
        logger.info("Time, seed, u65, u80 (%s, %s, %s, %s)", repetition,
                    seeds[repetition], fold_u65 / cv_n_fold,
                    fold_u80 / cv_n_fold)
        avg_u65 += fold_u65 / cv_n_fold
        avg_u80 += fold_u80 / cv_n_fold
    logger.info("[Total:data-set:avgResults] (%s, %s,  %s)", in_path,
                avg_u65 / cv_n_fold, avg_u80 / cv_n_fold)
# Example #5 (original extraction marker: 示例#5 / 0)
def performance_qda_regularized(in_path=None,
                                out_path=None,
                                cv_n_fold=10,
                                seeds=None,
                                from_alpha=0,
                                to_alpha=2.0,
                                by_alpha=0.01,
                                scaling=False):
    """Nested cross-validation to tune the QDA regularization parameter.

    Outer loop: a k-fold split of the whole data.  For each outer training
    split, an inner k-fold evaluates one regularized QDA per alpha in
    ``[from_alpha, to_alpha)`` (step ``by_alpha``) using the u80 discounted
    accuracy; the best alpha is refit on the full outer training split and
    scored on the held-out outer test split.  Per-alpha inner scores and
    the alpha grid are appended to ``out_path`` as CSV.

    :param in_path: CSV dataset path (no header, last column = labels).
    :param out_path: existing CSV file that results are appended to.
    :param cv_n_fold: number of folds for both outer and inner CV.
    :param seeds: inner-CV seeds, one per outer fold; generated when None.
    :param from_alpha: inclusive lower bound of the alpha grid.
    :param to_alpha: exclusive upper bound of the alpha grid.
    :param by_alpha: step of the alpha grid.
    :param scaling: min-max normalize features when True.
    """
    assert os.path.exists(in_path), "Without training data, not testing"
    assert os.path.exists(out_path), "Without output saving performance"
    data = pd.read_csv(in_path, header=None)
    logger = create_logger("performance_qda_regularized", True)
    logger.info('Training data set %s, cv_n_fold %s, model_type %s', in_path,
                cv_n_fold, "qda")
    X = data.iloc[:, :-1].values
    if scaling: X = normalize_minmax(X)
    y = np.array(data.iloc[:, -1].tolist())

    seeds = generate_seeds(cv_n_fold) if seeds is None else seeds
    logger.info('Seeds generated %s', seeds)

    # Opened in append mode: successive runs accumulate in the same file.
    file_csv = open(out_path, 'a')
    writer = csv.writer(file_csv)

    alphas = np.arange(from_alpha, to_alpha, by_alpha)
    writer.writerow(alphas)

    # One pre-built QDA model per candidate alpha, refit on every inner fold.
    qda_regularized = [None] * len(alphas)
    for idx, alpha in enumerate(alphas):
        qda_regularized[idx] = __factory_model_precise("qda",
                                                       store_covariance=True,
                                                       reg_param=alpha)
    # Outer k-fold split (unseeded shuffle) over the whole dataset.
    kf_second = KFold(n_splits=cv_n_fold, random_state=None, shuffle=True)
    ikfold, accuracy, best_alphas = 0, [0] * cv_n_fold, [0] * cv_n_fold
    for idx_learning, idx_testing in kf_second.split(y):
        X_training, y_training = X[idx_learning], y[idx_learning]
        X_testing, y_testing = X[idx_testing], y[idx_testing]
        # Inner k-fold on the outer training split, seeded per outer fold.
        kf = KFold(n_splits=cv_n_fold,
                   random_state=seeds[ikfold],
                   shuffle=True)
        acc_u80 = [0] * len(qda_regularized)
        for idx_train, idx_test in kf.split(y_training):
            X_cv_train, y_cv_train = X_training[idx_train], y_training[
                idx_train]
            X_cv_test, y_cv_test = X_training[idx_test], y_training[idx_test]
            for model in qda_regularized:
                model.fit(X_cv_train, y_cv_train)
            n_test = len(idx_test)
            for i, test in enumerate(X_cv_test):
                for im, model in enumerate(qda_regularized):
                    evaluate = model.predict([test])
                    # u80 credit only when the prediction contains the truth;
                    # divided by n_test and cv_n_fold so acc_u80[im] ends up
                    # as the mean u80 over the inner CV.
                    if y_cv_test[i] in evaluate:
                        acc_u80[im] += (u80(evaluate) / n_test) / cv_n_fold
        # Alpha with the best inner-CV u80 score wins this outer fold.
        idx_best = np.argmax(acc_u80)
        logger.info("[1kfold:best_model:seed:u80] (%s, %s, %s, %s)", ikfold,
                    alphas[idx_best], seeds[ikfold], acc_u80)
        writer.writerow(acc_u80)
        file_csv.flush()

        # Refit the winning alpha on the whole outer training split and
        # score it on the untouched outer test split.
        best_model = __factory_model_precise("qda",
                                             store_covariance=True,
                                             reg_param=alphas[idx_best])
        best_model.fit(X_training, y_training)
        accuracy[ikfold], bn_test, best_alphas[ikfold] = 0, len(
            idx_testing), alphas[idx_best]
        for i, test in enumerate(X_testing):
            evaluate = best_model.predict([test])
            if y_testing[i] in evaluate:
                accuracy[ikfold] += u80(evaluate) / bn_test
        logger.info("[2kfold:best_model:seed:accuracy] (%s, %s, %s)", ikfold,
                    alphas[idx_best], accuracy[ikfold])
        ikfold += 1
    file_csv.close()
    logger.info("[total:data-set:avgResults] (%s, %s, %s, %s)", in_path,
                np.mean(accuracy), best_alphas, accuracy)