# Example 1
def performance_accuracy_noise_corrupted_test_data(in_train_paths=None,
                                                   in_tests_paths=None,
                                                   model_type_precise='lda',
                                                   model_type_imprecise='ilda',
                                                   ell_optimal=0.1,
                                                   scaling=False,
                                                   lib_path_server=None,
                                                   nb_process=10):
    assert isinstance(in_train_paths,
                      list), "Without training data, cannot create the model"
    assert isinstance(
        in_tests_paths,
        list), "Without test data, cannot compute accuracy"

    logger = create_logger("performance_accuracy_noise_corrupted_test_data",
                           True)
    logger.info('Training dataset (%s, %s, %s)', in_train_paths,
                model_type_imprecise, ell_optimal)

    manager = ManagerWorkers(nb_process=nb_process)
    manager.executeAsync(model_type_imprecise, lib_path_server)
    versus = model_type_imprecise + "_vs_" + model_type_precise
    file_csv = open("results_" + versus + "_noise_accuracy.csv", 'w')
    writer = csv.writer(file_csv)
    model_precise = __factory_model_precise(model_type_precise,
                                            store_covariance=True)
    for in_train_path in in_train_paths:
        X_train, y_train = dataset_to_Xy(in_train_path, scaling=scaling)
        model_precise.fit(X_train, y_train)
        accuracies = dict({})
        for in_test_path in in_tests_paths:
            X_test, y_test = dataset_to_Xy(in_test_path, scaling=scaling)
            _u65, _u80, _set = computing_training_testing_step(
                X_train, y_train, X_test, y_test, ell_optimal, manager, 0, 0,
                0)
            evaluate = model_precise.predict(X_test)
            _acc = sum(
                1 for k, j in zip(evaluate, y_test) if k == j) / len(y_test)
            logger.debug("accuracy-in_test_path (%s, %s, %s, %s, %s, %s)",
                         ntpath.basename(in_train_path),
                         ntpath.basename(in_test_path), ell_optimal, _u65,
                         _u80, _acc)
            accuracies[ntpath.basename(in_test_path)] = [
                ell_optimal, _u65, _u80, _set, _acc
            ]
            writer.writerow([
                ntpath.basename(in_train_path),
                ntpath.basename(in_test_path), ell_optimal, _u65, _u80, _set,
                _acc
            ])
            file_csv.flush()
        logger.debug("Partial-finish-accuracy-noise-corrupted_test %s: %s",
                     ntpath.basename(in_train_path), accuracies)
    manager.poisonPillTraining()
    file_csv.close()
    logger.debug("Finish-accuracy-noise-corrupted_test")
def performance_cv_accuracy_imprecise(in_path=None,
                                      model_type="ilda",
                                      ell_optimal=0.1,
                                      nb_process=2,
                                      lib_path_server=None,
                                      cv_n_fold=10,
                                      seeds=None,
                                      criterion="maximality"):
    assert os.path.exists(in_path), "Without training data, cannot run the evaluation"
    data = pd.read_csv(in_path)
    logger = create_logger("performance_cv_accuracy_imprecise", True)
    logger.info('Training dataset (%s, %s, %s, %s)', in_path, model_type,
                ell_optimal, criterion)
    X = data.iloc[:, :-1].values
    y = np.array(data.iloc[:, -1].tolist())
    avg_u65, avg_u80 = 0, 0
    seeds = generate_seeds(cv_n_fold) if seeds is None else seeds
    logger.info('Seeds used for accuracy %s', seeds)
    manager = ManagerWorkers(nb_process=nb_process, criterion=criterion)
    manager.executeAsync(model_type, lib_path_server)
    for time in range(cv_n_fold):
        kf = KFold(n_splits=cv_n_fold, random_state=seeds[time], shuffle=True)
        mean_u65, mean_u80 = 0, 0
        for idx_train, idx_test in kf.split(y):
            logger.info("Splits train %s", idx_train)
            logger.info("Splits test %s", idx_test)
            X_cv_train, y_cv_train = X[idx_train], y[idx_train]
            X_cv_test, y_cv_test = X[idx_test], y[idx_test]
            mean_u65, mean_u80 = computing_training_testing_step(
                X_cv_train, y_cv_train, X_cv_test, y_cv_test, ell_optimal,
                manager, mean_u65, mean_u80)
            logger.debug("Partial-kfold (%s, %s, %s, %s)", ell_optimal, time,
                         mean_u65, mean_u80)
        logger.info("Time, seed, u65, u80 (%s, %s, %s, %s)", time, seeds[time],
                    mean_u65 / cv_n_fold, mean_u80 / cv_n_fold)
        avg_u65 += mean_u65 / cv_n_fold
        avg_u80 += mean_u80 / cv_n_fold
    manager.poisonPillTraining()
    logger.debug("total-ell (%s, %s, %s, %s)", in_path, ell_optimal,
                 avg_u65 / cv_n_fold, avg_u80 / cv_n_fold)
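
# A minimal usage sketch (hypothetical dataset path): repeated k-fold
# evaluation of an imprecise model at a fixed ell, using the "maximality"
# decision criterion.
performance_cv_accuracy_imprecise(
    in_path="datasets/iris.csv",
    model_type="ilda",
    ell_optimal=0.5,
    nb_process=2,
    cv_n_fold=10,
    criterion="maximality")
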

# Example 3
def performance_cv_accuracy_imprecise(in_path=None,
                                      model_type="ilda",
                                      ell_optimal=0.1,
                                      scaling=False,
                                      lib_path_server=None,
                                      cv_n_fold=10,
                                      seeds=None,
                                      nb_process=10):
    assert os.path.exists(
        in_path
    ), "Without training data, cannot perform cross-validation accuracy"
    logger = create_logger("performance_cv_accuracy_imprecise", True)
    logger.info('Training dataset (%s, %s, %s)', in_path, model_type,
                ell_optimal)
    X, y = dataset_to_Xy(in_path, scaling=scaling)

    avg_u65, avg_u80 = 0, 0
    seeds = generate_seeds(cv_n_fold) if seeds is None else seeds
    logger.info('Seeds used for accuracy %s', seeds)

    manager = ManagerWorkers(nb_process=nb_process)
    manager.executeAsync(model_type, lib_path_server)
    for time in range(cv_n_fold):
        kf = KFold(n_splits=cv_n_fold, random_state=seeds[time], shuffle=True)
        mean_u65, mean_u80 = 0, 0
        for idx_train, idx_test in kf.split(y):
            mean_u65, mean_u80, _ = computing_training_testing_step(
                X[idx_train], y[idx_train], X[idx_test], y[idx_test],
                ell_optimal, manager, mean_u65, mean_u80)
            logger.debug("Partial-kfold (%s, %s, %s, %s)", ell_optimal, time,
                         mean_u65, mean_u80)
        logger.info("Time, seed, u65, u80 (%s, %s, %s, %s)", time, seeds[time],
                    mean_u65 / cv_n_fold, mean_u80 / cv_n_fold)
        avg_u65 += mean_u65 / cv_n_fold
        avg_u80 += mean_u80 / cv_n_fold
    manager.poisonPillTraining()
    logger.debug("Total-ell (%s, %s, %s, %s)", in_path, ell_optimal,
                 avg_u65 / cv_n_fold, avg_u80 / cv_n_fold)
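
# A minimal usage sketch for the scaling-aware variant above (hypothetical
# dataset path): feature scaling is delegated to dataset_to_Xy via scaling=True.
performance_cv_accuracy_imprecise(
    in_path="datasets/seeds.csv",
    model_type="ilda",
    ell_optimal=0.5,
    scaling=True,
    cv_n_fold=10,
    nb_process=4)
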

# Example 4
def computing_best_imprecise_mean(in_path=None, out_path=None, lib_path_server=None, model_type="ilda",
                                  from_ell=0.1, to_ell=1.0, by_ell=0.1, seed=None, cv_kfold_first=10,
                                  nb_process=2, skip_nfold=0, cv_kfold_second=10, seed_second=None, scaling=False):
    assert os.path.exists(in_path), "Without training data, cannot run the evaluation"
    assert os.path.exists(out_path), "Output file for results does not exist"

    logger = create_logger("computing_best_imprecise_mean_cv", True)
    logger.info('Training dataset (%s, %s, %s)', in_path, out_path, model_type)
    logger.info('Parameters (from_ell, to_ell, by_ell, nb_process, skip_nfold, cv_kfold_second) '
                '(%s, %s, %s, %s, %s, %s)', from_ell, to_ell, by_ell, nb_process,
                skip_nfold, cv_kfold_second)

    data = pd.read_csv(in_path, header=None)
    X = data.iloc[:, :-1].values
    if scaling: X = normalize_minmax(X)
    y = np.array(data.iloc[:, -1].tolist())

    # Seed the top-level k-fold learning/testing split
    seed = random.randrange(pow(2, 30)) if seed is None else seed
    logger.debug("[FIRST-STEP-SEED] MODEL: %s, SEED: %s", model_type, seed)

    # Open the CSV file (append mode) for saving results
    file_csv = open(out_path, 'a')
    writer = csv.writer(file_csv)
    manager = ManagerWorkers(nb_process=nb_process)
    manager.executeAsync(model_type, lib_path_server)

    kfFirst = KFold(n_splits=cv_kfold_first, random_state=seed, shuffle=True)
    acc_u80, acc_u65, idx_kfold = dict(), dict(), 0
    seed_2step = generate_seeds(cv_kfold_second) if seed_second is None else seed_second
    logger.debug("[SECOND-STEP-SEEDS] MODEL: %s, SEED: %s, SECOND-SEED: %s", model_type, seed, seed_2step)
    for idx_learning, idx_testing in kfFirst.split(y):
        ell_u65, ell_u80 = dict(), dict()
        # Top-level k-fold split (learning, testing) used to select the optimal ell
        X_learning, y_learning = X[idx_learning], y[idx_learning]
        X_testing, y_testing = X[idx_testing], y[idx_testing]
        logger.info("Splits %s learning %s", idx_kfold, idx_learning)
        logger.info("Splits %s testing %s", idx_kfold, idx_testing)

        # When folds are skipped, restart the ell grid at 0.01 for the remaining folds
        if skip_nfold != 0 and idx_kfold > skip_nfold:
            from_ell = 0.01

        # Skip the first `skip_nfold` folds (useful for splitting the work across parallel runs)
        if idx_kfold >= skip_nfold:
            # Build the same second-level k-fold (train, test) splits so every ell is evaluated on identical data
            splits_ell = list([])
            logger.debug("[2-STEP-SEED] MODEL: %s, SEED: %s OF FIRST STEP %s", model_type, seed_2step[idx_kfold], seed)
            kfSecond = KFold(n_splits=cv_kfold_second, random_state=seed_2step[idx_kfold], shuffle=True)
            for idx_learn_train, idx_learn_test in kfSecond.split(y_learning):
                splits_ell.append((idx_learn_train, idx_learn_test))
                logger.info("Splits %s train %s", len(splits_ell), idx_learn_train)
                logger.info("Splits %s test %s", len(splits_ell), idx_learn_test)

            for ell_current in np.arange(from_ell, to_ell, by_ell):
                ell_u65[ell_current], ell_u80[ell_current] = 0, 0
                logger.info("ELL_CURRENT %s", ell_current)
                for idx_learn_train, idx_learn_test in splits_ell:
                    logger.info("Splits step train %s", idx_learn_train)
                    logger.info("Splits step test %s", idx_learn_test)
                    X_cv_train, y_cv_train = X_learning[idx_learn_train], y_learning[idx_learn_train]
                    X_cv_test, y_cv_test = X_learning[idx_learn_test], y_learning[idx_learn_test]

                    ell_u65[ell_current], ell_u80[ell_current], _ = \
                        computing_training_testing_step(X_cv_train, y_cv_train, X_cv_test, y_cv_test, ell_current,
                                                        manager, ell_u65[ell_current], ell_u80[ell_current])

                    logger.info("Partial-kfold (%s, %s, %s)", ell_current, ell_u65[ell_current], ell_u80[ell_current])
                # Average the accumulated accuracies over the second-level folds
                ell_u65[ell_current] = ell_u65[ell_current] / cv_kfold_second
                ell_u80[ell_current] = ell_u80[ell_current] / cv_kfold_second
                writer.writerow([ell_current, idx_kfold, ell_u65[ell_current], ell_u80[ell_current]])
                file_csv.flush()
                logger.debug("Partial-ell-k-step (%s, %s, %s)", idx_kfold, ell_u65[ell_current], ell_u80[ell_current])
            logger.debug("Total-ell-k-step (%s, %s, %s, %s)", in_path, idx_kfold, ell_u65, ell_u80)

            # Compute the optimal ell values to use in the testing step
            acc_ell_u80 = max(ell_u80.values())
            acc_ell_u65 = max(ell_u65.values())
            ell_u80_opts = [k for k, v in ell_u80.items() if v == acc_ell_u80]
            ell_u65_opts = [k for k, v in ell_u65.items() if v == acc_ell_u65]
            acc_u65[idx_kfold], acc_u80[idx_kfold] = 0, 0
            n_ell80_opts, n_ell65_opts = len(ell_u80_opts), len(ell_u65_opts)
            for ell_u80_opt in ell_u80_opts:
                logger.info("ELL_OPTIMAL_CV_U80 %s", ell_u80_opt)
                _, _acc_u80, _ = \
                    computing_training_testing_step(X_learning, y_learning, X_testing, y_testing, ell_u80_opt,
                                                    manager, 0, 0)
                acc_u80[idx_kfold] += _acc_u80
                writer.writerow([-999, -8, ell_u80_opt, _acc_u80])

            for ell_u65_opt in ell_u65_opts:
                logger.info("ELL_OPTIMAL_CV_U65 %s", ell_u65_opt)
                _acc_u65, _, _ = \
                    computing_training_testing_step(X_learning, y_learning, X_testing, y_testing, ell_u65_opt,
                                                    manager, 0, 0)
                acc_u65[idx_kfold] += _acc_u65
                writer.writerow([-999, -7, ell_u65_opt, _acc_u65])

            acc_u65[idx_kfold] = acc_u65[idx_kfold] / n_ell65_opts
            acc_u80[idx_kfold] = acc_u80[idx_kfold] / n_ell80_opts
            writer.writerow([-999, idx_kfold, acc_u65[idx_kfold], acc_u80[idx_kfold]])
            file_csv.flush()
            logger.debug("Partial-ell-2step (u80, u65, accs) (%s, %s, %s, %s, %s)", -999, ell_u80_opts, ell_u65_opts,
                         acc_u65[idx_kfold], acc_u80[idx_kfold])
        idx_kfold += 1
    writer.writerow([-9999, -9, np.mean(list(acc_u65.values())), np.mean(list(acc_u80.values()))])
    manager.poisonPillTraining()
    file_csv.close()
    logger.debug("Total-accuracy (%s, %s, %s)", in_path, acc_u65, acc_u80)
    logger.debug("Total-avg-accuracy (%s, %s, %s)", in_path, np.mean(list(acc_u65.values())),
                 np.mean(list(acc_u80.values())))
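
# A minimal usage sketch for the nested k-fold variant above (hypothetical
# paths): out_path must already exist and be writable, since results are
# appended to it.
computing_best_imprecise_mean(
    in_path="datasets/glass.csv",
    out_path="results_glass_ilda.csv",
    model_type="ilda",
    from_ell=0.1, to_ell=1.0, by_ell=0.1,
    cv_kfold_first=10,
    cv_kfold_second=10,
    nb_process=2)
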
def computing_best_imprecise_mean(in_path=None,
                                  out_path=None,
                                  cv_nfold=10,
                                  model_type="ilda",
                                  test_size=0.4,
                                  from_ell=0.1,
                                  to_ell=1.0,
                                  by_ell=0.1,
                                  seeds=None,
                                  lib_path_server=None,
                                  nb_process=2,
                                  n_sampling=10,
                                  skip_n_sample=0,
                                  criterion="maximality",
                                  scaling=False):
    assert os.path.exists(in_path), "Without training data, cannot run the evaluation"
    assert os.path.exists(out_path), "Output file for results does not exist"

    logger = create_logger("computing_best_imprecise_mean_sampling", True)
    logger.info('Training dataset (%s, %s, %s)', in_path, model_type,
                criterion)
    logger.info(
        'Parameters (test_size, from_ell, to_ell, by_ell, nb_process, n_sampling, skip_n_sample) '
        '(%s, %s, %s, %s, %s, %s, %s)',
        test_size, from_ell, to_ell, by_ell, nb_process, n_sampling,
        skip_n_sample)
    data = pd.read_csv(in_path, header=None)
    X = data.iloc[:, :-1].values
    if scaling: X = normalize_minmax(X)
    y = np.array(data.iloc[:, -1].tolist())

    # Seeds so the run can be resumed if the process is killed
    seeds = generate_seeds(n_sampling) if seeds is None else seeds
    logger.debug("MODEL: %s, SEED: %s", model_type, seeds)

    # Open the CSV file (append mode) for saving results
    file_csv = open(out_path, 'a')
    writer = csv.writer(file_csv)
    manager = ManagerWorkers(nb_process=nb_process, criterion=criterion)
    manager.executeAsync(model_type, lib_path_server)
    acc_u80, acc_u65 = dict(), dict()
    for sampling in range(min(n_sampling, len(seeds))):
        X_learning, X_testing, y_learning, y_testing = \
            train_test_split(X, y, test_size=test_size, random_state=seeds[sampling])
        logger.info("Splits %s learning %s", sampling, y_learning)
        logger.info("Splits %s testing %s", sampling, y_testing)

        # When samplings are skipped, restart the ell grid at 0.01 for the remaining samplings
        if skip_n_sample != 0 and sampling > skip_n_sample: from_ell = 0.01
        # Skip the first `skip_n_sample` samplings (useful for splitting the work across parallel runs)
        if sampling >= skip_n_sample:
            kf = KFold(n_splits=cv_nfold, random_state=None, shuffle=True)
            ell_u65, ell_u80, splits = dict(), dict(), list([])
            for idx_train, idx_test in kf.split(y_learning):
                splits.append((idx_train, idx_test))
                logger.info("Sampling %s Splits %s train %s", sampling,
                            len(splits), idx_train)
                logger.info("Sampling %s Splits %s test %s", sampling,
                            len(splits), idx_test)

            for ell_current in np.arange(from_ell, to_ell, by_ell):
                ell_u65[ell_current], ell_u80[ell_current] = 0, 0
                logger.info("ELL_CURRENT %s", ell_current)
                for idx_train, idx_test in splits:
                    logger.info("Splits train %s", idx_train)
                    logger.info("Splits test %s", idx_test)
                    X_cv_train, y_cv_train = X_learning[idx_train], y_learning[
                        idx_train]
                    X_cv_test, y_cv_test = X_learning[idx_test], y_learning[
                        idx_test]
                    # Accumulate the u65/u80 accuracies for this cross-validation split
                    ell_u65[ell_current], ell_u80[ell_current] = \
                        computing_training_testing_step(X_cv_train, y_cv_train, X_cv_test, y_cv_test, ell_current,
                                                        manager, ell_u65[ell_current], ell_u80[ell_current])
                    logger.info("Partial-kfold (%s, %s, %s)", ell_current,
                                ell_u65[ell_current], ell_u80[ell_current])

                ell_u65[ell_current] = ell_u65[ell_current] / cv_nfold
                ell_u80[ell_current] = ell_u80[ell_current] / cv_nfold
                writer.writerow([
                    ell_current, sampling, ell_u65[ell_current],
                    ell_u80[ell_current]
                ])
                file_csv.flush()
                logger.debug("Partial-ell-sampling (%s, %s, %s, %s)",
                             ell_current, sampling, ell_u65, ell_u80)
            logger.debug("Total-ell-sampling (%s, %s, %s, %s)", in_path,
                         sampling, ell_u65, ell_u80)

            # Compute the optimal ell values to use in the testing step
            acc_ellu80 = max(ell_u80.values())
            acc_ellu65 = max(ell_u65.values())
            ellu80_opts = [k for k, v in ell_u80.items() if v == acc_ellu80]
            ellu65_opts = [k for k, v in ell_u65.items() if v == acc_ellu65]
            acc_u65[sampling], acc_u80[sampling] = 0, 0
            n_ell80_opts, n_ell65_opts = len(ellu80_opts), len(ellu65_opts)

            for ellu80_opt in ellu80_opts:
                logger.info("ELL_OPTIMAL_SAMPLING_U80 %s", ellu80_opt)
                _, acc_u80[sampling] = \
                    computing_training_testing_step(X_learning, y_learning, X_testing, y_testing, ellu80_opt,
                                                    manager, 0, acc_u80[sampling])

            for ellu65_opt in ellu65_opts:
                logger.info("ELL_OPTIMAL_SAMPLING_U65 %s", ellu65_opt)
                acc_u65[sampling], _ = \
                    computing_training_testing_step(X_learning, y_learning, X_testing, y_testing, ellu65_opt,
                                                    manager, acc_u65[sampling], 0)

            acc_u65[sampling] = acc_u65[sampling] / n_ell65_opts
            acc_u80[sampling] = acc_u80[sampling] / n_ell80_opts
            writer.writerow(
                [-999, sampling, acc_u65[sampling], acc_u80[sampling]])
            file_csv.flush()
            logger.debug("Partial-ell-2step (%s, %s, %s, %s)", -999,
                         ellu80_opts, acc_u65[sampling], acc_u80[sampling])

    writer.writerow([
        -9999, -9,
        np.mean(list(acc_u65.values())),
        np.mean(list(acc_u80.values()))
    ])
    manager.poisonPillTraining()
    file_csv.close()
    logger.debug("Total-accuracy (%s, %s, %s)", in_path, acc_u65, acc_u80)
    logger.debug("Total-avg-accuracy (%s, %s, %s)", in_path,
                 np.mean(list(acc_u65.values())),
                 np.mean(list(acc_u80.values())))
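
# A minimal usage sketch for the train/test-sampling variant above
# (hypothetical paths): out_path must already exist because results are
# appended, and n_sampling random splits of size test_size are evaluated.
computing_best_imprecise_mean(
    in_path="datasets/glass.csv",
    out_path="results_glass_ilda_sampling.csv",
    model_type="ilda",
    test_size=0.4,
    from_ell=0.1, to_ell=1.0, by_ell=0.1,
    n_sampling=10,
    nb_process=2,
    criterion="maximality")
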