Example #1
import os

import utils  # project-local helpers providing read_all_datasets


def _get_num_dataset_refs():
    # Map each TSC dataset name to the size of its training set.
    script_path = os.path.dirname(__file__)
    datasets = utils.read_all_datasets(script_path, 'TSC')
    dataset_sizes = {
        dataset_name: y_train.shape[0]
        for dataset_name, (_, y_train, _, _) in datasets.items()
    }

    return dataset_sizes
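A hypothetical usage of the helper (the dataset names and sizes below are purely illustrative):

sizes = _get_num_dataset_refs()
print(sizes)  # e.g. {'Coffee': 28, 'Adiac': 390, ...} -- one entry per TSC dataset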
root_dir = config['PATH']['root_directory']
# xps: the Inception hyperparameters varied across experiments.
xps = [
    'use_bottleneck', 'use_residual', 'nb_filters', 'depth', 'kernel_size',
    'batch_size'
]

if sys.argv[1] == 'InceptionTime':
    # run nb_iter_ iterations of Inception on the whole archive indicated in the config file
    classifier_name = 'inception'  # individual networks are trained first, then ensembled
    archive_name = config['ARCHIVE']['archive_name']
    nb_iter_ = config['INCEPTION_MODEL']['Train'][
        'num_of_Inception_modules']  # number of Inception networks to train, one per iteration

    datasets_dict = read_all_datasets(root_dir, archive_name)

    for _iter in range(nb_iter_):
        print('\t\titer', _iter)

        trr = ''
        if _iter != 0:
            trr = '_itr_' + str(_iter)

        tmp_output_directory = root_dir + '/results/' + classifier_name + '/' + archive_name + trr + '/'

        for dataset_name in config['ARCHIVE']['data_name_list']:
            print('\t\t\tdataset_name: ', dataset_name)

            x_train, y_train, x_test, y_test, y_true, nb_classes, y_true_train, enc = prepare_data()
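For reference, a minimal sketch of a prepare_data consistent with the eight values unpacked above. It assumes the datasets_dict and dataset_name globals set in the surrounding loop and integer class labels; this is a hypothetical reconstruction, not necessarily the repository's exact helper:

import numpy as np
import sklearn.preprocessing


def prepare_data():
    # Assumption: the global dict maps dataset_name to (x_train, y_train, x_test, y_test).
    x_train, y_train, x_test, y_test = datasets_dict[dataset_name][:4]

    nb_classes = len(np.unique(np.concatenate((y_train, y_test), axis=0)))

    # Keep integer labels around for evaluation (assumes integer-convertible labels).
    y_true = y_test.astype(np.int64)
    y_true_train = y_train.astype(np.int64)

    # One-hot encode the labels for training; the fitted encoder is returned too.
    enc = sklearn.preprocessing.OneHotEncoder(categories='auto')
    enc.fit(np.concatenate((y_train, y_test), axis=0).reshape(-1, 1))
    y_train = enc.transform(y_train.reshape(-1, 1)).toarray()
    y_test = enc.transform(y_test.reshape(-1, 1)).toarray()

    # Univariate series get an explicit single-channel dimension.
    if len(x_train.shape) == 2:
        x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
        x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))

    return x_train, y_train, x_test, y_test, y_true, nb_classes, y_true_train, enc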
Example #3
root_dir = '/mnt/nfs/casimir/'

results_dir = root_dir + 'results/fcn/'

batch_size = 16
nb_epochs = 10
verbose = False

write_dir_root = root_dir + 'transfer-learning-results/'

if sys.argv[1] == 'transfer_learning':
    # loop through all archives
    for archive_name in ARCHIVE_NAMES:
        # read all datasets
        datasets_dict = read_all_datasets(root_dir, archive_name)
        # loop through all datasets
        for dataset_name in ALL_DATASET_NAMES:
            # get the directory of the model for the current dataset_name
            output_dir = results_dir + archive_name + '/' + dataset_name + '/'
            # loop through all the datasets to transfer the learning to
            for dataset_name_transfer in ALL_DATASET_NAMES:
                # skip if it's the same dataset
                if dataset_name == dataset_name_transfer:
                    continue
                # set the output directory to write new transfer learning results
                write_output_dir = write_dir_root + archive_name + '/' + \
                    dataset_name + '/' + dataset_name_transfer + '/'
                write_output_dir = create_directory(write_output_dir)
                if write_output_dir is None:
                    continue
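The create_directory helper is used above as a guard: a None return means the target directory already exists, so that (dataset, transfer) pair is skipped. A plausible implementation consistent with that usage (an assumption, not necessarily the repository's verbatim code):

import os


def create_directory(directory_path):
    # Return None if the directory already exists (signals "skip this pair"),
    # otherwise create it and return the path.
    if os.path.exists(directory_path):
        return None
    try:
        os.makedirs(directory_path)
    except OSError:
        return None
    return directory_path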
Example #4
from resnet import Classifier_RESNET

do_ensemble = True

if do_ensemble:
    root_dir_output = root_deep_learning_dir + 'results/ensemble/'
elif do_data_augmentation:
    root_dir_output = root_deep_learning_dir + 'results/resnet_augment/'
else:
    root_dir_output = root_deep_learning_dir + 'results/resnet/'

# loop over the archive names
for archive_name in ARCHIVE_NAMES:
    # read all the datasets
    datasets_dict = read_all_datasets(root_dir_dataset_archive, archive_name)
    # loop through all the dataset names
    for dataset_name in DATASET_NAMES:
        print('dataset_name: ', dataset_name)
        # read dataset
        (x_train, y_train, x_test, y_test, nb_classes, classes,
         max_prototypes, init_clusters) = read_data_from_dataset(
             use_init_clusters=False)

        # specify the output directory for this experiment
        output_dir = root_dir_output + archive_name + '/' + dataset_name + '/'

        _, classes_counts = np.unique(y_train, return_counts=True)
        # nb_prototypes is the size of the largest class: after augmentation,
        # every class will have this many time series
        nb_prototypes = classes_counts.max()
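A tiny worked example of the counting step above, with toy labels (not from any archive):

import numpy as np

y_train = np.array([0, 0, 0, 1, 1, 2])  # class 0: 3 series, class 1: 2, class 2: 1
_, classes_counts = np.unique(y_train, return_counts=True)
print(classes_counts)        # [3 2 1]
print(classes_counts.max())  # 3 -> every class is brought up to 3 series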
Example #5
    }

    classifier_obj = classifier_factory.get_classifier(classifier_name)
    classifier = classifier_obj(output_directory, classifier_params)
    classifier.fit(x_train, y_train, x_test, y_test, y_true)


# main
if sys.argv[1] == 'run_all':
    for classifier_name in constants.CLASSIFIERS:
        print('classifier_name', classifier_name)

        for archive_name in constants.ARCHIVE_NAMES:
            print('\tarchive_name', archive_name)

            datasets_dict = utils.read_all_datasets(ROOT_DIR, archive_name)

            for _iter in range(constants.ITERATIONS):
                print('\t\titer : {}'.format(_iter))

                trr = ''
                if _iter != 0:
                    trr = '_itr_{}'.format(_iter)

                tmp_output_directory = os.path.join(ROOT_DIR, 'results',
                                                    classifier_name,
                                                    archive_name + trr)

                for dataset_name in constants.dataset_names_for_archive[
                        archive_name]:
                    print('\t\t\tdataset_name: ', dataset_name)
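For reference, classifier_factory.get_classifier near the top of this example only needs to map a name to a classifier class. A minimal hypothetical sketch (the registry contents and the fcn module are assumptions based on names appearing elsewhere in these examples):

# classifier_factory.py (hypothetical sketch)
from fcn import Classifier_FCN
from resnet import Classifier_RESNET

_REGISTRY = {
    'fcn': Classifier_FCN,
    'resnet': Classifier_RESNET,
}


def get_classifier(classifier_name):
    # Look up the classifier class by name; raises KeyError for unknown names.
    return _REGISTRY[classifier_name]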
Example #6
def main():

    start = time.time()

    args = parser.parse_args()

    if args.approach == 1:
        print('Conduct evaluation using approach 1.')

        if args.iter is not None:
            iterations = args.iter
        else:
            iterations = ITERATIONS

        if args.cls is not None:
            if args.cls == 'allCls':
                for classifier_name in CLASSIFIERS:

                    ARCHIVE_NAME = ARCHIVE_NAMES[0]
                    # per-classifier epochs: the CLI value overrides the default
                    if args.cls_epochs is not None:
                        epochs = args.cls_epochs
                    else:
                        epochs = CLS_EPOCHS[classifier_name]

                    cl = classifier_name + '_ep' + str(epochs)
                    if args.es_patience is not None:
                        cl += '_patience' + str(args.es_patience)

                    print('\tclassifier_name', cl)

                    datasets_dict = read_all_datasets(ROOT_DIR, ARCHIVE_NAME)

                    tmp_output_directory = (ROOT_DIR + '/results/' + cl +
                                            '/approach1_iter' +
                                            str(iterations) + '/')

                    if args.aug == 'noAug':
                        augmentator = None
                        augmentator_name = 'NoAug'

                        run_iterations(args, augmentator, augmentator_name,
                                       tmp_output_directory, iterations,
                                       datasets_dict, classifier_name, epochs,
                                       start)

                    if args.aug == 'allAug':
                        for aug in AUG:
                            if aug == 'Random':
                                for parameters in RANDOM_AUG_PARAMETERS:
                                    diagonal, down, right, each, n = parameters
                                    augmentator = RandomAug.RandomAug(
                                        n, diagonal, down, right, each)
                                    augmentator_name = (
                                        aug + '_diag' + str(diagonal) +
                                        '_down' + str(down) + '_right' +
                                        str(right) + '_each' + str(int(each)) +
                                        '_n' + str(n))
                                    run_iterations(args, augmentator,
                                                   augmentator_name,
                                                   tmp_output_directory,
                                                   iterations, datasets_dict,
                                                   classifier_name, epochs,
                                                   start)
                            if aug == 'PRA':
                                for n in PRA_N:
                                    augmentator = PartlyRandomAug.PartlyRandomAug(n)
                                    augmentator_name = aug + '_n' + str(n)
                                    run_iterations(args, augmentator,
                                                   augmentator_name,
                                                   tmp_output_directory,
                                                   iterations, datasets_dict,
                                                   classifier_name, epochs,
                                                   start)
                            if aug == 'Per':
                                for n in PER_N:
                                    for prob in PER_PROB:
                                        augmentator = PermutationAug.PermutationAug(n, prob)
                                        augmentator_name = (aug + '_n' + str(n) +
                                                            '_prob' + str(prob))
                                        run_iterations(args, augmentator,
                                                       augmentator_name,
                                                       tmp_output_directory,
                                                       iterations, datasets_dict,
                                                       classifier_name, epochs,
                                                       start)

    if args.approach == 2:
        print('Conduct evaluation using approach 2.')
        if args.cv is not None:
            cv = args.cv
        else:
            cv = CV

        if args.cls is not None:
            if args.cls == 'allCls':
                for classifier_name in CLASSIFIERS:

                    ARCHIVE_NAME = ARCHIVE_NAMES[0]

                    # per-classifier epochs: the CLI value overrides the default
                    if args.cls_epochs is not None:
                        epochs = args.cls_epochs
                    else:
                        epochs = CLS_EPOCHS[classifier_name]

                    cl = classifier_name + '_ep' + str(epochs)
                    if args.es_patience is not None:
                        cl += '_patience' + str(args.es_patience)

                    print('\tclassifier_name', cl)

                    datasets_dict = read_all_datasets(ROOT_DIR, ARCHIVE_NAME)

                    tmp_output_directory = (ROOT_DIR + '/results/' + cl +
                                            '/approach2_cv' + str(cv) + '/')

                    if args.aug == 'noAug':
                        augmentator = None
                        augmentator_name = 'NoAug'

                        run_cv(args, augmentator, augmentator_name,
                               tmp_output_directory, datasets_dict,
                               classifier_name, epochs, start, cv)

                    if args.aug == 'allAug':
                        for aug in AUG:
                            if aug == 'Random':
                                for parameters in RANDOM_AUG_PARAMETERS:
                                    diagonal, down, right, each, n = parameters
                                    augmentator = RandomAug.RandomAug(
                                        n, diagonal, down, right, each)
                                    augmentator_name = (
                                        aug + '_diag' + str(diagonal) +
                                        '_down' + str(down) + '_right' +
                                        str(right) + '_each' + str(int(each)) +
                                        '_n' + str(n))
                                    run_cv(args, augmentator, augmentator_name,
                                           tmp_output_directory, datasets_dict,
                                           classifier_name, epochs, start, cv)
                            if aug == 'PRA':
                                for n in PRA_N:
                                    augmentator = PartlyRandomAug.PartlyRandomAug(n)
                                    augmentator_name = aug + '_n' + str(n)
                                    run_cv(args, augmentator, augmentator_name,
                                           tmp_output_directory, datasets_dict,
                                           classifier_name, epochs, start, cv)
                            if aug == 'Per':
                                for n in PER_N:
                                    for prob in PER_PROB:
                                        augmentator = PermutationAug.PermutationAug(n, prob)
                                        augmentator_name = (aug + '_n' + str(n) +
                                                            '_prob' + str(prob))
                                        run_cv(args, augmentator,
                                               augmentator_name,
                                               tmp_output_directory,
                                               datasets_dict, classifier_name,
                                               epochs, start, cv)

    if args.generate_results_overview:
        generate_results_overview()
        copy_eval_results_and_check_best_models()
        print('Results overview generated.')
    if args.plot_epochs_overview:
        plot_epochs_overview()
        print('Epochs overview plotted.')
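The augmentation branches are duplicated verbatim between approach 1 and approach 2; a generator that yields (augmentator, name) pairs would factor them out. A hedged sketch using the constants from this script (an illustration, not part of the original code):

def iter_augmentators():
    # Yield (augmentator, augmentator_name) pairs for every configured
    # augmentation, mirroring the branches inside main() above.
    for aug in AUG:
        if aug == 'Random':
            for diagonal, down, right, each, n in RANDOM_AUG_PARAMETERS:
                yield (RandomAug.RandomAug(n, diagonal, down, right, each),
                       aug + '_diag' + str(diagonal) + '_down' + str(down) +
                       '_right' + str(right) + '_each' + str(int(each)) +
                       '_n' + str(n))
        elif aug == 'PRA':
            for n in PRA_N:
                yield PartlyRandomAug.PartlyRandomAug(n), aug + '_n' + str(n)
        elif aug == 'Per':
            for n in PER_N:
                for prob in PER_PROB:
                    yield (PermutationAug.PermutationAug(n, prob),
                           aug + '_n' + str(n) + '_prob' + str(prob))

Each approach then collapses to a single loop, e.g. for augmentator, augmentator_name in iter_augmentators(): run_cv(args, augmentator, augmentator_name, tmp_output_directory, datasets_dict, classifier_name, epochs, start, cv).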