Example #1
############################################### main

import sys

# change this directory for your machine;
# it should contain the archive folder with both the univariate and multivariate archives
root_dir = '/scratch/Project-CTI/data/SynCAN/classification_SOA'
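# Illustrative layout only (the folder names below are placeholders, not taken
# from the excerpt; the exact names depend on how the archives were extracted):
# root_dir/
#     archives/
#         <univariate archive>/
#         <multivariate archive>/
#     results/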

if sys.argv[1] == 'transform_mts_to_ucr_format':
    transform_mts_to_ucr_format()
elif sys.argv[1] == 'visualize_filter':
    visualize_filter(root_dir)
elif sys.argv[1] == 'viz_for_survey_paper':
    viz_for_survey_paper(root_dir)
elif sys.argv[1] == 'viz_cam':
    viz_cam(root_dir)
elif sys.argv[1] == 'generate_results_csv':
    res = generate_results_csv('results.csv', root_dir)
    print(res)
elif sys.argv[1] == 'mts_benchmark':
    info_dict = get_info_run()
    archive_names = info_dict['archive_names']
    mts_data_names = info_dict['mts_dataset_names']
    classifier_names = info_dict['classifiers_names']

    print(archive_names, mts_data_names, classifier_names)
    
    itr = sys.argv[2]
    if itr == '_itr_0': 
        itr = ''
    
    for archive_name in archive_names:
        for dataset_name in mts_data_names:
            for clf_name in classifier_names:
                # truncated in the snippet: the dataset loading and model construction
                # are reconstructed here (read_dataset / create_classifier are assumed names)
                x_train, y_train, x_test, y_test, y_true, nb_classes = read_dataset(
                    root_dir, archive_name, dataset_name)
                classifier = create_classifier(x_train.shape[1:],
                                               nb_classes,
                                               clf_name=clf_name)

                classifier.fit(x_train, y_train, x_test, y_test, y_true)

elif sys.argv[1] == 'run_length_xps':
    # this is to generate the archive for the length experiments
    """
    this inject a pattern with an amplitude equal to 1.0 in a pre-defined region of the time series. 
    This region will be specific to a certain class, 
    therefore by changing the placement of this pattern we can generate an unlimited amount of classes, 
    whereas the random noise will allow us to generate an unlimited amount of time series instances per class.
    """
    sample_dataset = config['DATA_AUGMENTATION']['sample_dataset']
    run_length_xps(root_dir)

elif sys.argv[1] == 'generate_results_csv':
    clfs = []
    itr = '-' + str(
        config['INCEPTION_MODEL']['Train']['num_of_Inception_modules'])
    inceptionTime = 'nne/inception'
    # add InceptionTime: an ensemble of Inception networks indicated in config file
    clfs.append(inceptionTime + itr)
    # add InceptionTime for each hyperparameter study
    for xp in xps:  # 'xps' lists the hyperparameter names, defined elsewhere in the file
        xp_arr = get_xp_val(xp)
        for xp_val in xp_arr:
            clfs.append(inceptionTime + '/' + xp + '/' + str(xp_val) + itr)
    df = generate_results_csv('results.csv', root_dir, clfs)
    print(df)
Example #3
                    print('\t\t\t\tDONE')

                    # the creation of this directory means the experiment is complete
                    utils.create_directory(
                        os.path.join(output_directory, 'DONE'))

elif sys.argv[1] == 'transform_mts_to_ucr_format':
    utils.transform_mts_to_ucr_format()
elif sys.argv[1] == 'visualize_filter':
    utils.visualize_filter(ROOT_DIR)
elif sys.argv[1] == 'viz_for_survey_paper':
    utils.viz_for_survey_paper(ROOT_DIR)
elif sys.argv[1] == 'viz_cam':
    utils.viz_cam(ROOT_DIR)
elif sys.argv[1] == 'generate_results_csv':
    res = utils.generate_results_csv('results.csv', ROOT_DIR)
    print(res.to_string())
else:
    # this is the code used to launch an experiment on a dataset
    archive_name = sys.argv[1]
    dataset_name = sys.argv[2]
    classifier_name = sys.argv[3]
    itr = sys.argv[4]

    if itr == '_itr_0':
        itr = ''

    output_directory = os.path.join(ROOT_DIR, 'results', classifier_name,
                                    archive_name + itr, dataset_name)
    test_dir_df_metrics = os.path.join(output_directory, 'df_metrics.csv')
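
For reference, a main script with this kind of sys.argv dispatch is launched from the command line. Assuming the file is saved as main.py (the file name and the archive/dataset/classifier values below are placeholders, not taken from the excerpt), typical invocations would look like:

python main.py generate_results_csv
python main.py UCRArchive_2018 Coffee fcn _itr_0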