Example #1
def generate_test_set(data_split,
                      filters,
                      img_shape,
                      keep_original,
                      test_noise_key='test_set_noise',
                      h5_noise_key='h5_test_noise',
                      test_key='test_set',
                      chungus=4):
    h5_path = get_h5_test()
    h5_obj = h5_object(h5_path, training_split=1)
    generate_and_save_dataset(h5_obj, data_split, filters, img_shape,
                              get_paths(test_noise_key), get_paths(test_key),
                              get_paths(h5_noise_key), chungus, keep_original)
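
For context, a minimal sketch of how generate_test_set might be invoked. All argument values below are illustrative assumptions, not values taken from this repo; load_homo_filters is borrowed from the experiment driver further down, and its return type is assumed to be what generate_and_save_dataset expects.

# Hypothetical invocation of generate_test_set; values are illustrative assumptions.
generate_test_set(data_split=1,                  # process the test set in one chunk
                  filters=load_homo_filters(),   # any filters produced by a load_*_filters helper
                  img_shape=(32, 32),            # assumed target image resolution
                  keep_original=True)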
Example #2
def QuickDebug() -> None:
    """Small function that shows how to call the perlin class with some config dict. And shows the resulting image
    """
    img = Image.open(get_paths("dataset"))
    p = {'octaves': 6, 'persistence': 0.5, 'lacunarity': 2.0, 'alpha': 0.3}
    pn = perlin(p)
    img = pn.Foggyfy(img).show()
Example #3
def run_experiment_one(lazy_split: int, train_h5_path: str, test_h5_path: str, get_models,
                       epochs_end: int = 10, dataset_split: float = 0.7, folder_extension=None,
                       model_paths=None, data_to_test_on=1, noise_tuple=None, run_on_one_model=False) -> None:
    """This method runs experiment one, and is done in several steps:
            1. For each epoch to train for, the models are trained. After each epoch, the accuracy is saved on the object.
            2. When the training is done, all the data is saved in csv files as (Epochs,Resolution,Class,Class_Acuracy,Total_in_Class)
                Here the accuracy for all classes has its own row.
            3. Once this is done, rather than representing all classes in each epoch in seperate row, this is combined with one row for
                each epoch (Epochs,Model_accuracy,Resolution).
            4. All the summed files are now combined into one file, as (class, model[resolution 1]_[epoch 1], ... , model[resolution n]_[epoch n])
            5. Now for each model, the maximal accuracy is found, and the given epoch is saved
            6. Based on this information, three new models are made, with the idael epoch, representing the best possible models for this experiment.

    Args:
        lazy_split (int): How many pieces the dataset should be split into
        train_h5_path (str): The path for the trainig h5py
        test_h5_path (str): The path for the test h5py
        epochs_end (int, optional): The upper limit for how many epochs to train the modesl for. Defaults to 10.
        dataset_split (int, optional): The split between the training and validation set. Defaults to 0.7.
    """
    base_path = get_paths('phase_one_csv') if folder_extension is None else f"{get_paths('phase_one_csv')}/{folder_extension}"
    if folder_extension is not None and not os.path.exists(base_path):
        os.mkdir(base_path)
    
    print("---------------------")
    print(f"The output data for the following experiment will be saved in the following folder:")
    print(f"{base_path}")
    print("---------------------")
    
    check_if_valid_path(base_path)
    
    h5_train = h5_object(train_h5_path, training_split=dataset_split)
    h5_test = h5_object(test_h5_path, training_split=1)

    custom_error_check(verify_class_amounts(h5_test.class_in_h5, h5_train.class_in_h5), f"The input train and test sets do not have matching classes {h5_train.class_in_h5} - {h5_test.class_in_h5}")

    if run_on_one_model:
        model_object_list = [get_models(h5_train.class_in_h5, model_paths=model_paths)[-1]]
    else:
        model_object_list = get_models(h5_train.class_in_h5, model_paths=model_paths)

    find_ideal_model(h5_train, model_object_list, lazy_split=lazy_split, epochs=epochs_end, save_models=True, data_to_test_on=data_to_test_on, noise_tuple=noise_tuple)

    print(f"\n------------------------\nTraining done. Now evaluation will be made.\n\n")

    sum_test_path = f"{base_path}/test_sum_class_accuracy.csv"
    sum_val_path = f"{base_path}/val_sum_class_accuracy.csv"

    # TODO: Fix epoch count in test_val_sum_class_accuracy.csv
    _, _, image_dataset, label_dataset = h5_train.shuffle_and_lazyload(0, data_to_test_on)
    iterate_and_sum(model_object_list, 'val', sum_val_path, image_dataset, label_dataset, epochs_end, h5_train.images_in_classes, base_path, folder_extension)
    
    if run_on_one_model:
        model_object_list_loaded = [get_models(h5_train.class_in_h5, load_trained_models=True)[-1]]
    else:
        model_object_list_loaded = get_models(h5_train.class_in_h5, load_trained_models=True)
    
    image_dataset, label_dataset, _, _ = h5_test.shuffle_and_lazyload(0, data_to_test_on)
    iterate_and_sum(model_object_list_loaded, 'test', sum_test_path, image_dataset, label_dataset, -1, h5_test.images_in_classes, base_path, folder_extension, epochs=[x.fit_data[-1][0] for x in model_object_list])
    combine_two_summed_class_accracy(sum_test_path, sum_val_path, base_path)

    save_fitdata(model_object_list, base_path)
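
For reference, a minimal sketch of a driver for run_experiment_one, assuming the train and test h5 files already exist. It reuses get_satina_gains_model_norm_object_list, the model factory passed to similar experiments elsewhere in this corpus; the chunk count, epoch limit, and split are illustrative.

# Hypothetical driver for run_experiment_one; lazy_split, epochs_end, and dataset_split are illustrative.
from global_paths import get_paths

run_experiment_one(lazy_split=10,
                   train_h5_path=get_paths('h5_train'),
                   test_h5_path=get_paths('h5_test'),
                   get_models=get_satina_gains_model_norm_object_list,  # model factory used elsewhere in this repo
                   epochs_end=10,
                   dataset_split=0.7)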
Example #4
def QuickDebug()->None:
    img = Image.open(get_paths("dataset"))
    day = {'factor':1.3} 
    night = {'factor':0.3}
    bright = brightness(day)
    dark = brightness(night)
    bright.DayAdjustment(img).show()
    dark.DayAdjustment(img).show()
Example #5
def sum_train_test_file(get_category, get_sub_category, get_class_accuracy):
    base_path = get_paths('phase_one_csv')

    train_test_path = f"{base_path}/train_test_dist.csv"
    sum_path = f"{base_path}/sum_train_test_sub_cat.csv"
    sum_summed_path = f"{base_path}/sum_summed_train_test_sub_cat.csv"

    make_train_test_size_graph(train_test_path)
    csv_obj = cvs_object(train_test_path)
    data = generalized_sum(
        csv_obj, sum_con(get_sub_category, get_category, get_class_accuracy))
    csv_obj.write(data, path=sum_path, overwrite_path=True)
    data = generalized_sum(csv_obj, sum_summed_con(get_class_accuracy))
    csv_obj.write(data, path=sum_summed_path, overwrite_path=True)
Example #6
def initailize_initial_values(folder_extension: str, filter_method) -> tuple:
    """Function for instantiating some values

    Args:
        folder_extension (str): [UNKNOWN USE]

    Returns:
        tuple: [retunes a tuple of the filters and the basepath]
    """
    filters = filter_method()
    base_path = get_paths(
        'phase_two_csv'
    ) if folder_extension == None else f"{get_paths('phase_two_csv')}/{folder_extension}"
    if not folder_extension == None and not os.path.exists(base_path):
        os.mkdir(base_path)

    return filters, base_path
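
A minimal usage sketch, assuming load_homo_filters (the filter loader used in the experiment driver below) as the filter_method; the folder name is illustrative.

# Hypothetical call; load_homo_filters is a filter loader used elsewhere in this repo.
filters, base_path = initailize_initial_values("experiment_two_eval_homo", load_homo_filters)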
Example #7
def run_biksture(index,
                 data_to_test_on,
                 run_base_experiments=True,
                 run_ideal_experiments=True,
                 run_lobster_experiments=True,
                 run_lobster_level_experiments=True):
    test_path = get_h5_test()
    train_path = get_h5_train()

    noise_test_path = get_h5_test_noise()
    noise_train_path = get_h5_train_noise()

    homo_test_path = get_h5_test_homo()
    homo_train_path = get_h5_train_homo()

    dehaze_test_path = get_paths('h5_test_dehaze')
    dehaze_train_path = get_paths('h5_train_dehaze')

    ideal_noise_test_path = get_paths('h5_test_ideal_noise')
    ideal_noise_train_path = get_paths('h5_train_ideal_noise')

    noise_paths = get_noise_paths()
    homo_path = get_homo_paths()
    ideal_path = get_ideal_paths()
    ideal_noise_path = get_ideal_noise_paths()
    dehaze_path = get_dehaze_path()

    ideal_noise_worked = False
    ideal_worked = False

    exclude_folders = []

    ideal_and_lobster_on_one_model = True

    base_ex = f"{index}/experiment_two_data"
    base_result = f"{index}/experiment_two_result"
    base_big_lobster = f"{index}/experiment_big_lobster"
    base_big_lobster_level = f"{index}/experiment_big_lobster_level"

    create_dirs(base_ex, base_result, base_big_lobster, index)

    errors = []

    if run_base_experiments:
        # # Baseline experiment
        # baseline_folder = "experiment_baseline"
        # run_default_experiment(baseline_folder, base_ex, test_path, train_path, data_to_test_on, 'base',
        #                 filter_method=None, condition=True, model_paths=None, train_model=True, run_on_one_model=False)

        # # Normalized experiment
        # norm_folder = "experiment_two_eval_norm"
        # run_default_experiment(norm_folder, base_ex, test_path, train_path, data_to_test_on, 'norm',
        #                 filter_method=None, condition=True, model_paths=None, train_model=True, run_on_one_model=False)

        # # Homomorpic experiment
        # homo_folder = "experiment_two_eval_homo"
        # run_default_experiment(homo_folder, base_ex, homo_test_path, homo_train_path, data_to_test_on, 'base',
        #                 filter_method=load_homo_filters, condition=True, model_paths=homo_path, train_model=True, run_on_one_model=False)

        # # Dehaze experiment
        # dehaze_folder = "experiment_two_eval_dehaze"
        # run_default_experiment(dehaze_folder, base_ex, dehaze_test_path, dehaze_train_path, data_to_test_on, 'base',
        #                 filter_method=load_dehaze_filters, condition=True, model_paths=dehaze_path, train_model=True, run_on_one_model=False)

        # # Noise experiment
        # noise_folder = "experiment_two_eval_noise"
        # run_default_experiment(noise_folder, base_ex, noise_test_path, noise_train_path, data_to_test_on, 'base', filter_method=None,
        #                 condition=True, model_paths=noise_paths, train_model=True, run_on_one_model=False, two_test_path=test_path)

        # run_default_experiment(folder_name, base_folder, test_path, train_path, get_models_method, data_to_test_on,
        #                 model_types, filter_method=None, condition=True, model_paths=None, train_model=True, run_on_one_model=False)
        try:
            baseline_folder = "experiment_baseline"
            ex_folder = get_ex_folder(baseline_folder, base_ex)
            introduce_experiment(baseline_folder)
            ex_one(test_path,
                   train_path,
                   folder_extension=ex_folder,
                   data_to_test_on=data_to_test_on)
            ex_two_eval_noise(test_path,
                              ex_folder,
                              data_to_test_on=data_to_test_on)
        except Exception:
            print("ERROR IN EXPERIMENT 'TRAIN ON BASELINE'")
            e = sys.exc_info()
            print(e)
            errors.append(e)

        try:
            norm_folder = "experiment_two_eval_norm"
            ex_folder = get_ex_folder(norm_folder, base_ex)
            introduce_experiment(norm_folder)
            ex_two_eval_norm(test_path,
                             train_path,
                             folder_extension=ex_folder,
                             data_to_test_on=data_to_test_on)
            ex_two_eval_noise(
                test_path,
                ex_folder,
                get_models=get_satina_gains_model_norm_object_list,
                data_to_test_on=data_to_test_on)
        except Exception:
            print("ERROR IN EXPERIMENT 'TRAIN ON NORM'")
            e = sys.exc_info()
            print(e)
            errors.append(e)

        try:
            homo_folder = "experiment_two_eval_homo"
            ex_folder = get_ex_folder(homo_folder, base_ex)
            introduce_experiment(homo_folder)
            ex_one(homo_test_path,
                   homo_train_path,
                   folder_extension=ex_folder,
                   data_to_test_on=data_to_test_on,
                   model_paths=homo_path)
            ex_two_eval_noise(homo_test_path,
                              ex_folder,
                              data_to_test_on=data_to_test_on,
                              model_paths=homo_path,
                              filter_method=load_homo_filters)
        except Exception:
            print("ERROR IN EXPERIMENT 'TRAIN ON HOMO'")
            e = sys.exc_info()
            print(e)
            errors.append(e)

        # try:
        #     dehaze_folder = "experiment_two_eval_dehaze"
        #     ex_folder = get_ex_folder(dehaze_folder, base_ex)
        #     introduce_experiment(dehaze_folder)
        #     ex_one(dehaze_test_path, dehaze_train_path, folder_extension=ex_folder, data_to_test_on=data_to_test_on, model_paths=dehaze_path)
        #     ex_two_eval_noise(dehaze_test_path, ex_folder, data_to_test_on=data_to_test_on, filter_method=load_dehaze_filters, model_paths=dehaze_path)
        # except:
        #     print("ERROR IN EXPERIMENT 'TRAIN ON DEHAZE'")
        #     e = sys.exc_info()
        #     print(e)
        #     errors.append(e)

        try:
            noise_folder = "experiment_two_eval_noise"
            ex_folder = get_ex_folder(noise_folder, base_ex)
            introduce_experiment(noise_folder)
            ex_one(noise_test_path,
                   noise_train_path,
                   folder_extension=ex_folder,
                   model_paths=noise_paths,
                   data_to_test_on=data_to_test_on)
            ex_two_eval_noise(test_path,
                              ex_folder,
                              model_paths=noise_paths,
                              data_to_test_on=data_to_test_on)
        except Exception:
            print("ERROR IN EXPERIMENT 'TRAIN ON NOISE'")
            e = sys.exc_info()
            print(e)
            errors.append(e)

    if run_ideal_experiments:
        try:
            ideal_folder = "experiment_two_eval_ideal"
            ex_folder = get_ex_folder(ideal_folder, base_result)
            introduce_experiment(ideal_folder)
            ex_two_eval_norm(homo_test_path,
                             homo_train_path,
                             folder_extension=ex_folder,
                             data_to_test_on=data_to_test_on,
                             model_paths=ideal_path,
                             run_on_one_model=ideal_and_lobster_on_one_model)
            ideal_worked = True
            # ex_two_eval_noise(homo_test_path, ex_folder, get_models=get_satina_gains_model_norm_object_list, data_to_test_on=data_to_test_on, model_paths=ideal_path, filter_method=load_homo_filters, run_on_one_model=ideal_and_lobster_on_one_model)
        except Exception:
            print("ERROR IN EXPERIMENT 'TRAIN ON IDEAL'")
            e = sys.exc_info()
            print(e)
            errors.append(e)

        try:
            ideal_noise_folder = "experiment_two_eval_idealnoise"
            ex_folder = get_ex_folder(ideal_noise_folder, base_result)
            introduce_experiment(ideal_noise_folder)
            ex_two_eval_norm(ideal_noise_test_path,
                             ideal_noise_train_path,
                             folder_extension=ex_folder,
                             data_to_test_on=data_to_test_on,
                             model_paths=ideal_noise_path,
                             run_on_one_model=ideal_and_lobster_on_one_model)
            ideal_noise_worked = True
            # ex_two_eval_noise(homo_test_path, ex_folder, get_models=get_satina_gains_model_norm_object_list, data_to_test_on=data_to_test_on, model_paths=ideal_noise_path, filter_method=load_homo_filters, run_on_one_model=ideal_and_lobster_on_one_model)
        except Exception:
            print("ERROR IN EXPERIMENT 'TRAIN ON IDEAL NOISE'")
            e = sys.exc_info()
            print(e)
            errors.append(e)

    if run_lobster_experiments:
        try:
            if ideal_worked or not run_ideal_experiments:
                ideal_lobster_folder = "experiment_two_big_lobster_ideal"
                ex_folder = get_ex_folder(ideal_lobster_folder,
                                          base_big_lobster)
                introduce_experiment(ideal_lobster_folder)
                ex_two_eval_noise(
                    homo_test_path,
                    ex_folder,
                    get_models=get_satina_gains_model_norm_object_list,
                    data_to_test_on=data_to_test_on,
                    model_paths=ideal_path,
                    filter_method=load_lobster_filters,
                    run_on_one_model=ideal_and_lobster_on_one_model)
            else:
                print(
                    "---\nWARNING: experiment lobster_ideal will not run since an error occurred when training the model\n---"
                )
        except Exception:
            print("ERROR IN EXPERIMENT 'TRAIN ON IDEAL LOBSTER'")
            e = sys.exc_info()
            print(e)
            errors.append(e)

        try:
            if ideal_noise_worked or not run_ideal_experiments:
                noise_lobster_folder = "experiment_two_big_lobster_noise"
                ex_folder = get_ex_folder(noise_lobster_folder,
                                          base_big_lobster)
                introduce_experiment(noise_lobster_folder)
                ex_two_eval_noise(
                    homo_test_path,
                    ex_folder,
                    get_models=get_satina_gains_model_norm_object_list,
                    data_to_test_on=data_to_test_on,
                    model_paths=ideal_noise_path,
                    filter_method=load_lobster_filters,
                    run_on_one_model=ideal_and_lobster_on_one_model)
            else:
                print(
                    "---\nWARNING: experiment lobster_noise will not run since an error occurred when training the model\n---"
                )
        except Exception:
            print("ERROR IN EXPERIMENT 'TRAIN ON NOISE LOBSTER'")
            e = sys.exc_info()
            print(e)
            errors.append(e)

    if run_lobster_level_experiments:
        e1, exclude_folders = lobster_noise_level(
            'fog', data_to_test_on, base_big_lobster_level, homo_test_path,
            ideal_path, ideal_noise_path, load_lobster_level_filters_fog,
            ideal_and_lobster_on_one_model)
        # e2, _ = lobster_noise_level('night', data_to_test_on, base_big_lobster_level, homo_test_path, ideal_path, ideal_noise_path, load_lobster_level_filters_night, ideal_and_lobster_on_one_model)
        # e3, _ = lobster_noise_level('rain', data_to_test_on, base_big_lobster_level, homo_test_path, ideal_path, ideal_noise_path, load_lobster_level_filters_rain, ideal_and_lobster_on_one_model)
        # e4, exclude_folders = lobster_noise_level('snow', data_to_test_on, base_big_lobster_level, homo_test_path, ideal_path, ideal_noise_path, load_lobster_level_filters_snow, ideal_and_lobster_on_one_model)
        # extend_errors(errors, [e1, e2, e3, e4])

    try:
        sum_merged_files(f'phase_two/csv_output/{index}', exclude_folders)
    except Exception as e:
        print(f"ERROR: {e}")

    if len(errors) != 0:
        time_str = time.strftime("%Y%m%d-%H%M%S")
        save_path = f"error_messages/output_error_{time_str}.txt"

        print("---------------------------")
        print(
            f"During execution, errors occurred in {len(errors)} experiments. These errors can be found in the following txt document:\n{save_path}"
        )
        print("---------------------------")

        with open(save_path, 'w') as output:
            output.write(str(errors))
Example #8
def get_dehaze_path():
    return [
        get_paths('satina_median_dehaze'),
        get_paths('satina_avg_dehaze'),
        get_paths('satina_mode_dehaze')
    ]
Example #9
def get_ideal_noise_paths():
    return [
        get_paths('satina_median_idealnoise'),
        get_paths('satina_avg_idealnoise'),
        get_paths('satina_mode_idealnoise')
    ]
Example #10
def get_homo_paths():
    return [
        get_paths('satina_median_homo'),
        get_paths('satina_avg_homo'),
        get_paths('satina_mode_homo')
    ]
Example #11
def get_noise_paths():
    return [
        get_paths('satina_median_noise'),
        get_paths('satina_avg_noise'),
        get_paths('satina_mode_noise')
    ]
Example #12
    #def setup_files(path):
    #    print('Not implemented')

    #def initialize(path):
    #    print('Not implemented')


def delete_dest(dest_path):
    if os.path.exists(dest_path):
        shutil.rmtree(dest_path)


if __name__ == '__main__':
    path = r'Dataset/miladgainsimage'
    dest_path = r'Dataset/ETSD_Adjusted'

    delete_dest(dest_path)

    complex_runner(path_to_original=path)
    run_split_dataset(dest_path, 0.3, path)

    train_image_path = f"{dest_path}/Training"
    test_image_path = f"{dest_path}/Testing"

    generate_h5(get_paths('h5_train'), train_image_path)
    generate_h5(get_paths('h5_test'), test_image_path)

    generate_datasets()
Example #13
import time

from slave_main import run_biksture
from final_avg import calc_avg_from_experiments
from global_paths import get_paths

def get_phase_one_csv():
    return ['fitdata_combined.csv', 'test_val_sum_class_accuracy.csv']

def get_phase_two_csv():
    return ['final_sum_sum_sum_summed.csv', 'sum_cat', 'sum_sub_cat.csv']

if __name__ == "__main__":
    s_index = 0
    e_index = 0
    data_to_test_on = 1
    
    phase_one_path = get_paths('phase_one_csv')
    phase_two_path = get_paths('phase_two_csv')
    
    phase_one_csv = get_phase_one_csv()
    phase_two_csv = get_phase_two_csv()
    
    output_folder = get_paths('result_csv')
    
    tic = time.time()
    
    for i in range(s_index, e_index):
        run_biksture(i, data_to_test_on,
                     run_base_experiments=False,
                     run_ideal_experiments=True,
                     run_lobster_experiments=False,
                     run_lobster_level_experiments=True)
Example #14
# C:\Users\madsh\OneDrive\Code\Python\BiksTurePy\phase_two\csv_output\2\experiment_big_lobster\experiment_two_big_lobster_ideal
from global_paths import get_paths
import os

if __name__ == '__main__':
    phase_one_base = get_paths('phase_one_csv')
    phase_two_base = get_paths('phase_two_csv')
    
    p_one = "experiment_big_lobster/experiment_two_big_lobster_ideal"
    
    sizes_to_remove = ['49', '52']
    
    for i in range(10):
        path_to_check = os.path.join(phase_two_base, f"{i}/{p_one}")
        for path in os.listdir(path_to_check):
            if any(size in path for size in sizes_to_remove):
                path_to_delete = os.path.join(path_to_check, path)
                os.remove(path_to_delete)
                print(f"{path} has been successfully deleted")
Example #15
import csv

from global_paths import get_paths
# get_highest_val is defined elsewhere in this repo; its module is not shown in this excerpt.

def get_model_string(csv_file):
    highest = get_highest_val(csv_file)  # (epoch, resolution) of the best model column
    return f"model{highest[1]}_{highest[0]}"

def compare_csv(csv1, csv2):
    col_vals = []
    top_model = get_model_string(csv1)
    with open(csv2, 'r') as csv_file:
        reader = csv.DictReader(csv_file)
        for row in reader:
            col_vals.append(row[top_model])
    with open('ex13.csv', 'w', newline='') as csv_file:
        fieldnames = ['Class', top_model]
        print(fieldnames)
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()
        for i in range(len(col_vals)):
            writer.writerow({'Class': i, top_model: col_vals[i]})


csv1 = f"{get_paths('phase_one_csv')}/class_accuracy.csv"
csv2 = f"{get_paths('phase_one_csv')}/model32_summed.csv"

# get_highest_val(csv2)
# get_model_string(csv2)

compare_csv(csv2, csv1)
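
get_highest_val itself is not shown in this excerpt. Given that get_model_string builds a column name of the form model<resolution>_<epoch> from its return value, and that the combined file from Example #3 (step 4) stores per-class accuracies in columns named that way, a plausible reading is that it scans the combined csv and returns the (epoch, resolution) pair of the best column. The following is a hypothetical sketch under those assumptions, not the repo's actual implementation.

# Hypothetical sketch of get_highest_val (real implementation not in this excerpt).
# Assumes a combined csv whose accuracy columns are named model<resolution>_<epoch>;
# returns (epoch, resolution) for the column with the highest mean accuracy.
def get_highest_val(csv_path):
    totals, counts = {}, {}
    with open(csv_path, 'r') as f:
        for row in csv.DictReader(f):
            for col, val in row.items():
                if not col.startswith('model'):
                    continue  # skip non-model columns such as 'Class'
                try:
                    totals[col] = totals.get(col, 0.0) + float(val)
                    counts[col] = counts.get(col, 0) + 1
                except ValueError:
                    continue  # skip non-numeric cells
    best = max(totals, key=lambda c: totals[c] / counts[c])
    resolution, epoch = best[len('model'):].split('_')
    return int(epoch), int(resolution)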