from ModelMain import s_squared, improved_gradient_descent_step
from QuickDataVisualizer import write_comparison_csv
from Utils import get_data_from_csv, find_t0_data_point
from DifferentialModels import Harissons
from AnalyisisMain import fetch_parameters

# Euler's-method settings for the EP (Harissons) model
dt = 0.005
max_t = 6.6
model = Harissons(dt, max_t)


# start from a previously fitted parameter set (AGD, data set 2, step 1853)
parameters = [fetch_parameters("../HarissonsAnalysisResults", False, data_set_num=2, step_num=1853, method="AGD")]
# smoothing constants used when computing S^2
e1 = 0.01  # value of e1 used in the calculating S^2
e2 = 0.025  # value of e2 used in the calculating S^2

# load the four experimental data sets
datas = [get_data_from_csv("../Data/exp4_beker{}_data.csv".format(i), t_scale=1) for i in range(1, 5)]

# take a single improved-gradient-descent step on the second data set
step = improved_gradient_descent_step([datas[1]], model, parameters[0], 1e-5, 1e-1, minimal_second_gradient=1e-7, e1=0.01, e2=0.025)

# apply the step to every parameter to obtain the updated set
parameters.append({key: value + step[key] for key, value in parameters[0].items()})
for p in parameters:
    print(p)


# write a CSV comparing the data against both parameter sets
write_comparison_csv("test_result7.csv", datas[1], model, parameters, 0.01)
# ===== Example #2 =====
from QuickDataVisualizer import print_data, print_model
from Utils import get_data_from_csv

# define initial parameters for the gradient descent starting point
# NOTE(review): LV and gradient_descent_minimal_parameters2 are not imported
# in this chunk — presumably from DifferentialModels / ModelMain; confirm.
initial_parameters = {"alpha": 2, "beta": 0.06, "gamma": 0.03, "delta": 0.2}

# create an approximation of the Lotka-Volterra model
model = LV(dt=0.005, max_t=10)

# create data
# test_parameters / x0 / y0 were used to generate synthetic data (see the
# commented-out model.get_data call below); real CSV data is loaded instead
test_parameters = {"alpha": 1, "beta": 0.05, "gamma": 0.02, "delta": 0.14}
x0 = 20
y0 = 30
# data = model.get_data(test_parameters, x0, y0, 0, 10, 31)

data = get_data_from_csv("exp4_data_1.csv", t_scale=1)

# fit the model parameters to the data via gradient descent
# new_parameters = gradient_descent_minimal_parameters(data, model, initial_parameters, 0.00001, 1e-7, 10000)
new_parameters = gradient_descent_minimal_parameters2(data,
                                                      model,
                                                      initial_parameters,
                                                      0.00001,
                                                      1e1,
                                                      10000,
                                                      y_factor=100,
                                                      debug_mod=100)
# new_parameters = find_minimal_parameters([data], model, initial_parameters, 0.00001, 1e1, 10000,
#                                          y_factor=100, debug_mod=100)

# take the initial conditions from the first data point
# (assumes each data row is (t, x, y) — TODO confirm against get_data_from_csv)
x0 = data[0][1]
y0 = data[0][2]
def analise(name,
            model,
            initial_parameters,
            data_file_names,
            analysis_parameters,
            debug_mod=1000,
            model_graph_dt=0.01,
            output_folder_location=""):
    """Run the full NGD/AGD comparison analysis for one model.

    Both the normal gradient descent (NGD) and the improved gradient descent
    (AGD) are run on every data set separately and then on all data sets
    combined.  All results are written to ``<name>AnalysisResults``:
    model-vs-data comparison CSVs, error and parameter histories, a combined
    Excel workbook, a plain-text results file, and a log entry.

    Parameters:
        name: model name used in the output folder, CSV headers and files.
        model: differential model passed through to find_minimal_parameters
            and write_comparison_csv.
        initial_parameters: dict of starting parameter values.
        data_file_names: CSV file paths, loaded via get_data_from_csv.
        analysis_parameters: dict providing "step_size11", "step_size12",
            "step_size21", "step_size22", "epsilon1", "epsilon2",
            "minimal_second_gradient", "e1", "e2",
            "single_data_set_num_steps" and "all_data_sets_num_steps".
        debug_mod: console-print frequency for the descent routines.
        model_graph_dt: dt used when sampling the model for the CSVs.
        output_folder_location: optional parent directory for the output
            folder; "" places it in the current working directory.

    NOTE(review): relies on module-level names not visible in this chunk
    (find_minimal_parameters, write_comparison_csv, conc,
    export_array_array_to_csv, log_results, the *_out / *_format constants,
    os, pd, ntpath) — confirm they are defined/imported at module level.
    """
    # unpack the analysis parameters
    step_size11 = analysis_parameters["step_size11"]
    # BUG FIX: this previously read "step_size11" again, so the all-data NGD
    # run silently reused the single-data-set step size.
    step_size12 = analysis_parameters["step_size12"]
    step_size21 = analysis_parameters["step_size21"]
    step_size22 = analysis_parameters["step_size22"]
    epsilon1 = analysis_parameters["epsilon1"]
    epsilon2 = analysis_parameters["epsilon2"]
    minimal_second_gradient = analysis_parameters["minimal_second_gradient"]
    e1 = analysis_parameters["e1"]
    e2 = analysis_parameters["e2"]
    single_data_set_num_steps = analysis_parameters[
        "single_data_set_num_steps"]
    all_data_sets_num_steps = analysis_parameters["all_data_sets_num_steps"]

    output_folder = name + "AnalysisResults" if output_folder_location == "" else \
        output_folder_location + os.path.sep + name + "AnalysisResults"
    log_file = name + "_log.txt"

    # create an empty string for the contents of the result_file to be contained in
    result_file_content = ""

    # create an empty array to store the names of all of the generated csv files
    csv_file_names = []

    # load data from files
    datas = []
    for file_name in data_file_names:
        datas.append(get_data_from_csv(file_name))

    # create empty arrays for the errors and parameters to be stored in
    # (two slots per data set: index 2*i holds NGD, index 2*i+1 holds AGD)
    single_data_errors = [[] for _ in range(2 * len(datas))]
    all_data_sets_errors = [[], []]
    single_data_parameters = [[] for _ in range(2 * len(datas))]
    all_data_parameters = [[], []]

    # create the output folder if it doesn't exist already:
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)
    # run both the normal and modified gradient descent algorithms on all datasets seperately
    for i in range(len(datas)):
        new_parameters1 = find_minimal_parameters(
            [datas[i]],
            model,
            initial_parameters,
            epsilon1,
            step_size11,
            single_data_set_num_steps,
            use_improved_descent=False,
            error_out=single_data_errors[2 * i],
            parameter_out=single_data_parameters[2 * i],
            debug_mod=debug_mod,
            e1=e1,
            e2=e2)

        new_parameters2 = find_minimal_parameters(
            [datas[i]],
            model,
            initial_parameters,
            epsilon2,
            step_size21,
            single_data_set_num_steps,
            use_improved_descent=True,
            error_out=single_data_errors[2 * i + 1],
            parameter_out=single_data_parameters[2 * i + 1],
            minimal_second_gradient=minimal_second_gradient,
            debug_mod=debug_mod,
            e1=e1,
            e2=e2)

        # write to the output file content string
        result_file_content += "Using data from {}:\n".format(
            data_file_names[i])
        result_file_content += "Normal gradient descent found the following parameters that gave error {}:\n {}\n".format(
            min(single_data_errors[2 * i]), str(new_parameters1))
        result_file_content += "Improved gradient descent found the following parameters that gave error {}:\n {}\n".format(
            min(single_data_errors[2 * i + 1]), str(new_parameters2))
        result_file_content += "\n"

        # write some files that are used to create graphs to visually compare the model
        csv_file_name = output_folder + "/" + single_data_set_graph_format.format(
            i + 1)
        write_comparison_csv(csv_file_name,
                             datas[i],
                             model, [new_parameters1, new_parameters2],
                             model_dt=model_graph_dt,
                             headers=[
                                 "D" + str(i + 1),
                                 "{} {} {}".format(name, "D" + str(i + 1),
                                                   "NGD"),
                                 "{} {} {}".format(name, "D" + str(i + 1),
                                                   "AGD")
                             ])
        csv_file_names.append(csv_file_name)

    # write the error graph file
    single_data_errors_data = [
        [i0] + [single_data_errors[j][i0] for j in range(2 * len(datas))]
        for i0 in range(single_data_set_num_steps + 1)
    ]
    csv_file_name = output_folder + "/" + single_data_set_errors_out
    headers = [""]
    for i in range(len(datas)):
        headers.append("{} D{} NGD".format(name, str(i + 1)))
        headers.append("{} D{} AGD".format(name, str(i + 1)))
    export_array_array_to_csv(single_data_errors_data,
                              csv_file_name,
                              headers=headers)
    csv_file_names.append(csv_file_name)

    # write the parameters file
    # (header row encodes data-set index // method // parameter name)
    single_data_parameters_data = [[None] + conc([[
        "{}_{}_{}".format(str(k // 2), str(k % 2 + 1), key)
        for key in initial_parameters.keys()
    ] for k in range(2 * len(datas))])]
    single_data_parameters_data += [
        [i0] +
        conc([single_data_parameters[j][i0] for j in range(2 * len(datas))])
        for i0 in range(single_data_set_num_steps + 1)
    ]
    csv_file_name = output_folder + "/" + single_data_set_parameters_out
    export_array_array_to_csv(single_data_parameters_data, csv_file_name)
    # free the (potentially large) histories before the all-data runs
    del single_data_parameters_data
    del single_data_parameters

    # run both the normal and modified gradient descent algorithms on all datasets together
    new_parameters1 = find_minimal_parameters(
        datas,
        model,
        initial_parameters,
        epsilon1,
        step_size12,
        all_data_sets_num_steps,
        use_improved_descent=False,
        parameter_out=all_data_parameters[0],
        error_out=all_data_sets_errors[0],
        debug_mod=debug_mod,
        e1=e1,
        e2=e2)
    new_parameters2 = find_minimal_parameters(
        datas,
        model,
        initial_parameters,
        epsilon2,
        step_size22,
        all_data_sets_num_steps,
        use_improved_descent=True,
        parameter_out=all_data_parameters[1],
        error_out=all_data_sets_errors[1],
        debug_mod=debug_mod,
        minimal_second_gradient=minimal_second_gradient,
        e1=e1,
        e2=e2)
    # write to the output file content string
    result_file_content += "Using all data:\n"
    result_file_content += "Normal gradient descent found the following parameters that gave error {}:\n {}\n".format(
        min(all_data_sets_errors[0]), str(new_parameters1))
    result_file_content += "Improved gradient descent found the following parameters that gave error {}:\n {}\n".format(
        min(all_data_sets_errors[1]), str(new_parameters2))
    result_file_content += "\n"

    # write some files that are used to create graphs to visually compare the model
    for i in range(len(datas)):
        csv_file_name = output_folder + "/" + all_data_sets_graph_format.format(
            i + 1)
        write_comparison_csv(csv_file_name,
                             datas[i],
                             model, [new_parameters1, new_parameters2],
                             model_dt=model_graph_dt,
                             headers=[
                                 "D" + str(i + 1),
                                 "{} {} {}".format(name, "AD", "NGD"),
                                 # CONSISTENCY FIX: this column also comes from
                                 # the all-data ("AD") fit; it was previously
                                 # mislabeled "D<i+1>" (cf. the "AD AGD" header
                                 # used for the error file below)
                                 "{} {} {}".format(name, "AD", "AGD")
                             ])
        csv_file_names.append(csv_file_name)

    # write the second error graph file
    all_data_sets_errors_data = [
        [i] + [all_data_sets_errors[0][i], all_data_sets_errors[1][i]]
        for i in range(all_data_sets_num_steps + 1)
    ]
    csv_file_name = output_folder + "/" + all_data_sets_errors_out
    export_array_array_to_csv(all_data_sets_errors_data,
                              csv_file_name,
                              headers=["", name + " AD NGD", name + " AD AGD"])
    csv_file_names.append(csv_file_name)

    # write the second parameters file
    all_data_parameters_data = [[None] + conc(
        [["{}_{}".format(str(k), key) for key in initial_parameters.keys()]
         for k in range(2)])]
    all_data_parameters_data += [[i0] + all_data_parameters[0][i0] +
                                 all_data_parameters[1][i0]
                                 for i0 in range(all_data_sets_num_steps + 1)]
    csv_file_name = output_folder + "/" + all_data_sets_parameters_out
    export_array_array_to_csv(all_data_parameters_data, csv_file_name)
    del all_data_parameters_data
    del all_data_parameters

    # write to the result excel file (one sheet per generated csv)
    excel_writer = pd.ExcelWriter(output_folder + os.path.sep +
                                  resulting_data_excel_format.format(name))
    for file_name in csv_file_names:
        df = pd.read_csv(file_name, sep=";", decimal=",", header=0)
        df.to_excel(excel_writer,
                    os.path.splitext(ntpath.basename(file_name))[0],
                    header=True,
                    index=False)
    # NOTE(review): ExcelWriter.save() was removed in pandas 2.0 (use
    # .close()); kept here to match the pandas version this project pins.
    excel_writer.save()

    # write to the result file (context manager guarantees the file is closed
    # even if the write raises)
    with open(output_folder + r"/" + results_out, "w") as result_file:
        result_file.write(result_file_content)

    # write to log file
    log_results(name, analysis_parameters, result_file_content, log_file)
# ===== Example #4 =====
from ModelMain import find_minimal_parameters, s_squared, improved_gradient_descent_step
from QuickDataVisualizer import export_array_array_to_csv
from Utils import get_data_from_csv
from DifferentialModels import CLV
from AnalyisisMain import fetch_parameters

# some parameters for the gradient descent / S^2 computation
e1 = 0.01  # value of e1 used in the calculating S^2
e2 = 0.025  # value of e2 used in the calculating S^2
epsilon = 0.00001  # finite-difference epsilon for the gradient
step_size = 2e-1  # gradient descent step size
debug_mod = 1000  # console print frequency during descent

# load the four experimental data sets
datas = [
    get_data_from_csv("../Data/exp4_beker{}_data.csv".format(i), t_scale=1)
    for i in range(1, 5)
]

# create an instance of the CLV model
dt = 0.005  # parameters for the Euler's method algorithm
max_t = 6.6
model = CLV(dt, max_t)

# scan/search settings
# NOTE(review): names suggest a step-size scan (m0 grown by m_factor) over a
# factor range [f_min, f_max] on data set 3 — confirm against the code that
# consumes these values (not visible in this chunk).
min_step = 10874
m0 = 1e-7
m_factor = 10**(1 / 10)
f_min = -30
f_max = 50
data_set_num = 3
# NOTE(review): the opening line of this dict was lost in extraction; the
# variable name below is reconstructed from context — verify against the
# original source.
initial_parameters = {
    "sigma_f": 0.01,
    "p_f": 0.02,
    "mu": 0.03
}

# constants for the outputs
# NOTE(review): this fragment references names not defined in this chunk
# (data_file_names, CAModel, grid_shape, number_of_runs, max_t, N,
# output_folder, os) — presumably defined/imported earlier in the original
# file; confirm.
debug_mod = 1  # number that indicates how much is printed in the console for debugging
model_graph_dt = 0.01  # dt used when sampling the model for graph CSVs

# create an empty string for the contents of the result_file to be contained in
result_file_content = ""

# load data from files
datas = []
for file_name in data_file_names:
    datas.append(get_data_from_csv(file_name))


# create an approximation of a 3-dimensional version of Hawick, K. A., & Scogings, C. J. (2010) their model
model = CAModel(grid_shape, number_of_runs, max_t, N)

# create empty arrays for the errors to be stored in
# (two slots per data set: index 2*i for NGD, 2*i+1 for AGD)
single_data_errors = [[] for _ in range(2*len(datas))]
all_data_sets_errors = [[], []]

# create the output folder if it doesn't exist already:
if not os.path.exists(output_folder):
    os.mkdir(output_folder)


# run both the normal and modified gradient descent algorithms on all datasets seperately