Example #1
    # write predator graph
    predator_graph_data = [[data_point[0], data_point[2]] for data_point in datas[i]]
    predator_graph_data = unite_data(predator_graph_data, model_predator_data1)
    predator_graph_data = unite_data(predator_graph_data, model_predator_data2)
    export_array_array_to_csv(predator_graph_data,
                              output_folder + "/" + single_data_set_predator_graph_format.format(i+1))

# write the error graph file
single_data_errors_data = [[i] + [single_data_errors[j][i] for j in range(2*len(data_file_names))]
                           for i in range(single_data_set_num_steps+1)]
export_array_array_to_csv(single_data_errors_data, output_folder + "/" + single_data_set_errors_out)

# run both the normal and modified gradient descent algorithms on all datasets together
new_parameters1 = find_minimal_parameters(datas, model, initial_parameters, epsilon1, step_size1,
                                          all_data_sets_num_steps, use_improved_descent=False, y_factor=y_factor,
                                          error_out=all_data_sets_errors[0], debug_mod=debug_mod)
new_parameters2 = find_minimal_parameters(datas, model, initial_parameters, epsilon2, step_size2,
                                          all_data_sets_num_steps, use_improved_descent=True, y_factor=y_factor,
                                          error_out=all_data_sets_errors[1], debug_mod=debug_mod,
                                          minimal_second_gradient=minimal_second_gradient)
# write to the output file content string
result_file_content += "Using all data:\n"
result_file_content += "Normal gradient descent found the following parameters that gave error {}:\n {}\n".format(
    all_data_sets_errors[0][-1], str(new_parameters1)
)
result_file_content += "Improved gradient descent found the following parameters that gave error {}:\n {}\n".format(
    all_data_sets_errors[1][-1], str(new_parameters2)
)
result_file_content += "\n"
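The examples on this page rely on project helpers such as unite_data and export_array_array_to_csv whose implementations are not shown. As a rough orientation only, here is a minimal sketch of what export_array_array_to_csv might look like, assuming the semicolon-separated, decimal-comma CSV format that the analise example below reads back with pd.read_csv(..., sep=";", decimal=","):

import csv


def export_array_array_to_csv(rows, file_name, headers=None):
    # Write a list of rows to a semicolon-separated CSV file. Decimal commas
    # are assumed so the files can be re-read with sep=";" and decimal=",".
    with open(file_name, "w", newline="") as csv_file:
        writer = csv.writer(csv_file, delimiter=";")
        if headers is not None:
            writer.writerow(headers)
        for row in rows:
            # empty cell for None, decimal comma for numeric values
            writer.writerow("" if cell is None else str(cell).replace(".", ",")
                            for cell in row)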
Example #2
import os
import ntpath

import pandas as pd

# project helpers (find_minimal_parameters, get_data_from_csv, write_comparison_csv,
# export_array_array_to_csv, conc, log_results) and the output file name constants
# are assumed to be imported from the surrounding module


def analise(name,
            model,
            initial_parameters,
            data_file_names,
            analysis_parameters,
            debug_mod=1000,
            model_graph_dt=0.01,
            output_folder_location=""):
    # some definitions
    step_size11 = analysis_parameters["step_size11"]
    step_size12 = analysis_parameters["step_size12"]
    step_size21 = analysis_parameters["step_size21"]
    step_size22 = analysis_parameters["step_size22"]
    epsilon1 = analysis_parameters["epsilon1"]
    epsilon2 = analysis_parameters["epsilon2"]
    minimal_second_gradient = analysis_parameters["minimal_second_gradient"]
    e1 = analysis_parameters["e1"]
    e2 = analysis_parameters["e2"]
    single_data_set_num_steps = analysis_parameters[
        "single_data_set_num_steps"]
    all_data_sets_num_steps = analysis_parameters["all_data_sets_num_steps"]

    output_folder = name + "AnalysisResults" if output_folder_location == "" else \
        output_folder_location + os.path.sep + name + "AnalysisResults"
    log_file = name + "_log.txt"

    # create an empty string for the contents of the result_file to be contained in
    result_file_content = ""

    # create an empty array to store the names of all of the generated csv files
    csv_file_names = []

    # load data from files
    datas = []
    for file_name in data_file_names:
        datas.append(get_data_from_csv(file_name))

    # create empty arrays for the errors and parameters to be stored in
    single_data_errors = [[] for _ in range(2 * len(datas))]
    all_data_sets_errors = [[], []]
    single_data_parameters = [[] for _ in range(2 * len(datas))]
    all_data_parameters = [[], []]

    # create the output folder if it doesn't exist already:
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)
    # run both the normal and modified gradient descent algorithms on all datasets separately
    for i in range(len(datas)):
        new_parameters1 = find_minimal_parameters(
            [datas[i]],
            model,
            initial_parameters,
            epsilon1,
            step_size11,
            single_data_set_num_steps,
            use_improved_descent=False,
            error_out=single_data_errors[2 * i],
            parameter_out=single_data_parameters[2 * i],
            debug_mod=debug_mod,
            e1=e1,
            e2=e2)

        new_parameters2 = find_minimal_parameters(
            [datas[i]],
            model,
            initial_parameters,
            epsilon2,
            step_size21,
            single_data_set_num_steps,
            use_improved_descent=True,
            error_out=single_data_errors[2 * i + 1],
            parameter_out=single_data_parameters[2 * i + 1],
            minimal_second_gradient=minimal_second_gradient,
            debug_mod=debug_mod,
            e1=e1,
            e2=e2)

        # write to the output file content string
        result_file_content += "Using data from {}:\n".format(
            data_file_names[i])
        result_file_content += "Normal gradient descent found the following parameters that gave error {}:\n {}\n".format(
            min(single_data_errors[2 * i]), str(new_parameters1))
        result_file_content += "Improved gradient descent found the following parameters that gave error {}:\n {}\n".format(
            min(single_data_errors[2 * i + 1]), str(new_parameters2))
        result_file_content += "\n"

        # write some files that are used to create graphs to visually compare the model
        csv_file_name = output_folder + "/" + single_data_set_graph_format.format(
            i + 1)
        write_comparison_csv(csv_file_name,
                             datas[i],
                             model, [new_parameters1, new_parameters2],
                             model_dt=model_graph_dt,
                             headers=[
                                 "D" + str(i + 1),
                                 "{} {} {}".format(name, "D" + str(i + 1),
                                                   "NGD"),
                                 "{} {} {}".format(name, "D" + str(i + 1),
                                                   "AGD")
                             ])
        csv_file_names.append(csv_file_name)

    # write the error graph file
    single_data_errors_data = [
        [i0] + [single_data_errors[j][i0] for j in range(2 * len(datas))]
        for i0 in range(single_data_set_num_steps + 1)
    ]
    csv_file_name = output_folder + "/" + single_data_set_errors_out
    headers = [""]
    for i in range(len(datas)):
        headers.append("{} D{} NGD".format(name, str(i + 1)))
        headers.append("{} D{} AGD".format(name, str(i + 1)))
    export_array_array_to_csv(single_data_errors_data,
                              csv_file_name,
                              headers=headers)
    csv_file_names.append(csv_file_name)

    # write the parameters file
    single_data_parameters_data = [[None] + conc([[
        "{}_{}_{}".format(str(k // 2), str(k % 2 + 1), key)
        for key in initial_parameters.keys()
    ] for k in range(2 * len(datas))])]
    single_data_parameters_data += [
        [i0] +
        conc([single_data_parameters[j][i0] for j in range(2 * len(datas))])
        for i0 in range(single_data_set_num_steps + 1)
    ]
    csv_file_name = output_folder + "/" + single_data_set_parameters_out
    export_array_array_to_csv(single_data_parameters_data, csv_file_name)
    del single_data_parameters_data
    del single_data_parameters

    # run both the normal and modified gradient descent algorithms on all datasets together
    new_parameters1 = find_minimal_parameters(
        datas,
        model,
        initial_parameters,
        epsilon1,
        step_size12,
        all_data_sets_num_steps,
        use_improved_descent=False,
        parameter_out=all_data_parameters[0],
        error_out=all_data_sets_errors[0],
        debug_mod=debug_mod,
        e1=e1,
        e2=e2)
    new_parameters2 = find_minimal_parameters(
        datas,
        model,
        initial_parameters,
        epsilon2,
        step_size22,
        all_data_sets_num_steps,
        use_improved_descent=True,
        parameter_out=all_data_parameters[1],
        error_out=all_data_sets_errors[1],
        debug_mod=debug_mod,
        minimal_second_gradient=minimal_second_gradient,
        e1=e1,
        e2=e2)
    # write to the output file content string
    result_file_content += "Using all data:\n"
    result_file_content += "Normal gradient descent found the following parameters that gave error {}:\n {}\n".format(
        min(all_data_sets_errors[0]), str(new_parameters1))
    result_file_content += "Improved gradient descent found the following parameters that gave error {}:\n {}\n".format(
        min(all_data_sets_errors[1]), str(new_parameters2))
    result_file_content += "\n"

    # write some files that are used to create graphs to visually compare the model
    for i in range(len(datas)):
        csv_file_name = output_folder + "/" + all_data_sets_graph_format.format(
            i + 1)
        write_comparison_csv(csv_file_name,
                             datas[i],
                             model, [new_parameters1, new_parameters2],
                             model_dt=model_graph_dt,
                             headers=[
                                 "D" + str(i + 1),
                                 "{} {} {}".format(name, "AD", "NGD"),
                                 "{} {} {}".format(name, "D" + str(i + 1),
                                                   "AGD")
                             ])
        csv_file_names.append(csv_file_name)

    # write the second error graph file
    all_data_sets_errors_data = [
        [i] + [all_data_sets_errors[0][i], all_data_sets_errors[1][i]]
        for i in range(all_data_sets_num_steps + 1)
    ]
    csv_file_name = output_folder + "/" + all_data_sets_errors_out
    export_array_array_to_csv(all_data_sets_errors_data,
                              csv_file_name,
                              headers=["", name + " AD NGD", name + " AD AGD"])
    csv_file_names.append(csv_file_name)

    # write the second parameters file
    all_data_parameters_data = [[None] + conc(
        [["{}_{}".format(str(k), key) for key in initial_parameters.keys()]
         for k in range(2)])]
    all_data_parameters_data += [[i0] + all_data_parameters[0][i0] +
                                 all_data_parameters[1][i0]
                                 for i0 in range(all_data_sets_num_steps + 1)]
    csv_file_name = output_folder + "/" + all_data_sets_parameters_out
    export_array_array_to_csv(all_data_parameters_data, csv_file_name)
    del all_data_parameters_data
    del all_data_parameters

    # write to the result excel file
    excel_writer = pd.ExcelWriter(output_folder + os.path.sep +
                                  resulting_data_excel_format.format(name))
    for file_name in csv_file_names:
        df = pd.read_csv(file_name, sep=";", decimal=",", header=0)
        df.to_excel(excel_writer,
                    os.path.splitext(ntpath.basename(file_name))[0],
                    header=True,
                    index=False)
    excel_writer.close()  # .save() was removed in newer pandas; .close() also writes the file

    # write to the result file
    result_file = open(output_folder + r"/" + results_out, "w")
    result_file.write(result_file_content)
    result_file.close()

    # write to log file
    log_results(name, analysis_parameters, result_file_content, log_file)
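For orientation, a hypothetical call to analise could look like the sketch below. The dictionary keys are the ones the function actually reads; the numeric values, the model, the initial parameters and the data file names are placeholders rather than values from the original project.

# Hypothetical invocation; every concrete value below is a placeholder.
analysis_parameters = {
    "step_size11": 1e-4,                 # normal descent, single data sets
    "step_size12": 1e-4,                 # normal descent, all data sets
    "step_size21": 1e-4,                 # improved descent, single data sets
    "step_size22": 1e-4,                 # improved descent, all data sets
    "epsilon1": 1e-6,                    # passed to the normal descent runs
    "epsilon2": 1e-6,                    # passed to the improved descent runs
    "minimal_second_gradient": 1e-7,
    "e1": 1e-3,
    "e2": 1e-3,
    "single_data_set_num_steps": 1000,
    "all_data_sets_num_steps": 1000,
}

analise("CLV",                           # analysis name used in the output file names
        model,                           # model function defined elsewhere in the project
        initial_parameters,              # dict mapping parameter names to starting values
        ["data_set_1.csv", "data_set_2.csv"],
        analysis_parameters,
        debug_mod=1000,
        output_folder_location="..")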
Example #3
m_values = [
    1e-8, 0.8e-7, 0.9e-7, 0.98e-7, 0.99e-7, 1e-7, 1.01e-7, 1.02e-7, 1.1e-7,
    1.2e-7, 1e-6, 1e-5, 1e-4
]
data_set_num = 2

parameters = fetch_parameters("../CLVAnalysisResults", False, min_step, "AGD",
                              data_set_num)

errors = [[] for _ in m_values]

for i, m in enumerate(m_values):
    find_minimal_parameters([datas[data_set_num - 1]],
                            model,
                            parameters,
                            epsilon,
                            step_size,
                            max_step - min_step,
                            debug_mod=debug_mod,
                            e1=e1,
                            e2=e2,
                            minimal_second_gradient=m,
                            error_out=errors[i],
                            use_improved_descent=True)

lines = [[i] + [errors[j][i] for j in range(len(errors))]
         for i in range(max_step - min_step + 1)]

export_array_array_to_csv(lines, "test_result6.csv",
                          [""] + ["m={:.2E}".format(m) for m in m_values])