Example #1
# Initialise holding arrays
log_likelihood = np.zeros_like(element_skew)
avg_log_likelihood = np.zeros_like(element_skew)
selected_quadrats_n = np.zeros_like(element_skew)

# Initialise Array containing Frobenius Norm
frob_norm = np.zeros_like(element_skew)

start_iteration = time.perf_counter()

# Here, the matrix variables are not optimized directly; only the kernel is optimized,
# while the matrix variables are swept over a grid of values
for i in range(iterate_count):
    # initial_mat_var = np.array([mat_element[a], 0, mat_element[c], mat_element[d]])
    start_iteration = time.perf_counter()
    initial_mat_var = np.array([element_skew[i], 0, 0, element_skew[i]])
    frob_norm[i] = fn.frob_norm(initial_mat_var)
    print(' ------------- Start of Current Iteration', i + 1)
    print('The Current Matrix Variables are', initial_mat_var)
    print('The Current Frobenius Norm is', frob_norm[i])

    xy_scatter_transformed = fn.transform_array(initial_mat_var, xy_within_box,
                                                center)
    x_points_trans = xy_scatter_transformed[0]
    y_points_trans = xy_scatter_transformed[1]

    # Obtain the maximum range in x and y in the transformed space
    # Transform the vertices
    transformed_vertices = fn.transform_array(initial_mat_var, vertices,
                                              center)

    x_down = min(transformed_vertices[0])
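The helper module fn is not shown in these snippets. Below is a minimal sketch of what fn.frob_norm and fn.transform_array presumably do, assuming the 4-element vector [a, b, c, d] encodes a 2x2 transformation matrix that is applied to a 2 x N point array about a given center; both assumptions are inferred from the calls above, not taken from the original module.

import numpy as np

def frob_norm(mat_var):
    # Frobenius norm of the 2x2 matrix encoded by [a, b, c, d]
    return np.sqrt(np.sum(np.asarray(mat_var, dtype=float) ** 2))

def transform_array(mat_var, xy_points, center):
    # Apply the 2x2 matrix [[a, b], [c, d]] to a 2 x N array of points about 'center'
    a, b, c, d = mat_var
    matrix = np.array([[a, b], [c, d]])
    center = np.asarray(center, dtype=float).reshape(2, 1)
    return matrix @ (np.asarray(xy_points, dtype=float) - center) + center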
Example #2
                                scalar])  # Initial values for matrix variables
    solution_val = scopt.minimize(fun=linear_trans_opt,
                                  args=arguments_opt,
                                  x0=initial_mat_var,
                                  method='Nelder-Mead',
                                  options={
                                      'xatol': 1,
                                      'fatol': 100,
                                      'disp': True,
                                      'maxfev': 500
                                  })

    matrix_variables_mat[i, :] = solution_val.x  # This determines the optimal transformation matrix
    log_likelihood_array[i] = -1 * solution_val.fun
    frob_array[i] = fn.frob_norm(solution_val.x)

    # Create status output
    print('The Matrix Variables are', matrix_variables_mat[i, :])
    print('The Frobenius Norm is', frob_array[i])
    print('The Log Marginal Likelihood is', log_likelihood_array[i])

end_opt = time.perf_counter()
print('Time taken for Latin Hypercube and Nelder-Mead Optimization is',
      end_opt - start_opt)
# Select the optimal starting points, and the optimal matrix variables corresponding to greatest Log Likelihood
# Create index of the maximum log likelihood
opt_index = np.argmax(log_likelihood_array)
max_likelihood = log_likelihood_array[opt_index]
opt_matrix_variables = matrix_variables_mat[opt_index, :]
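The objective linear_trans_opt and the Latin Hypercube sampling of starting points are defined outside this snippet. Below is a minimal, self-contained sketch of the same multi-start Nelder-Mead pattern, with a placeholder objective standing in for the negative log marginal likelihood; the starting points, objective, and sample count here are illustrative assumptions.

import numpy as np
import scipy.optimize as scopt

def neg_log_marginal_likelihood(mat_var):
    # Placeholder objective standing in for linear_trans_opt
    return np.sum((np.asarray(mat_var) - 1.0) ** 2)

starting_points = np.random.uniform(-1, 1, size=(10, 4))  # stand-in for Latin Hypercube samples
results = [scopt.minimize(fun=neg_log_marginal_likelihood,
                          x0=x0,
                          method='Nelder-Mead',
                          options={'xatol': 1, 'fatol': 100, 'maxfev': 500})
           for x0 in starting_points]

# Keep the start whose optimum gives the greatest log likelihood
log_likelihood_array = np.array([-res.fun for res in results])
opt_index = np.argmax(log_likelihood_array)
opt_matrix_variables = results[opt_index].x
print('Optimal matrix variables:', opt_matrix_variables)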
Example #3
    0.00000,
    dtype=float)

start_iteration = time.perf_counter()

# Here, the matrix variables are not optimized directly; only the kernel is optimized,
# while the matrix variables are swept over a grid of values
for a in range(iterate_count):
    for b in range(iterate_count):
        for c in range(iterate_count):
            for d in range(iterate_count):
                # initial_mat_var = np.array([mat_element[a], 0, mat_element[c], mat_element[d]])
                initial_mat_var = np.array([
                    mat_element_a_c[a], mat_element_b_d[b], mat_element_a_c[c],
                    mat_element_b_d[d]
                ])
                frob_norm[a, b, c, d] = fn.frob_norm(initial_mat_var)
                print(
                    ' ------------- Start of Current Iteration -------------')
                print('The Current Matrix Variables are', initial_mat_var)
                print('The Current Frobenius Norm is', frob_norm[a, b, c, d])

                xy_scatter_transformed = fn.transform_array(
                    initial_mat_var, xy_within_box, center)
                x_points_trans = xy_scatter_transformed[0]
                y_points_trans = xy_scatter_transformed[1]

                # Obtain the maximum range in x and y in the transformed space
                # Transform the vertices
                transformed_vertices = fn.transform_array(
                    initial_mat_var, vertices, center)
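The four nested loops above perform an exhaustive grid search over the entries of the 2x2 matrix. The same sweep can be written with a single loop over itertools.product, keeping the indexing into the 4-D frob_norm array identical; the grids below are hypothetical stand-ins for mat_element_a_c and mat_element_b_d.

import itertools
import numpy as np

mat_element_a_c = np.linspace(-1.0, 1.0, 5)  # hypothetical grid for elements a and c
mat_element_b_d = np.linspace(-1.0, 1.0, 5)  # hypothetical grid for elements b and d
iterate_count = 5

frob_norm = np.zeros((iterate_count,) * 4)
for a, b, c, d in itertools.product(range(iterate_count), repeat=4):
    initial_mat_var = np.array([mat_element_a_c[a], mat_element_b_d[b],
                                mat_element_a_c[c], mat_element_b_d[d]])
    frob_norm[a, b, c, d] = np.sqrt(np.sum(initial_mat_var ** 2))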
Example #4
# Provide the optimal transformation matrix variables tabulated beforehand
# ChangeParam  - do we perform the desired transformation?
transform = 'yes'

if transform == 'yes':
    transform_matrix_array = np.array(
        [0.30117594, 0.92893405, 0.65028918, -0.2277159])
elif transform == 'special':
    transform_matrix_array = np.array(
        [-0.30117594, 0.92893405, -0.65028918, -0.2277159])
elif transform == 'line':
    transform_matrix_array = np.array([1, 1, 1, 1])
else:
    transform_matrix_array = np.array([1, 0, 0, 1])

frob_norm = fn.frob_norm(transform_matrix_array)

print('The optimal Transformation Matrix Variables are',
      transform_matrix_array)
print('The optimal Frobenius Norm is', frob_norm)

# ChangeParam - Conduct the transformation about the center of the regression window
transformed_xy_within_box = fn.transform_array(transform_matrix_array,
                                               xy_within_box, center)
x_points_trans = transformed_xy_within_box[0]
y_points_trans = transformed_xy_within_box[1]

# Obtain the maximum range in x and y in the transformed space - to define the regression window
# This is to maximise the number of selected quadrats
x_min = min(x_points_trans)
x_max = max(x_points_trans)
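The snippet goes on to take the extrema of the transformed coordinates so that the regression window covers the transformed scatter. Below is a hedged sketch of that step, completing the y extrema and binning the points into quadrats with np.histogram2d; the quadrat-count step and the bin count are assumptions, since the original quadrat-selection code is not shown.

import numpy as np

# Hypothetical transformed coordinates standing in for x_points_trans / y_points_trans
x_points_trans = np.random.uniform(0.0, 10.0, 200)
y_points_trans = np.random.uniform(0.0, 10.0, 200)

# Bounding box of the transformed scatter defines the regression window
x_min, x_max = x_points_trans.min(), x_points_trans.max()
y_min, y_max = y_points_trans.min(), y_points_trans.max()

# Bin the points into quadrats over the window (assumed step; bin count is illustrative)
quadrats_per_axis = 20
histogram, x_edges, y_edges = np.histogram2d(
    x_points_trans, y_points_trans,
    bins=quadrats_per_axis,
    range=[[x_min, x_max], [y_min, y_max]])
selected_quadrats_n = np.sum(histogram > 0)  # quadrats containing at least one point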