Example #1
def bessel_zeros(order, n_zero):
    """
    These are the positive zeros of the Bessel function of the first kind.
    This function returns the n_zero-th positive zero of the Bessel function
    of the given order.
    """

    # Type check.
    order = valid.validate_int_value(order, greater_than=0)
    n_zero = valid.validate_int_value(n_zero, greater_than=0)

    # Scipy returns all of the zeros from the 1st to the n-th. This
    # function only wants the last one.
    zeros = sp_spcl.jn_zeros(order, n_zero)
    return valid.validate_float_value(zeros[-1])
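# A minimal usage sketch, not part of the original module: it assumes scipy
# is installed and skips the `valid` checks. jn_zeros returns the first
# n_zero positive zeros of J_order, so bessel_zeros keeps only the last one.
def _demo_bessel_zeros():
    import scipy.special as demo_spcl
    order, n_zero = 1, 3
    zeros = demo_spcl.jn_zeros(order, n_zero)   # First three zeros of J_1.
    return float(zeros[-1])                     # ~10.1735, the third zero.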
def multigaussian_function(x_input, center_array, std_dev_array, height_array,
                           gaussian_count=1):
    """
    Equation for a multigaussian: the parallel input arrays give the
    properties of each gaussian, and all of the gaussians are linearly
    combined (summed).
    """

    # Type check
    gaussian_count = valid.validate_int_value(gaussian_count, greater_than=0)
    x_input = valid.validate_float_array(x_input)
    center_array = valid.validate_float_array(
        center_array, size=gaussian_count)
    std_dev_array = valid.validate_float_array(std_dev_array,
                                               size=gaussian_count,
                                               deep_validate=True,
                                               greater_than=0)
    height_array = valid.validate_float_array(
        height_array, size=gaussian_count)

    # Define initial variables.
    n_datapoints = len(x_input)
    y_output_array = np.zeros(n_datapoints)

    # Loop and sum over all gaussian values.
    for gaussiandex in range(gaussian_count):
        y_output_array += gaussian_function(x_input,
                                            center_array[gaussiandex],
                                            std_dev_array[gaussiandex],
                                            height_array[gaussiandex])

    return np.array(y_output_array, dtype=float)
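# A hedged sketch of the linear combination performed above, assuming the
# 1-D gaussian_function has the normalized form used by the two dimensional
# equations later in this file (with height as a vertical offset). The
# parameter values are hypothetical and the `valid` checks are skipped.
def _demo_multigaussian_sum():
    import numpy as demo_np
    x = demo_np.linspace(-5.0, 5.0, 101)
    centers = demo_np.array([-2.0, 1.5])
    std_devs = demo_np.array([0.5, 1.0])
    heights = demo_np.array([0.0, 0.1])
    y = demo_np.zeros_like(x)
    # Sum each gaussian contribution, exactly as the loop above does.
    for cen, std, hgt in zip(centers, std_devs, heights):
        y += (1.0 / (std * demo_np.sqrt(2.0 * demo_np.pi))
              * demo_np.exp(-(x - cen)**2 / (2.0 * std**2)) + hgt)
    return x, y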
def generate_noisy_gaussian(center, std_dev, height, x_domain, noise_domain,
                            n_datapoints):
    """
    Generate a gaussian with some aspect of noise.

    Input:
        center = central x value
        std_dev = standard deviation of the function
        height = height (y-offset) of the function
        x_domain = absolute domain of the gaussian function
        noise_domain = bounds of the uniform random noise added to the
            perfect gaussian function
        n_datapoints = total number of datapoints of the gaussian function
    Output: x_values, y_values
        x_values = the x-axial array of the gaussian function within the domain
        y_values = the y-axial array of the gaussian function within the domain
    """
    # Type check.
    center = valid.validate_float_value(center)
    std_dev = valid.validate_float_value(std_dev, greater_than=0)
    height = valid.validate_float_value(height)
    x_domain = valid.validate_float_array(x_domain, shape=(2,), size=2)
    noise_domain = valid.validate_float_array(noise_domain, shape=(2,), size=2)
    n_datapoints = valid.validate_int_value(n_datapoints, greater_than=0)

    # Generate the gaussian function and map to an output with the input
    # parameters.
    x_values, y_values = generate_gaussian(center, std_dev, height,
                                           x_domain=x_domain,
                                           n_datapoints=n_datapoints)

    # Imbue the gaussian with random noise.
    y_values = misc.generate_noise(y_values, noise_domain,
                                   distribution='uniform')

    return x_values, y_values
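# A hedged sketch of the noise step above, assuming misc.generate_noise with
# distribution='uniform' adds an independent uniform draw from noise_domain
# to each sample (an assumption; that helper is not shown in this file).
def _demo_uniform_noise():
    import numpy as demo_np
    y_values = demo_np.exp(-demo_np.linspace(-3.0, 3.0, 50)**2 / 2.0)
    noise_domain = (-0.05, 0.05)
    noise = demo_np.random.uniform(noise_domain[0], noise_domain[1],
                                   size=y_values.shape)
    return y_values + noise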
Example #4
def cloud_line_integral(field_function,
                        cloud_equation,
                        view_line_point,
                        box_width,
                        view_line_deltas=(1, 0, 0),
                        n_guesses=100):
    """
    This function computes the total of the line integrals of a field
    function along a single sightline, where only the section(s) of the
    line that lie within the cloud set the lower and upper bounds of the
    integral(s).
    """
    # Type check
    field_function = valid.validate_function_call(field_function,
                                                  n_parameters=3)
    cloud_equation = valid.validate_function_call(cloud_equation,
                                                  n_parameters=3)
    view_line_point = valid.validate_float_array(view_line_point, shape=(3, ))
    box_width = valid.validate_float_value(box_width, greater_than=0)
    view_line_deltas = valid.validate_tuple(view_line_deltas, length=3)
    n_guesses = valid.validate_int_value(n_guesses, greater_than=0)

    # Integrating function. Parameterize the field function to integrate over
    # the curve given by the sightline.
    # Define the sightline parametric equations.
    def x_param(t):
        return view_line_deltas[0] * t + view_line_point[0]

    def y_param(t):
        return view_line_deltas[1] * t + view_line_point[1]

    def z_param(t):
        return view_line_deltas[2] * t + view_line_point[2]

    # Assume that the user's function accepts x,y,z in that order.
    def parameterized_field_equation(t):
        return field_function(x_param(t), y_param(t), z_param(t))

    # Determine the lower and upper bounds of the parameterized functional
    # integrations.
    lower_bounds, upper_bounds = \
        line_integral_boundaries(view_line_point, cloud_equation, box_width,
                                 view_line_deltas, n_guesses)

    # The total integrated number.
    integrated_value = 0
    error = []  # Error array
    for lowerdex, upperdex in zip(lower_bounds, upper_bounds):
        integration = sp.integrate.quad(parameterized_field_equation, lowerdex,
                                        upperdex)
        integrated_value += integration[0]
        error.append(integration[1])

    # Errors add in quadrature.
    error = np.sqrt(np.dot(error, error))

    return integrated_value, error
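# A minimal sketch of the integration step above: integrate a field along
# the straight line r(t) = view_line_point + t * view_line_deltas over a
# single [lower, upper] interval. The field and the bounds are hypothetical;
# line_integral_boundaries is not reproduced here.
def _demo_parameterized_line_integral():
    import numpy as demo_np
    import scipy.integrate as demo_integrate

    def field(x, y, z):
        return demo_np.exp(-(x**2 + y**2 + z**2))

    point = (0.0, 0.0, 0.0)
    deltas = (1.0, 0.0, 0.0)

    def field_along_line(t):
        # Parameterize the field by the sightline, as done above.
        return field(point[0] + deltas[0] * t,
                     point[1] + deltas[1] * t,
                     point[2] + deltas[2] * t)

    value, error = demo_integrate.quad(field_along_line, -1.0, 1.0)
    return value, error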
Example #5
def Ewer_Basu__eigenvalues(index_m, radius):
    """
    This computes the eigenvalue for some integer index m as it pertains to
    Ewertiwski & Basu 2013.
    """
    # Type check.
    index_m = valid.validate_int_value(index_m, greater_than=0)
    radius = valid.validate_float_value(radius)

    eigenvalue = (bessel_zeros(1, index_m) / radius)**2

    return valid.validate_float_value(eigenvalue)
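# A hedged sketch of the eigenvalue expression above, assuming scipy is
# installed: eigenvalue_m = (m-th positive zero of J_1 / radius)**2. The
# index and radius values are hypothetical.
def _demo_ewer_basu_eigenvalue():
    import scipy.special as demo_spcl
    index_m, radius = 2, 3.0
    k_m = demo_spcl.jn_zeros(1, index_m)[-1]   # Second zero of J_1, ~7.0156.
    return (k_m / radius)**2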
def generate_noisy_dual_dimension_gaussian(centers,
                                           std_devs,
                                           height,
                                           n_datapoints,
                                           x_domain,
                                           y_domain,
                                           noise_domain,
                                           dimensions=2):
    """
    This generates a noisy 2D gaussian.
    """

    # Type check
    dimensions = valid.validate_int_value(dimensions, greater_than=0)
    centers = valid.validate_float_array(centers,
                                         shape=(dimensions, ),
                                         size=dimensions)
    std_devs = valid.validate_float_array(std_devs,
                                          shape=(dimensions, ),
                                          size=dimensions,
                                          deep_validate=True,
                                          greater_than=0)
    height = valid.validate_float_value(height)
    n_datapoints = valid.validate_int_value(n_datapoints, greater_than=0)
    x_domain = valid.validate_float_array(x_domain, shape=(2, ), size=2)
    y_domain = valid.validate_float_array(y_domain, shape=(2, ), size=2)
    noise_domain = valid.validate_float_array(noise_domain,
                                              shape=(2, ),
                                              size=2)

    # Generate the 2D gaussian.
    points = generate_dual_dimension_gaussian(centers, std_devs, height,
                                              n_datapoints, x_domain, y_domain)

    # Imbue the z points (index 2) with noise.
    points[2] = misc.generate_noise(points[2], noise_domain)

    return points
def generate_dual_dimension_gaussian(centers,
                                     std_devs,
                                     height,
                                     n_datapoints,
                                     x_domain,
                                     y_domain,
                                     dimensions=2):
    """
    This generates random (x, y) points within the given domains and
    evaluates a 2D gaussian at them.
    """

    # Type check
    dimensions = valid.validate_int_value(dimensions, greater_than=0)
    centers = valid.validate_float_array(centers,
                                         shape=(dimensions, ),
                                         size=dimensions)
    std_devs = valid.validate_float_array(std_devs,
                                          shape=(dimensions, ),
                                          size=dimensions,
                                          deep_validate=True,
                                          greater_than=0)
    height = valid.validate_float_value(height)
    n_datapoints = valid.validate_int_value(n_datapoints, greater_than=0)
    x_domain = valid.validate_float_array(x_domain, shape=(2, ), size=2)
    y_domain = valid.validate_float_array(y_domain, shape=(2, ), size=2)

    # Generate x and y points at random.
    x_values = np.random.uniform(x_domain[0], x_domain[-1], size=n_datapoints)
    y_values = np.random.uniform(y_domain[0], y_domain[-1], size=n_datapoints)

    # Compile into a parallel pair of (x,y)
    input_points = np.append([x_values], [y_values], axis=0)

    # Compute the z_values; only the output points are needed.
    z_values, output_points = dual_dimensional_gauss_equation(
        input_points, centers, std_devs, height, dimensions)

    return output_points
def fit_bessel_function_2nd(x_points,
                            y_points,
                            order_guess=None,
                            order_bounds=None):
    """
    This function returns the order of a Bessel function of the second kind 
    that fits the data points according to a least squares fitting algorithm.

    Input:
    x_points = The x values of the points to fit.
    y_points = The y values of the points to fit.
    order_guess = A starting point for order guessing.
    order_bounds = The min and max values the order can be.

    Output:
    fit_order = The value of the order of the fitted Bessel function.
    covariance = The covariance of the fitted order.
    """
    # The total number of points, useful.
    n_datapoints = len(x_points)
    n_datapoints = valid.validate_int_value(n_datapoints, greater_than=0)
    if (n_datapoints <= 1):
        raise InputError('It does not make sense to fit one or fewer points.'
                         '    --Kyubey')

    # Type check
    x_points = valid.validate_float_array(x_points, size=n_datapoints)
    y_points = valid.validate_float_array(y_points, size=n_datapoints)
    if (order_guess is not None):
        order_guess = valid.validate_float_value(order_guess)
    else:
        order_guess = 1
    if (order_bounds is not None):
        order_bounds = valid.validate_float_array(order_bounds,
                                                  shape=(2, ),
                                                  size=2)
    else:
        order_bounds = (-np.inf, np.inf)

    # Function fitting; Scipy's curve fitting module is likely the best
    # method to go with.
    fit_parameters = sp_opt.curve_fit(bessel_function_2nd,
                                      x_points,
                                      y_points,
                                      p0=order_guess,
                                      bounds=order_bounds)

    # Split the fitted order and the (1 x 1) covariance matrix.
    fit_order = float(fit_parameters[0][0])
    covariance = float(fit_parameters[1][0, 0])

    return fit_order, covariance
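# A hedged sketch of the curve_fit call above, assuming bessel_function_2nd
# wraps scipy.special.yv as bessel_function_2nd(x, order) (an assumption;
# that wrapper is defined elsewhere). Synthetic data from a known order.
def _demo_fit_bessel_2nd_order():
    import numpy as demo_np
    import scipy.special as demo_spcl
    import scipy.optimize as demo_opt

    def bessel_2nd(x, order):
        return demo_spcl.yv(order, x)

    x_points = demo_np.linspace(1.0, 20.0, 200)
    y_points = bessel_2nd(x_points, 2.0)
    popt, pcov = demo_opt.curve_fit(bessel_2nd, x_points, y_points,
                                    p0=1.0, bounds=(0.0, 5.0))
    return float(popt[0]), float(pcov[0, 0])   # Recovers an order of ~2.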
def dual_dimensional_gauss_equation(input_points, center, std_dev, height,
                                    dimensions):
    """
    This function evaluates a gaussian of multiple dimensions/variables
    given the center's coordinates and the per-axis standard deviations.
    """
    try:
        n_datapoints = len(input_points[0])
    except TypeError:
        input_points = valid.validate_float_array(input_points)
        n_datapoints = len(input_points[0])

    # Validate, dimensions must go first.
    dimensions = valid.validate_int_value(dimensions, greater_than=0)
    input_points = valid.validate_float_array(input_points,
                                              shape=(2, n_datapoints))
    center = valid.validate_float_array(center,
                                        shape=(dimensions, ),
                                        size=dimensions)
    std_dev = valid.validate_float_array(std_dev,
                                         shape=(dimensions, ),
                                         size=dimensions,
                                         deep_validate=True,
                                         greater_than=0)
    height = valid.validate_float_value(height)

    # For two dimensions.
    normalization_term = 1 / (2 * np.pi * std_dev[0] * std_dev[1])

    exp_x_term = (input_points[0] - center[0])**2 / (2 * std_dev[0]**2)
    exp_y_term = (input_points[1] - center[1])**2 / (2 * std_dev[1]**2)

    z_points = normalization_term * np.exp(-(exp_x_term + exp_y_term)) + height

    output_points = np.append(input_points, np.array([z_points]), axis=0)

    return z_points, output_points
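# A minimal numerical sketch of the unrotated 2-D gaussian evaluated above,
# with hypothetical parameters and the `valid` checks skipped.
def _demo_dual_dimension_gauss():
    import numpy as demo_np
    x = demo_np.array([0.0, 0.5, 1.0])
    y = demo_np.array([0.0, -0.5, 1.0])
    center = (0.0, 0.0)
    std_dev = (1.0, 2.0)
    height = 0.0
    norm = 1.0 / (2.0 * demo_np.pi * std_dev[0] * std_dev[1])
    z = (norm * demo_np.exp(-((x - center[0])**2 / (2.0 * std_dev[0]**2)
                              + (y - center[1])**2 / (2.0 * std_dev[1]**2)))
         + height)
    return z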
def dual_dimensional_gauss_equation_rot(input_points, centers, std_devs,
                                        height, theta, dimensions):
    """
    This is the general gaussian equation for a gaussian rotated by some
    angle theta (in radians).
    """
    try:
        n_datapoints = len(input_points[0])
    except TypeError:
        input_points = valid.validate_float_array(input_points)
        n_datapoints = len(input_points[0])

    # Validate, dimensions must go first.
    dimensions = valid.validate_int_value(dimensions, greater_than=0)
    input_points = valid.validate_float_array(input_points,
                                              shape=(2, n_datapoints))
    centers = valid.validate_float_array(centers,
                                         shape=(dimensions, ),
                                         size=dimensions)
    std_devs = valid.validate_float_array(std_devs,
                                          shape=(dimensions, ),
                                          size=dimensions,
                                          deep_validate=True,
                                          greater_than=0)
    height = valid.validate_float_value(height)
    # Adapt for over/under rotation of theta.
    try:
        theta = valid.validate_float_value(theta,
                                           greater_than=0,
                                           less_than=2 * np.pi)
    except ValueError:
        # A loop is to be done. Have an insurance policy.
        loopbreak = 0
        while ((theta < 0) or (theta > 2 * np.pi)):
            if (theta < 0):
                theta += 2 * np.pi
            elif (theta > 2 * np.pi):
                theta = theta % (2 * np.pi)
            # Ensure that the loop does not get stuck in the event of
            # unpredicted behavior.
            loopbreak += 1
            if (loopbreak > 100):
                raise InputError('The value of theta cannot be '
                                 'nicely confined to 0 <= θ <= 2π '
                                 '    --Kyubey')

    # Following Wikipedia's parameter definitions.
    a = ((np.cos(theta)**2 / (2 * std_devs[0]**2)) + (np.sin(theta)**2 /
                                                      (2 * std_devs[1]**2)))
    b = (-(np.sin(2 * theta) / (4 * std_devs[0]**2)) + (np.sin(2 * theta) /
                                                        (4 * std_devs[1]**2)))
    c = ((np.sin(theta)**2 / (2 * std_devs[0]**2)) + (np.cos(theta)**2 /
                                                      (2 * std_devs[1]**2)))

    # Amplitude or normalization
    normalization_term = 1 / (2 * np.pi * std_devs[0] * std_devs[1])

    # General equation; height acts as the vertical offset, as in the
    # unrotated two dimensional equation.
    z_values = (normalization_term *
                np.exp(-(a * (input_points[0] - centers[0])**2 +
                         (2 * b * ((input_points[0] - centers[0]) *
                                   (input_points[1] - centers[1]))) +
                         (c * (input_points[1] - centers[1])**2))) + height)

    # Return values.
    output_points = np.append(input_points, np.array([z_values]), axis=0)

    return z_values, output_points
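# A hedged check of the Wikipedia a, b, c coefficients used above: at
# theta = 0 they reduce to a = 1/(2*sx**2), b = 0, c = 1/(2*sy**2), i.e.
# the unrotated two dimensional gaussian. The standard deviations are
# hypothetical.
def _demo_rotated_gauss_coefficients():
    import numpy as demo_np
    sx, sy, theta = 1.0, 2.0, 0.0
    a = (demo_np.cos(theta)**2 / (2 * sx**2)
         + demo_np.sin(theta)**2 / (2 * sy**2))
    b = (-demo_np.sin(2 * theta) / (4 * sx**2)
         + demo_np.sin(2 * theta) / (4 * sy**2))
    c = (demo_np.sin(theta)**2 / (2 * sx**2)
         + demo_np.cos(theta)**2 / (2 * sy**2))
    return a, b, c   # (0.5, 0.0, 0.125) for these values.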
def generate_multigaussian(center_list, std_dev_list, height_list,
                           x_domain, gaussian_count=None,
                           n_datapoints=None):
    """
    Generates a multigaussian arrangement of datapoints.
    """

    # Assume the center list is the highest priority (but check the
    # std_dev) for double checking the gaussian count.
    if (gaussian_count is None):
        gaussian_count = len(center_list)
        # Double check with std_dev
        if ((gaussian_count != len(std_dev_list)) or
                (len(std_dev_list) != len(center_list))):
            raise InputError('The number of gaussians to generate is not '
                             'known, nor can it be accurately derived from '
                             'the inputs given.    --Kyubey')

    # Type check
    gaussian_count = valid.validate_int_value(gaussian_count, greater_than=0)
    center_list = valid.validate_float_array(center_list, size=gaussian_count)
    std_dev_list = valid.validate_float_array(std_dev_list,
                                              size=gaussian_count,
                                              deep_validate=True,
                                              greater_than=0)
    height_list = valid.validate_float_array(height_list, size=gaussian_count)
    x_domain = valid.validate_float_array(x_domain,
                                          shape=(2,),
                                          size=2)
    n_datapoints = valid.validate_int_value(n_datapoints, greater_than=0)

    # Initial parameters.
    x_values = np.random.uniform(x_domain[0], x_domain[-1],
                                 size=n_datapoints)
    y_values = []

    # Compile the parameters into a tuple of per-gaussian dictionaries for
    # the usage of the envelope function.
    parameters = []
    for gaussiandex in range(gaussian_count):
        temp_parameter_dict = {'center': center_list[gaussiandex],
                               'std_dev': std_dev_list[gaussiandex],
                               'height': height_list[gaussiandex]}
        parameters.append(temp_parameter_dict)
    parameters = tuple(parameters)

    # Compile the matching tuple of functions. As this is multigaussian
    # generation, it is expected to only be gaussian functions.
    functions = []
    for gaussiandex in range(gaussian_count):
        functions.append(gaussian_function)
    functions = tuple(functions)

    # Execute the envelope function.
    y_values = misc.generate_function_envelope(x_values, functions, parameters)

    # Sort the values.
    sort_index = np.argsort(x_values)
    x_values = x_values[sort_index]
    y_values = y_values[sort_index]

    return np.array(x_values, dtype=float), np.array(y_values, dtype=float)
def fit_multigaussian(x_values, y_values,
                      gaussian_count=None,
                      window_len_ratio=0.1, sg_polyorder=3,
                      prominence=0.10,
                      *args, **kwargs):
    """
    Fit a gaussian function with 3 degrees of freedom but with many gaussians.

    Input:
        x_values = the x-axial array of the values
        y_values = the y-axial array of the values
        gaussian_count = the number of expected gaussian functions
        window_len_ratio = the ratio of the Savitzky-Golay window width to
            the total number of datapoints; use a larger value if there is
            a lot of noise
        sg_polyorder = the polynomial order of the Savitzky-Golay filter
        prominence = the peak prominence threshold used for peak detection;
            a lower value detects weaker peaks at the risk of false
            positives

    Returns center_array, std_dev_array, height_array, covariance_array
        center_array = the central values of the gaussians
        std_dev_array = the standard deviations of the gaussians
        height_array = the heights (y-offsets) of the gaussians
        covariance_array = the covariance matrices of the fits
    """
    # The total number of points, useful.
    try:
        n_datapoints = len(x_values)
    except TypeError:
        raise InputError('It does not make sense to try and fit a '
                         'single point.'
                         '    --Kyubey')
    else:
        n_datapoints = valid.validate_int_value(n_datapoints, greater_than=0)

    # Initial variables.
    center_array = []
    std_dev_array = []
    height_array = []
    covariance_array = []

    # Type check.
    x_values = valid.validate_float_array(x_values, size=n_datapoints)
    y_values = valid.validate_float_array(y_values, size=n_datapoints)
    if (gaussian_count is not None):
        # Gaussian count can't be less than 0.
        gaussian_count = valid.validate_int_value(
            gaussian_count, greater_than=0)
    window_len_ratio = valid.validate_float_value(window_len_ratio)
    sg_polyorder = valid.validate_int_value(sg_polyorder)
    prominence = valid.validate_float_value(prominence)

    # Implement the Savitzky-Golay filtering algorithm.
    # Window width needs to be an odd integer by Scipy and algorithm
    # stipulation.
    window_width = int(window_len_ratio * n_datapoints)
    if (window_width % 2 == 0):
        # It is even, make odd.
        window_width += 1
    elif (window_width % 2 == 1):
        # It is odd, it should be good.
        pass

    filtered_y_values = sp_sig.savgol_filter(y_values,
                                             window_width,
                                             sg_polyorder)

    # Detect possible peaks of Gaussian functions.
    peak_index, peak_properties = \
        sp_sig.find_peaks(filtered_y_values, prominence=prominence)
    left_bases = peak_properties['left_bases']
    right_bases = peak_properties['right_bases']

    # Attempt to fit a gaussian curve between the ranges of each peak.
    for peakdex, left_basedex, right_basedex in \
            zip(peak_index, left_bases, right_bases):
        # Separate each of the gaussians and try to find parameters.
        center, std_dev, height, covariance = \
            fit_gaussian(x_values[left_basedex:right_basedex],
                         y_values[left_basedex:right_basedex],
                         center_guess=x_values[peakdex])

        # Append the values to the arrays of information.
        center_array.append(center)
        std_dev_array.append(std_dev)
        height_array.append(height)
        covariance_array.append(covariance)

    # Type check before returning, just in case.
    center_array = valid.validate_float_array(center_array)
    std_dev_array = valid.validate_float_array(std_dev_array)
    height_array = valid.validate_float_array(height_array)
    covariance_array = valid.validate_float_array(covariance_array)

    return center_array, std_dev_array, height_array, covariance_array
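# A minimal sketch of the peak-detection stage above: Savitzky-Golay
# smoothing followed by scipy.signal.find_peaks with a prominence threshold.
# The two-peak data and the window/polyorder values are illustrative only.
def _demo_peak_detection():
    import numpy as demo_np
    import scipy.signal as demo_sig
    x = demo_np.linspace(0.0, 10.0, 500)
    y = (demo_np.exp(-(x - 3.0)**2 / 0.5)
         + 0.6 * demo_np.exp(-(x - 7.0)**2 / 0.8))
    smoothed = demo_sig.savgol_filter(y, window_length=51, polyorder=3)
    peak_index, properties = demo_sig.find_peaks(smoothed, prominence=0.1)
    return x[peak_index], properties['left_bases'], properties['right_bases']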
def fit_gaussian(x_values, y_values,
                 center_guess=None, std_dev_guess=None, height_guess=None,
                 center_bounds=None, std_dev_bounds=None, height_bounds=None):
    """
    Fit a gaussian function with 3 degrees of freedom.

    Input:
        x_values = the x-axial array of the values
        y_values = the y-axial array of the values
        center_guess = a starting point for the center
        std_dev_guess = a starting point for the std_dev
        height_guess = a starting point for the height

    Returns center, std_dev, height, covariance
        center = the central value of the gaussian
        std_dev = the standard deviation of the gaussian
        height = the height (y-offset) of the gaussian function
        covariance = the covariance matrix of the fit
    """
    # The total number of points, useful.
    try:
        n_datapoints = len(x_values)
    except TypeError:
        raise InputError('It does not make sense to try and fit a '
                         'single point.'
                         '    --Kyubey')
    else:
        n_datapoints = valid.validate_int_value(n_datapoints, greater_than=0)

    # Type check
    x_values = valid.validate_float_array(x_values)
    y_values = valid.validate_float_array(y_values)

    # Type check optional issues.
    # Type check the guesses
    if (center_guess is not None):
        center_guess = valid.validate_float_value(center_guess)
    else:
        # The default of scipy's curve fit.
        center_guess = 1
    if (std_dev_guess is not None):
        std_dev_guess = valid.validate_float_value(
            std_dev_guess, greater_than=0)
    else:
        # The default of scipy's curve fit.
        std_dev_guess = 1
    if (height_guess is not None):
        height_guess = valid.validate_float_value(height_guess)
    else:
        # The default of scipy's curve fit.
        height_guess = 1
    # Type check bounds.
    if (center_bounds is not None):
        center_bounds = valid.validate_float_array(center_bounds, size=2)
        center_bounds = np.sort(center_bounds)
    else:
        center_bounds = np.array([-np.inf, np.inf])
    if (std_dev_bounds is not None):
        std_dev_bounds = valid.validate_float_array(std_dev_bounds, size=2,
                                                    deep_validate=True,
                                                    greater_than=0)
        std_dev_bounds = np.sort(std_dev_bounds)
    else:
        std_dev_bounds = np.array([0, np.inf])
    if (height_bounds is not None):
        height_bounds = valid.validate_float_array(height_bounds)
        height_bounds = np.sort(height_bounds)
    else:
        height_bounds = np.array([-np.inf, np.inf])

    # Compiling the guesses.
    guesses = np.array([center_guess, std_dev_guess, height_guess])

    # Compiling the bounds
    lower_bounds = (center_bounds[0], std_dev_bounds[0], height_bounds[0])
    upper_bounds = (center_bounds[1], std_dev_bounds[1], height_bounds[1])
    bounds = (lower_bounds, upper_bounds)

    # Use scipy's curve optimization function for the gaussian function.
    fit_parameters, covariance = sp_opt.curve_fit(gaussian_function,
                                                  x_values, y_values,
                                                  p0=guesses, bounds=bounds)

    # For ease.
    center = fit_parameters[0]
    std_dev = fit_parameters[1]
    height = fit_parameters[2]

    return center, std_dev, height, covariance
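# A hedged sketch of the curve_fit call above, assuming gaussian_function
# takes (x, center, std_dev, height) in that order and uses the normalized
# form seen elsewhere in this file. Synthetic, noise-free data.
def _demo_fit_gaussian():
    import numpy as demo_np
    import scipy.optimize as demo_opt

    def gauss(x, center, std_dev, height):
        return (1.0 / (std_dev * demo_np.sqrt(2.0 * demo_np.pi))
                * demo_np.exp(-(x - center)**2 / (2.0 * std_dev**2)) + height)

    x_values = demo_np.linspace(-5.0, 5.0, 200)
    y_values = gauss(x_values, 1.0, 0.7, 0.2)
    guesses = (0.5, 1.0, 0.0)
    bounds = ((-demo_np.inf, 0.0, -demo_np.inf),
              (demo_np.inf, demo_np.inf, demo_np.inf))
    popt, pcov = demo_opt.curve_fit(gauss, x_values, y_values,
                                    p0=guesses, bounds=bounds)
    return popt, pcov   # popt is approximately (1.0, 0.7, 0.2).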
def generate_noisy_multigaussian(center_list, std_dev_list, height_list,
                                 noise_domain_list, x_domain, n_datapoints,
                                 gaussian_count=None, cumulative_noise=False):
    """
    Generate multiple gaussians with some aspect of noise within one 
    dataset.

    Input:
    center_list = list of central x values
    std_dev_list = list of standard deviations of the functions
    height_list = list of heights (y-offsets) of the functions
    noise_domain_list = list of uniform random noise domains, one per
        gaussian function
    x_domain = absolute domain of the gaussian functions
    n_datapoints = total number of datapoints
    gaussian_count = the number of gaussian functions to be made
    cumulative_noise = if each gaussian has its own noise (True), or just
        the entire set (False)

    Output: x_values, y_values
    x_values = the x-axial array of the gaussian functions within the
        domain
    y_values = the y-axial array of the gaussian functions within the
        domain
    """

    # Assume the center list is the highest priority (but check the
    # std_dev) for double checking the gaussian count.
    if (gaussian_count is None):
        gaussian_count = len(center_list)
        # Double check with std_dev
        if ((gaussian_count != len(std_dev_list)) or
                (len(std_dev_list) != len(center_list))):
            raise InputError('The number of gaussians to generate is not '
                             'known, nor can it be accurately derived from '
                             'the inputs given.    --Kyubey')

    # Type check. Validate gaussian_count first, as the other checks use it.
    gaussian_count = valid.validate_int_value(gaussian_count, greater_than=0)
    center_list = valid.validate_float_array(center_list, size=gaussian_count)
    std_dev_list = valid.validate_float_array(
        std_dev_list, size=gaussian_count)
    height_list = valid.validate_float_array(height_list, size=gaussian_count)
    noise_domain_list = valid.validate_float_array(noise_domain_list,
                                                   shape=(gaussian_count, 2))
    x_domain = valid.validate_float_array(x_domain,
                                          shape=(2,), size=2)
    n_datapoints = valid.validate_int_value(n_datapoints, greater_than=0)
    cumulative_noise = valid.validate_boolean_value(cumulative_noise)

    # Initialize initial variables.
    x_values = []
    y_values = []

    # Check how to distribute noise.
    if (cumulative_noise):
        # Each gaussian must be generated on its own.
        for gaussiandex in range(gaussian_count):
            # Generate gaussian.
            temp_x_values, temp_y_values = \
                generate_gaussian(center_list[gaussiandex],
                                  std_dev_list[gaussiandex],
                                  height_list[gaussiandex],
                                  x_domain,
                                  int(np.ceil(n_datapoints / gaussian_count)))
            temp_y_values = misc.generate_noise(temp_y_values,
                                                noise_domain_list[gaussiandex],
                                                distribution='uniform')
            # Store for return.
            x_values.append(temp_x_values)
            y_values.append(temp_y_values)

        # Take the element-wise maximum over the stacked gaussians,
        # discarding everything lower.
        x_values = np.amax(np.array(x_values), axis=0)
        y_values = np.amax(np.array(y_values), axis=0)

    else:
        # Generate noise of every point after gaussian generation.
        # Generate gaussian
        x_values, y_values = generate_multigaussian(center_list, std_dev_list,
                                                    height_list, x_domain,
                                                    gaussian_count,
                                                    int(np.ceil(n_datapoints /
                                                                gaussian_count)))

        # Generate noise. Warn the user that only the first noise domain is
        # being used.
        kyubey_warning(OutputWarning, ('Only the first element of the '
                                       'noise_domain_list is used if '
                                       'cumulative_noise is False.'
                                       '    --Kyubey'))
        y_values = misc.generate_noise(y_values, noise_domain_list[0],
                                       distribution='uniform')

    return np.array(x_values, dtype=float), np.array(y_values, dtype=float)
def fit_bessel_function_1st_integer(x_values,
                                    y_values,
                                    max_order=1000,
                                    negative_order=False):
    """
    This is the main bessel fitting algorithm. It can fit, and then extract, 
    the order of the bessel function according to the best fit of the data 
    points.

    However, it can only do integer orders, and will not attempt float orders or
    complex orders.

    Input:
    x_values = Input x values
    y_values = Input y values
    max_order = The absolute value of the max order that is to be checked.
    negative_order = If True, prefer the negative order when the positive
        and negative orders fit equally well.

    Output:
    fit_order = The best-fit integer order (returned as a non-negative
        magnitude).
    """

    # The total number of points, useful.
    n_datapoints = len(x_values)
    n_datapoints = valid.validate_int_value(n_datapoints, greater_than=0)
    if (n_datapoints <= 1):
        raise InputError('It does not make sense to fit one or fewer points.'
                         '    --Kyubey')

    # Type check
    x_values = valid.validate_float_array(x_values, size=n_datapoints)
    y_values = valid.validate_float_array(y_values, size=n_datapoints)
    negative_order = valid.validate_boolean_value(negative_order)

    # Because an order of 0 is a thing.
    searchable_orders = range(max_order + 1)

    # Generate list of output values, given inputs and searching through all
    # given possible orders.
    output_ranges = []
    for orderdex in searchable_orders:
        output_ranges.append(bessel_function_1st(x_values, orderdex))

    # Copy the positive-order outputs and mirror them to negative orders via
    # the parity relation J_{-n}(x) = (-1)**n * J_n(x) for Bessel functions
    # of the first kind of integer order.
    positive_order_outputs = np.array(output_ranges)
    negative_order_outputs = np.array(output_ranges)
    for orderdex, output_array in enumerate(negative_order_outputs):
        negative_order_outputs[orderdex] *= (-1)**orderdex

    # Use the least squares fitting algorithm.

    # Define square residuals for both positive and negative.
    positive_order_sq_residuals = \
        np.sum((y_values - positive_order_outputs[:])**2, axis=1)
    negative_order_sq_residuals = \
        np.sum((y_values - negative_order_outputs[:])**2, axis=1)

    # Find the lowest square residual of both the positive and negative values.
    positive_order_fit = np.argmin(positive_order_sq_residuals)
    negative_order_fit = np.argmin(negative_order_sq_residuals)

    # The best-fit negative and positive orders may coincide via the parity
    # relation; if so, return the positive value unless the negative order
    # is preferred.
    if (positive_order_fit == negative_order_fit):
        if (negative_order):
            return int(negative_order_fit)
        else:
            return int(positive_order_fit)
    elif (positive_order_sq_residuals[positive_order_fit] <
          negative_order_sq_residuals[negative_order_fit]):
        return int(positive_order_fit)
    elif (positive_order_sq_residuals[positive_order_fit] >
          negative_order_sq_residuals[negative_order_fit]):
        return int(negative_order_fit)
    else:
        raise OutputError('There seems to be something wrong; it is not known '
                          'what is wrong.'
                          '    --Kyubey')
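# A hedged check of the parity relation used above to mirror the
# positive-order outputs into negative orders: J_{-n}(x) = (-1)**n * J_n(x)
# for integer n. scipy.special.jv handles negative orders directly.
def _demo_bessel_parity_relation():
    import numpy as demo_np
    import scipy.special as demo_spcl
    x = demo_np.linspace(0.1, 10.0, 5)
    n = 3
    lhs = demo_spcl.jv(-n, x)
    rhs = (-1)**n * demo_spcl.jv(n, x)
    return demo_np.allclose(lhs, rhs)   # True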