def __init__(self, failures, failure_stress, right_censored=None, right_censored_stress=None, print_results=True, show_plot=True, common_shape_method='BIC'):
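        '''
        Fits a Normal distribution (via Fit_Normal_2P) to the failures at each unique stress level,
        finds a common sigma using the chosen common_shape_method ('BIC', 'weighted_average' or
        'average'), refits each stress level with that sigma forced, and optionally draws the
        probability plots and prints a results table.

        Illustrative call (a sketch only; the enclosing class name is not shown in this file and is assumed here):
            ALT_probability_plot_Normal(failures=[150, 180, 220, 310, 350, 410],
                                        failure_stress=[80, 80, 80, 60, 60, 60])
        '''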

        # input type checking and converting to arrays in preparation for creation of the dataframe
        if common_shape_method not in ['BIC', 'weighted_average', 'average']:
            raise ValueError('common_shape_method must be BIC, weighted_average, or average. The default is BIC.')
        if len(failures) != len(failure_stress):
            raise ValueError('The length of failures does not match the length of failure_stress')
        if type(failures) is list:
            failures = np.array(failures)
        elif type(failures) is np.ndarray:
            pass
        else:
            raise ValueError('failures must be an array or list')
        if type(failure_stress) is list:
            failure_stress = np.array(failure_stress)
        elif type(failure_stress) is np.ndarray:
            pass
        else:
            raise ValueError('failure_stress must be an array or list')
        if right_censored is not None:
            if len(right_censored) != len(right_censored_stress):
                raise ValueError('The length of right_censored does not match the length of right_censored_stress')
            if type(right_censored) is list:
                right_censored = np.array(right_censored)
            elif type(right_censored) is np.ndarray:
                pass
            else:
                raise ValueError('right_censored must be an array or list')
            if type(right_censored_stress) is list:
                right_censored_stress = np.array(right_censored_stress)
            elif type(right_censored_stress) is np.ndarray:
                pass
            else:
                raise ValueError('right_censored_stress must be an array or list')

        delta = max(failures) - min(failures)
        xmin = min(failures) - delta * 0.2
        xmax = max(failures) + delta * 0.2
        xvals = np.linspace(xmin, xmax, 100)
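        # xvals span the failure data padded by 20% on each side and are used later to draw the
        # fitted CDFs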

        if right_censored is not None:
            TIMES = np.hstack([failures, right_censored])
            STRESS = np.hstack([failure_stress, right_censored_stress])
            CENS_CODES = np.hstack([np.ones_like(failures), np.zeros_like(right_censored)])
        else:
            TIMES = failures
            STRESS = failure_stress
            CENS_CODES = np.ones_like(failures)

        data = {'times': TIMES, 'stress': STRESS, 'cens_codes': CENS_CODES}
        df = pd.DataFrame(data, columns=['times', 'stress', 'cens_codes'])
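        # sort the combined data and split it into separate failure and right-censored dataframes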
        df_sorted = df.sort_values(by=['cens_codes', 'stress', 'times'])
        is_failure = df_sorted['cens_codes'] == 1
        is_right_cens = df_sorted['cens_codes'] == 0
        f_df = df_sorted[is_failure]
        rc_df = df_sorted[is_right_cens]
        unique_stresses_f = f_df.stress.unique()
        if right_censored is not None:
            unique_stresses_rc = rc_df.stress.unique()
            for item in unique_stresses_rc:  # check that there are no unique right_censored stresses that are not also in failure stresses
                if item not in unique_stresses_f:
                    raise ValueError('The right_censored_stress array contains values that are not in the failure_stress array. This is equivalent to trying to fit a distribution to only censored data and cannot be done.')

        normal_fit_mu_array = []
        normal_fit_sigma_array = []
        normal_fit_mu_array_common_shape = []
        color_list = ['steelblue', 'darkorange', 'red', 'green', 'purple', 'blue', 'grey', 'deeppink', 'cyan', 'chocolate']
        weights_array = []
        # within this loop, each list of failures and right censored values will be unpacked for each unique stress to find the common sigma parameter
        for stress in unique_stresses_f:
            failure_current_stress_df = f_df[f_df['stress'] == stress]
            FAILURES = failure_current_stress_df['times'].values
            len_f = len(FAILURES)
            if right_censored is not None:
                if stress in unique_stresses_rc:
                    right_cens_current_stress_df = rc_df[rc_df['stress'] == stress]
                    RIGHT_CENSORED = right_cens_current_stress_df['times'].values
                    len_rc = len(RIGHT_CENSORED)
                else:
                    RIGHT_CENSORED = None
                    len_rc = 0
            else:
                RIGHT_CENSORED = None
                len_rc = 0

            weights_array.append(len_f + len_rc)
            normal_fit = Fit_Normal_2P(failures=FAILURES, right_censored=RIGHT_CENSORED, show_probability_plot=False, print_results=False)
            normal_fit_mu_array.append(normal_fit.mu)
            normal_fit_sigma_array.append(normal_fit.sigma)
        common_shape_guess = np.average(normal_fit_sigma_array)
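        # this simple average of the individual sigmas is used directly when common_shape_method='average'
        # and as the starting guess for the BIC optimizer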

        def __BIC_minimizer(common_shape_X): #lgtm [py/similar-function]
            '''
            __BIC_minimizer is used by the minimize function to get the sigma which gives the lowest overall BIC
            '''
            BIC_tot = 0
            for stress in unique_stresses_f:
                failure_current_stress_df = f_df[f_df['stress'] == stress]
                FAILURES = failure_current_stress_df['times'].values
                if right_censored is not None:
                    if stress in unique_stresses_rc:
                        right_cens_current_stress_df = rc_df[rc_df['stress'] == stress]
                        RIGHT_CENSORED = right_cens_current_stress_df['times'].values
                    else:
                        RIGHT_CENSORED = None
                else:
                    RIGHT_CENSORED = None
                normal_fit_common_shape = Fit_Normal_2P(failures=FAILURES, right_censored=RIGHT_CENSORED, show_probability_plot=False, print_results=False, force_sigma=common_shape_X)
                BIC_tot += normal_fit_common_shape.BIC
            return BIC_tot
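        # obtain the common sigma: minimize the total BIC (Nelder-Mead), take a data-weighted
        # average of the individual sigmas, or take their simple average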

        if common_shape_method == 'BIC':
            optimized_sigma_results = minimize(__BIC_minimizer, x0=common_shape_guess, method='nelder-mead')
            common_shape = optimized_sigma_results.x[0]
        elif common_shape_method == 'weighted_average':
            total_data = sum(weights_array)
            weights = np.array(weights_array) / total_data
            common_shape = sum(weights * np.array(normal_fit_sigma_array))
        elif common_shape_method == 'average':
            common_shape = common_shape_guess  # this was just the numerical average obtained above
        self.common_shape = common_shape

        # within this loop, the failures and right censored values are extracted for each unique stress and plotted as a probability plot, along with the CDF of the fit that uses the common sigma
        AICc_total = 0
        BIC_total = 0
        AICc = True
        for i, stress in enumerate(unique_stresses_f):
            failure_current_stress_df = f_df[f_df['stress'] == stress]
            FAILURES = failure_current_stress_df['times'].values
            if right_censored is not None:
                if stress in unique_stresses_rc:
                    right_cens_current_stress_df = rc_df[rc_df['stress'] == stress]
                    RIGHT_CENSORED = right_cens_current_stress_df['times'].values
                else:
                    RIGHT_CENSORED = None
            else:
                RIGHT_CENSORED = None
            normal_fit_common_shape = Fit_Normal_2P(failures=FAILURES, right_censored=RIGHT_CENSORED, show_probability_plot=False, print_results=False, force_sigma=common_shape)
            normal_fit_mu_array_common_shape.append(normal_fit_common_shape.mu[0])
            if type(normal_fit_common_shape.AICc) == str:
                AICc = False
            else:
                AICc_total += normal_fit_common_shape.AICc
            BIC_total += normal_fit_common_shape.BIC
            if show_plot is True:
                normal_fit_common_shape.distribution.CDF(linestyle='--', color=color_list[i], xvals=xvals)
                Probability_plotting.Normal_probability_plot(failures=FAILURES, right_censored=RIGHT_CENSORED, color=color_list[i], label=str(stress))
                plt.legend(title='Stress')
                plt.xlim(xmin, xmax)
                if common_shape_method == 'BIC':
                    plt.title('ALT Normal Probability Plot\nOptimal BIC ' + r'$\sigma$ = ' + str(round(common_shape, 4)))
                elif common_shape_method == 'weighted_average':
                    plt.title('ALT Normal Probability Plot\nWeighted average ' + r'$\sigma$ = ' + str(round(common_shape, 4)))
                elif common_shape_method == 'average':
                    plt.title('ALT Normal Probability Plot\nAverage ' + r'$\sigma$ = ' + str(round(common_shape, 4)))

        self.BIC_sum = np.sum(BIC_total)
        if AICc is True:
            self.AICc_sum = np.sum(AICc_total)
        else:
            self.AICc_sum = 'Insufficient Data'
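        # express each sigma change as the relative difference between the common sigma and the
        # individually fitted sigma at that stress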
        sigma_difs = (common_shape - np.array(normal_fit_sigma_array)) / np.array(normal_fit_sigma_array)
        sigma_differences = []
        for item in sigma_difs:
            if item > 0:
                sigma_differences.append('+' + str(round(item * 100, 2)) + '%')
            else:
                sigma_differences.append(str(round(item * 100, 2)) + '%')
        results = {'stress': unique_stresses_f, 'original mu': normal_fit_mu_array, 'original sigma': normal_fit_sigma_array, 'new mu': normal_fit_mu_array_common_shape, 'common sigma': np.ones_like(unique_stresses_f) * common_shape, 'sigma change': sigma_differences}
        results_df = pd.DataFrame(results, columns=['stress', 'original mu', 'original sigma', 'new mu', 'common sigma', 'sigma change'])
        blankIndex = [''] * len(results_df)
        results_df.index = blankIndex
        self.results = results_df
        if print_results is True:
            pd.set_option('display.width', 200)  # prevents wrapping after default 80 characters
            pd.set_option('display.max_columns', 9)  # shows the dataframe without ... truncation
            print('\nALT Normal probability plot results:')
            print(self.results)
            print('Total AICc:', self.AICc_sum)
            print('Total BIC:', self.BIC_sum)
    def __init__(self, failures, failure_stress, right_censored=None, right_censored_stress=None, print_results=True, show_plot=True):
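        '''
        Fits a Weibull distribution (via Fit_Weibull_2P) and an Exponential distribution (via
        Fit_Expon_1P) to the failures at each unique stress level, compares the total BIC of the
        Exponential fits (shape fixed at 1) with that of Weibull fits forced to share the average
        beta, and optionally draws the probability plots (on Weibull probability paper) and prints
        a results table.
        '''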

        # input type checking and converting to arrays in preparation for creation of the dataframe
        if len(failures) != len(failure_stress):
            raise ValueError('The length of failures does not match the length of failure_stress')
        if type(failures) is list:
            failures = np.array(failures)
        elif type(failures) is np.ndarray:
            pass
        else:
            raise ValueError('failures must be an array or list')
        if type(failure_stress) is list:
            failure_stress = np.array(failure_stress)
        elif type(failure_stress) is np.ndarray:
            pass
        else:
            raise ValueError('failure_stress must be an array or list')
        if right_censored is not None:
            if len(right_censored) != len(right_censored_stress):
                raise ValueError('The length of right_censored does not match the length of right_censored_stress')
            if type(right_censored) is list:
                right_censored = np.array(right_censored)
            elif type(right_censored) is np.ndarray:
                pass
            else:
                raise ValueError('right_censored must be an array or list')
            if type(right_censored_stress) is list:
                right_censored_stress = np.array(right_censored_stress)
            elif type(right_censored_stress) is np.ndarray:
                pass
            else:
                raise ValueError('right_censored_stress must be an array or list')

        xmin = np.floor(np.log10(min(failures))) - 1
        xmax = np.ceil(np.log10(max(failures))) + 1
        xvals = np.logspace(xmin, xmax, 100)
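        # xvals are log-spaced (one decade beyond the failure data on each side) to suit the
        # log-scaled x-axis of the probability plot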

        if right_censored is not None:
            TIMES = np.hstack([failures, right_censored])
            STRESS = np.hstack([failure_stress, right_censored_stress])
            CENS_CODES = np.hstack([np.ones_like(failures), np.zeros_like(right_censored)])
        else:
            TIMES = failures
            STRESS = failure_stress
            CENS_CODES = np.ones_like(failures)

        data = {'times': TIMES, 'stress': STRESS, 'cens_codes': CENS_CODES}
        df = pd.DataFrame(data, columns=['times', 'stress', 'cens_codes'])
        df_sorted = df.sort_values(by=['cens_codes', 'stress', 'times'])
        is_failure = df_sorted['cens_codes'] == 1
        is_right_cens = df_sorted['cens_codes'] == 0
        f_df = df_sorted[is_failure]
        rc_df = df_sorted[is_right_cens]
        unique_stresses_f = f_df.stress.unique()
        if right_censored is not None:
            unique_stresses_rc = rc_df.stress.unique()
            for item in unique_stresses_rc:  # check that there are no unique right_censored stresses that are not also in failure stresses
                if item not in unique_stresses_f:
                    raise ValueError('The right_censored_stress array contains values that are not in the failure_stress array. This is equivalent to trying to fit a distribution to only censored data and cannot be done.')

        weibull_fit_alpha_array = []
        weibull_fit_beta_array = []
        expon_fit_lambda_array = []
        color_list = ['steelblue', 'darkorange', 'red', 'green', 'purple', 'blue', 'grey', 'deeppink', 'cyan', 'chocolate']
        # within this loop, each list of failures and right censored values will be unpacked for each unique stress to find the common beta parameter
        for stress in unique_stresses_f:
            failure_current_stress_df = f_df[f_df['stress'] == stress]
            FAILURES = failure_current_stress_df['times'].values
            if right_censored is not None:
                if stress in unique_stresses_rc:
                    right_cens_current_stress_df = rc_df[rc_df['stress'] == stress]
                    RIGHT_CENSORED = right_cens_current_stress_df['times'].values
                else:
                    RIGHT_CENSORED = None
            else:
                RIGHT_CENSORED = None

            weibull_fit = Fit_Weibull_2P(failures=FAILURES, right_censored=RIGHT_CENSORED, show_probability_plot=False, print_results=False)
            weibull_fit_alpha_array.append(weibull_fit.alpha)
            weibull_fit_beta_array.append(weibull_fit.beta)
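            # the average of these individually fitted betas is later forced as a common beta in the
            # Weibull fits used for the BIC comparison against the Exponential fits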

        # within this loop, the failures and right censored values are extracted for each unique stress and plotted as a probability plot, along with the CDF of the Exponential fit (the common shape of 1)
        AICc_total = 0
        BIC_total = 0
        AICc_total_weib = 0
        BIC_total_weib = 0
        AICc = True
        AICc_weib = True
        for i, stress in enumerate(unique_stresses_f):
            failure_current_stress_df = f_df[f_df['stress'] == stress]
            FAILURES = failure_current_stress_df['times'].values
            if right_censored is not None:
                if stress in unique_stresses_rc:
                    right_cens_current_stress_df = rc_df[rc_df['stress'] == stress]
                    RIGHT_CENSORED = right_cens_current_stress_df['times'].values
                else:
                    RIGHT_CENSORED = None
            else:
                RIGHT_CENSORED = None
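            # fit an Exponential (implied shape of 1) and a Weibull with the average beta forced,
            # so that their information criteria can be compared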
            expon_fit = Fit_Expon_1P(failures=FAILURES, right_censored=RIGHT_CENSORED, show_probability_plot=False, print_results=False)
            weib_fit = Fit_Weibull_2P(failures=FAILURES, right_censored=RIGHT_CENSORED, show_probability_plot=False, print_results=False, force_beta=np.average(weibull_fit_beta_array))
            expon_fit_lambda_array.append(expon_fit.Lambda)
            if type(expon_fit.AICc) == str:
                AICc = False
            else:
                AICc_total += expon_fit.AICc
            if type(weib_fit.AICc) == str:
                AICc_weib = False
            else:
                AICc_total_weib += weib_fit.AICc
            BIC_total += expon_fit.BIC
            BIC_total_weib += weib_fit.BIC
            if show_plot is True:
                expon_fit.distribution.CDF(linestyle='--', color=color_list[i], xvals=xvals, plot_CI=False)  # plotting of the confidence intervals has been turned off
                Probability_plotting.Weibull_probability_plot(failures=FAILURES, right_censored=RIGHT_CENSORED, plot_CI=False, color=color_list[i], label=str(stress))
                plt.legend(title='Stress')
                plt.xlim(10 ** (xmin + 1), 10 ** (xmax - 1))
                plt.title('ALT Exponential Probability Plot')
        self.BIC_sum = np.sum(BIC_total)
        self.BIC_sum_weibull = np.sum(BIC_total_weib)
        if AICc is True:
            self.AICc_sum = np.sum(AICc_total)
        else:
            self.AICc_sum = 'Insufficient Data'
        if AICc_weib is True:
            self.AICc_sum_weibull = np.sum(AICc_total_weib)
        else:
            self.AICc_sum_weibull = 'Insufficient Data'
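        # shape change: relative difference between the Exponential's implied beta of 1 and each
        # individually fitted Weibull beta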
        beta_difs = (1 - np.array(weibull_fit_beta_array)) / np.array(weibull_fit_beta_array)
        beta_differences = []
        for item in beta_difs:
            if item > 0:
                beta_differences.append('+' + str(round(item * 100, 2)) + '%')
            else:
                beta_differences.append(str(round(item * 100, 2)) + '%')
        results = {'stress': unique_stresses_f, 'weibull alpha': weibull_fit_alpha_array, 'weibull beta': weibull_fit_beta_array, 'new 1/Lambda': 1 / np.array(expon_fit_lambda_array), 'common shape': np.ones_like(unique_stresses_f), 'shape change': beta_differences}
        results_df = pd.DataFrame(results, columns=['stress', 'weibull alpha', 'weibull beta', 'new 1/Lambda', 'common shape', 'shape change'])
        blankIndex = [''] * len(results_df)
        results_df.index = blankIndex
        self.results = results_df
        if print_results is True:
            pd.set_option('display.width', 200)  # prevents wrapping after default 80 characters
            pd.set_option('display.max_columns', 9)  # shows the dataframe without ... truncation
            print('\nALT Exponential probability plot results:')
            print(self.results)
            print('Total AICc:', self.AICc_sum)
            print('Total BIC:', self.BIC_sum)
            print('Total AICc (weibull):', self.AICc_sum_weibull)
            print('Total BIC (weibull):', self.BIC_sum_weibull)
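        # warn the user if the Weibull fits (with the common beta taken as the average of the
        # individual betas) achieve a lower total BIC than the Exponential fits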
        if self.BIC_sum > self.BIC_sum_weibull:
            print('WARNING: The Weibull distribution would be a more appropriate fit for this data set as it has a lower total BIC (with the common beta taken as the average of the individual Weibull betas) than the Exponential distribution.')