Example 1
def fit_estimator(y, x=None, smooth=0):
    """ function that estimates starting parameters for the Gaussian fit """

    if x is None:
        x = np.arange(0, len(y), 1.0)

    # Start by locating the peak center
    peak = peakfinder(y, x, smooth)
    if (peak == []):
        print_line('Gauss', "No peaks were found")
        return []

    center = peak[0]

    # Estimate the amplitude after discarding the lowest 5% of the sorted flux values
    sorted_y = np.sort(y)
    # The divisor of 20 (i.e. the lowest 5%) was chosen empirically; no deeper reason
    reduced_y = sorted_y[int(len(sorted_y) / 20):]
    amplitude = (peak[1] - np.median(reduced_y))
    # Continuum is also estimated using the same principle
    continuum = np.median(reduced_y)

    # Estimate the FWHM from the first point crossing the half-depth level,
    # assuming the profile is symmetric about its center
    delta_x = x[1] - x[0]  # we assume the sampling step is constant
    for i in range(0, len(x)):
        if (y[i] <= continuum + amplitude / 2.0):
            break

    FWHM = 2.0 * (peak[0] - x[i])

    return [amplitude, center, FWHM, continuum]
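
The four returned values parameterize a Gaussian on top of a constant continuum. As a reference, here is a minimal sketch of the corresponding model, assuming the usual conversion sigma = FWHM / (2*sqrt(2*ln 2)); the function name gaussian_model is illustrative and not part of the original code.

import numpy as np

def gaussian_model(p, x):
    """ Gaussian plus constant continuum; p = [amplitude, center, FWHM, continuum] (sketch). """
    amplitude, center, fwhm, continuum = p
    # Convert the FWHM into the Gaussian standard deviation
    sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))
    return continuum + amplitude * np.exp(-(x - center) ** 2 / (2.0 * sigma ** 2))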
Example 2
def gaussfitter(x, y, p0, err=1.0):

    plsq = leastsq(residuals_gaussf, p0, args=(x, y, err))

    if (verbose):
        print_line('Gauss', "The best-fit parameters are: %s" % plsq[0])

    # Force the width parameter to be positive (the fit may return a negative
    # width, which describes the same Gaussian)
    results = [plsq[0][0], plsq[0][1], abs(plsq[0][2]), plsq[0][3]]
    return results
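
The residuals function residuals_gaussf is not among these snippets. A minimal sketch compatible with the leastsq(residuals_gaussf, p0, args=(x, y, err)) call above, assuming the parameter order [amplitude, center, FWHM, continuum] produced by fit_estimator and the gaussian_model sketch from Example 1:

from scipy.optimize import leastsq

def residuals_gaussf(p, x, y, err=1.0):
    """ Weighted residuals between the data and the Gaussian model (sketch). """
    return (y - gaussian_model(p, x)) / err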
Example 3
def dataset_gaussfit(dataset):
    """ Fits Gaussian function for each observation of the dataset """

    if not verbose:
        print_line('Gauss', 'Fitting Gaussian function to the data')

    Gauss_params = [gauss_estimnfit(obs[0], obs[1]) for obs in dataset]

    print_line('Gauss', 'Gaussian function fitting complete')

    if (plotfit):
        # plot fit of gaussian functions
        interGaussianplot(dataset, Gauss_params)

    return Gauss_params
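
gauss_estimnfit itself is not shown in these examples. From its usage it presumably chains the parameter estimation of Example 1 with the fit of Example 2; a hedged sketch under that assumption (and assuming obs[0] is the abscissa and obs[1] the flux):

def gauss_estimnfit(x, y, smooth=0):
    """ Estimate starting parameters and fit a Gaussian to one observation (sketch). """
    p0 = fit_estimator(y, x, smooth)
    if p0 == []:
        return []  # no peak found, nothing to fit
    return gaussfitter(x, y, p0)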
Example 4
def run_indicator_analysis_CUSTOM(indTag, dataset, Gauss_params, file_names,
                                  resultsdir):
    """ do BIS analysis and store output in resultsdir 
	this analysis covers the hypothesys of having BIScustom, an advanced 
	feature capability"""

    print '\n'
    print_line(indTag, 'Starting %(Tag)s analysis' % {'Tag': indTag})

    IndicatorDict = {
        "BIS": bissectorAll,
        "BIS-": bissectorAll,
        "BIS+": bissectorAll,
        "BIScustom": bissectorAll,
        "biGauss": biGauss,
        "Vasy": Vasy
    }
    indicator = IndicatorDict[indTag]

    FluxLevelsDict = {
        "BIS": [0.1, 0.4, 0.6, 0.9],
        "BIS-": [0.3, 0.4, 0.6, 0.7],
        "BIS+": [0.1, 0.2, 0.8, 0.9],
        "biGauss": None,
        "Vasy": None
    }
    if (indTag == "BIScustom"):
        FluxBottomMin, FluxBottomMax, FluxTopMin, FluxTopMax = 1, 1, 1, 1
        while not FluxBottomMin < FluxBottomMax < FluxTopMin < FluxTopMax:
            print_line(
                indTag,
                'Please enter custom flux limits (FluxBottomMin < FluxBottomMax < FluxTopMin < FluxTopMax)'
            )
            try:
                FluxBottomMin, FluxBottomMax = input('{:^12} {:.<19}'.format(
                    '', 'VBottom (min,max): '))
                FluxTopMin, FluxTopMax = input('{:^12} {:<16}'.format(
                    '', 'Vtop (min,max): '))
                FluxLevelsDict["BIScustom"] = [
                    FluxBottomMin, FluxBottomMax, FluxTopMin, FluxTopMax
                ]
            except (GeneratorExit, SystemExit):
                sys.exit()
            except:
                print 'Wrong input!'
Example 5
def corr_MCcomp(indTag, vector1, vector2, MC_number=100000):
    """calculates the significance of a correlation by shuffling the data
	to obtain non-correlated data and using it as H0 hypothesis"""
    r_origdata = r_Pearson(vector1, vector2)

    print_line(
        indTag,
        'Calculating significance of correlation using permutation test')

    vector_copy = list(vector2)
    # shuffle vector2 and calculate r_Pearson for each permutation
    r_shuffleddist = [
        r_Pearson(vector1, shuffle_FY(vector_copy)) for i in xrange(MC_number)
    ]
    # N.B.: the same copy is re-shuffled on every iteration instead of being
    # rebuilt with list() each time, in order to save time.
    print_line(indTag, 'Permutation test complete')

    # This could be written in one line using norm.cdf's loc/scale parameters,
    # but it is split in two steps for clarity.
    dev_data = abs(r_origdata) / np.std(r_shuffleddist)
    prob_data = (1.0 - norm.cdf(dev_data))

    print_line(indTag,"The Pearson's rho is %(var1)f, and is at %(var2).3f sigma and thus \
having a prob of %(var3).3f %% when assuming a (single-tailed) Gaussian distribution."    \
      %{'var1':r_origdata, 'var2':dev_data,'var3':(prob_data*100.0) })

    #print "["+indTag+"] The Pearson's rho is %(var1)f, and is at %(var2).3f sigma and thus \
    #having a prob of %(var3).3f %% when assuming a (single-tailed) Gaussian distribution." \
    #%{'var1':r_origdata, 'var2':dev_data,'var3':(prob_data*100.0)}

    return [r_origdata, dev_data, prob_data]
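
Neither shuffle_FY nor r_Pearson appears in these snippets. The names suggest an in-place Fisher-Yates shuffle and the standard Pearson correlation coefficient; minimal sketches under those assumptions (np.random.shuffle and np.corrcoef would be equivalent off-the-shelf choices):

import random
import numpy as np

def shuffle_FY(seq):
    """ In-place Fisher-Yates shuffle; returns the shuffled sequence (sketch). """
    for i in range(len(seq) - 1, 0, -1):
        j = random.randint(0, i)         # pick a random index in [0, i]
        seq[i], seq[j] = seq[j], seq[i]  # swap the two elements
    return seq

def r_Pearson(vector1, vector2):
    """ Pearson correlation coefficient between two equal-length vectors (sketch). """
    return np.corrcoef(vector1, vector2)[0, 1]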
Example 6
def run_indicator_analysis(indTag, dataset, Gauss_params, file_names,
                           resultsdir, RVextvalues):
    """ do BIS analysis and store output in resultsdir """
    print '\n'
    print_line(indTag, 'Starting %(Tag)s analysis' % {'Tag': indTag})

    # Select the indicator function to use from the indTag parameter
    IndicatorDict = {
        "BIS": bissector,
        "BIS-": bissector,
        "BIS+": bissector,
        "biGauss": biGauss,
        "Vasy": Vasy,
        "Vspan": Vspan,
        "FWHM": FWHM_analysis
    }
    indicator = IndicatorDict[indTag]

    # Flux levels associated with the different BIS variants
    FluxLevelsDict = {
        "BIS": [0.1, 0.4, 0.6, 0.9],
        "BIS-": [0.3, 0.4, 0.6, 0.7],
        "BIS+": [0.1, 0.2, 0.8, 0.9],
        "biGauss": None,
        "Vasy": None,
        "Vspan": None
    }

    if (indicator == bissector):
        # Use the flux levels when a bisector variant is selected
        Ind_pars = [
            indicator(DataLine[0], DataLine[1], GaussParam,
                      FluxLevelsDict[indTag])
            for DataLine, GaussParam in zip(dataset, Gauss_params)
        ]
    else:
        Ind_pars = [
            indicator(DataLine[0], DataLine[1], GaussParam)
            for DataLine, GaussParam in zip(dataset, Gauss_params)
        ]

    print_line(indTag, '%(Tag)s analysis complete' % {'Tag': indTag})

    RV, Ind = rearrange_indicator_data(Ind_pars)

    if (RVextvalues != []):
        #RV values to be used become RV external values
        print_line(indTag, 'Note: External RV values will be used.')
        RV = np.array(RVextvalues)

    slope_params, fitted_slope, chi2, std_res = StatTools.fitter_slope(
        RV, Ind)
    rPearson_data, dev_data, prob_data = StatTools.corr_MCcomp(indTag, RV, Ind)

    output_ASCII = save_results(resultsdir, indTag, file_names, RV, Ind,
                                Gauss_params, fitted_slope, slope_params, chi2,
                                std_res, rPearson_data, dev_data, prob_data)

    return output_ASCII
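
A possible call, with hypothetical file names and results directory, and an empty list so that the RVs extracted by the indicator itself are used rather than external values:

# Illustrative invocation only; the paths and variable names are assumptions
output_file = run_indicator_analysis('BIS', dataset, Gauss_params,
                                     file_names, 'results/', [])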
Example 7
def peakfinder(y, x=None, smooth=0):
    """ function that finds the peak in a Gaussian function """

    if x is None:
        x = np.arange(0, len(y), 1.0)

    # The peak is located from the first derivative of the profile: a sign
    # change from negative to positive marks a local minimum.
    deriv1 = differentiate(y, x)
    if (smooth == 1):
        # Smoothing in a size-3 box
        smoothed = np.zeros(len(deriv1))
        smoothed[0] = deriv1[0]
        smoothed[-1] = deriv1[-1]
        for i in range(1, len(deriv1) - 1):
            smoothed[i] = np.mean(deriv1[i - 1:i + 2])
    else:
        smoothed = deriv1

    # Peak listing
    zerolist = []
    for i in range(len(smoothed) - 1):
        if (smoothed[i] < 0.0 and smoothed[i + 1] > 0.0):
            zerolist.append([x[i + 2], y[i + 1]])
    if (zerolist == []):
        print_line('Gauss', "No peaks were found")
        return zerolist
    else:
        lower = zerolist[0][1]
        low_set = zerolist[0]
        # We assume that the probability of two peaks having exactly the same
        # depth is negligible.
        for i in range(len(zerolist)):
            if (zerolist[i][1] < lower):
                lower = zerolist[i][1]
                low_set = zerolist[i]

        return low_set
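
differentiate is not included in these examples. A minimal sketch of a numerical first derivative compatible with the call above, using forward differences (whether the original uses forward or central differences is not shown here):

import numpy as np

def differentiate(y, x):
    """ Forward-difference first derivative, one element shorter than y (sketch). """
    y = np.asarray(y, dtype=float)
    x = np.asarray(x, dtype=float)
    return (y[1:] - y[:-1]) / (x[1:] - x[:-1])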
Example 8
                sys.exit()
            except:
                print 'Wrong input!'

    try:
        Ind_pars = [
            indicator(DataLine[0], DataLine[1], GaussParam,
                      FluxLevelsDict[indTag])
            for DataLine, GaussParam in zip(dataset, Gauss_params)
        ]
    except UnboundLocalError:
        Ind_pars = [
            indicator(DataLine[0], DataLine[1], GaussParam)
            for DataLine, GaussParam in zip(dataset, Gauss_params)
        ]

    print_line(indTag, '%(Tag)s analysis complete' % {'Tag': indTag})

    RV, Ind = rearrange_indicator_data(Ind_pars)

    slope_params, fitted_slope, chi2, std_res = StatTools.fitter_slope(
        RV, Ind)
    rPearson_data, dev_data, prob_data = StatTools.corr_MCcomp(indTag, RV, Ind)

    save_results(resultsdir, indTag, file_names, RV, Ind, Gauss_params,
                 fitted_slope, slope_params, chi2, std_res, rPearson_data,
                 dev_data, prob_data)


################################################################################