Beispiel #1
0
def fit_it(params, args, method='nelder', kwargs=None):
    """ Carries out the fit.

    Parameters
    ----------
    params : lmfit.Parameters() instance
        Call load_params to generate.
    args : tuple
        Arguments to pass to the function to minimize. Must contain a
        wavelength array as first argument, optional second and third argument
        will be interpreted as data and errors, respectively.
        arrays are optional.
    method : str, optional
        Fit method for the first minimization passes (default 'nelder').
    kwargs : tuple
        keyword arguments, will be passed directly to the lmfit.minimize()
        function. See lmfit docs for options.
        NOTE(review): currently ignored by this function.

    Returns
    -------
    result : lmfit.Minimizer() object
    """
    # Unpack inputs; data/errs are only consumed by the plotting code
    # commented out below.
    x = args[0]
    data = args[1]
    errs = args[2]
    # Model at the initial guess, for the diagnostic plot below.
    earlymodel = build_model(params, x)
    # Now: fitting.
    print(params)
    try:
        # Two passes with the requested method to refine the starting point
        # (older lmfit updates `params` in place), then a leastsq polish.
        result = lf.minimize(build_model, params, args=args, method=method)
        result = lf.minimize(build_model, params, args=args, method=method)
        # BUGFIX: was method='lestsq' (typo), which raised and always sent
        # control to the except-branch below.
        result = lf.minimize(build_model, params, args=args, method='leastsq')
    except Exception:
        # Fallback: single pass with the requested method plus leastsq polish.
        # BUGFIX: was a bare `except:`.
        result = lf.minimize(build_model, params, args=args, method=method)
        result = lf.minimize(build_model, params, args=args, method='leastsq')
    lf.report_errors(params)
    # Model at the best-fit parameters (used by the plotting code below).
    latemodel = build_model(result.params, x)
    #import matplotlib.pyplot as plt
    #plt.clf()
    #plt.errorbar(x, data, yerr=errs, color='black', label='Data', lw=2.)
    #plt.axhline(y=0., color='black')
    #plt.plot(x, earlymodel, lw=1.6, label='Guess', color='green')
    #plt.plot(x, latemodel, lw=1.6, label='Fit', color='orange')
    #plt.legend(fancybox=True, shadow=True)
    #plt.grid()
    #plt.title('Plot of initial guess and LMfit best fit.')
    #plt.xlabel(u'Wavelegth')
    #plt.ylabel('Flux')
    #plt.show()

    return result
Beispiel #2
0
def test_ci():
    """Check 1-sigma confidence intervals agree with stderr to within 5%."""
    np.random.seed(1)

    # Ground-truth parameters of the damped sinusoid.
    p_true = Parameters()
    for pname, pval in (('amp', 14.0), ('period', 5.33),
                        ('shift', 0.123), ('decay', 0.010)):
        p_true.add(pname, value=pval)

    def residual(pars, x, data=None):
        # Damped sinusoid; fold the phase into (-pi/2, pi/2].
        shift = pars['shift']
        if abs(shift) > pi / 2:
            shift = shift - np.sign(shift) * pi
        model = (pars['amp'] * np.sin(shift + x / pars['period'])
                 * np.exp(-x * x * pars['decay'] * pars['decay']))
        return model if data is None else model - data

    # Synthetic data: true model plus Gaussian noise.
    npts = 2500
    noise = np.random.normal(scale=0.7215, size=npts)
    x = np.linspace(0., 250.0, npts)
    data = residual(p_true, x) + noise

    # Starting guesses deliberately offset from the truth.
    fit_params = Parameters()
    for pname, pval in (('amp', 13.0), ('period', 4),
                        ('shift', 0.1), ('decay', 0.02)):
        fit_params.add(pname, value=pval)

    out = minimize(residual, fit_params, args=(x,), kws={'data': data})

    fit = residual(fit_params, x)

    print(' N fev = ', out.nfev)
    print(out.chisqr, out.redchi, out.nfree)

    report_errors(fit_params)
    ci, tr = conf_interval(out, sigmas=[0.674], trace=True)
    report_ci(ci)
    # For ~1-sigma, the half-widths of the CI should match stderr.
    for pname in out.params:
        lo, best, hi = ci[pname][0][1], ci[pname][1][1], ci[pname][2][1]
        stderr = out.params[pname].stderr
        assert abs((best - lo) - stderr) / stderr < 0.05
        assert abs((hi - best) - stderr) / stderr < 0.05
Beispiel #3
0
def fit_rayl_sky_filter(all_data, in_params, filter_name, rayl_angle):
    """Fit the Rayleigh-scattering magnitude for one filter.

    Only ``rayl_m`` is left free; every other model parameter is fixed at
    the value supplied in ``in_params``.

    Parameters
    ----------
    all_data : DataFrame-like
        Sky measurements; must support ``.query()`` with the columns below.
    in_params : lmfit.Parameters
        Starting values for all model parameters.
    filter_name : str
        Photometric band to select.
    rayl_angle : float
        Minimum moon angle for the selection cut.

    Returns
    -------
    lmfit.Parameters
        The fitted parameter set.
    """
    params = Parameters()
    params.add('k', value=in_params['k'].value, min=0.0, max=5.0, vary=False)
    params.add('m_inf', value=in_params['m_inf'].value,
               min=15.0, max=30.0, vary=False)
    params.add('m_zen', value=in_params['m_zen'].value,
               min=15.0, max=30.0, vary=False)
    params.add('h', value=in_params['h'].value, min=60, max=3000, vary=False)
    params.add('g', value=in_params['g'].value, min=0.0, max=1.0, vary=False)
    params.add('mie_m', value=in_params['mie_m'].value,
               min=10.0, max=30.0, vary=False)
    # The single free parameter of this fit.
    params.add('rayl_m', value=in_params['rayl_m'].value,
               min=10.0, max=30.0, vary=True)
    params.add('sun_dm', value=in_params['sun_dm'].value,
               min=-20.0, max=-10.0, vary=False)
    # BUGFIX: twi1/twi2 were passed the Parameter objects themselves
    # (in_params['twi1']) instead of their numeric .value, unlike every
    # other entry above.
    params.add('twi1', value=in_params['twi1'].value,
               min=-100.0, max=100.0, vary=False)
    params.add('twi2', value=in_params['twi2'].value,
               min=-100.0, max=100.0, vary=False)

    data = all_data.query(
        "band == '%s' and airmass<2.0 and moon_brightness>0.0 and moon_angle>%f and moon_zd<80.0 and sun_zd>108.0"
        % (filter_name, rayl_angle))

    # BUGFIX: was a Python 2 print statement.
    print("Fitting to %d points" % len(data))
    fit = minimize(residuals, params, args=(data, ))

    report_errors(fit.params)
    return fit.params
Beispiel #4
0
def fit_twilight(all_data, in_params, filter_name):
    """Fit the twilight contribution to the sky model for one filter.

    ``sun_dm``, ``twi1`` and ``twi2`` are free; every other model parameter
    is fixed at the value supplied in ``in_params``.

    Parameters
    ----------
    all_data : DataFrame-like
        Sky measurements; must support ``.query()`` with the columns below.
    in_params : lmfit.Parameters
        Starting values for all model parameters.
    filter_name : str
        Photometric band to select.

    Returns
    -------
    lmfit.Parameters
        The fitted parameter set.
    """
    params = Parameters()
    params.add('k', value=in_params['k'].value, min=0.0, max=5.0, vary=False)
    params.add('m_inf', value=in_params['m_inf'].value,
               min=15.0, max=30.0, vary=False)
    params.add('m_zen', value=in_params['m_zen'].value,
               min=15.0, max=30.0, vary=False)
    params.add('h', value=in_params['h'].value, min=80, max=3000, vary=False)
    params.add('g', value=in_params['g'].value, min=0.0, max=1.0, vary=False)
    params.add('mie_m', value=in_params['mie_m'].value,
               min=10.0, max=30.0, vary=False)
    params.add('rayl_m', value=in_params['rayl_m'].value,
               min=10.0, max=30.0, vary=False)
    # The twilight-related parameters are the free ones here.
    params.add('sun_dm', value=in_params['sun_dm'].value,
               min=-20.0, max=-10.0, vary=True)
    # BUGFIX: twi1/twi2 were passed the Parameter objects themselves
    # (in_params['twi1']) instead of their numeric .value, unlike every
    # other entry above.
    params.add('twi1', value=in_params['twi1'].value,
               min=-100.0, max=100.0, vary=True)
    params.add('twi2', value=in_params['twi2'].value,
               min=-100.0, max=100.0, vary=True)

    data = all_data.query(
        "band == '%s' and airmass < 2.0 and moon_zd > 108.0 and sun_zd < 106.0"
        % filter_name)

    fit = minimize(residuals, params, args=(data, ))

    report_errors(fit.params)
    return fit.params
Beispiel #5
0
def fit_hi_vel_range(guesses=None,
                     av_image=None,
                     av_image_error=None,
                     hi_cube=None,
                     hi_velocity_axis=None,
                     hi_noise_cube=None,
                     dgr=None):
    """Fit the HI velocity range that best reproduces the Av image.

    Parameters
    ----------
    guesses : sequence of two floats
        Initial (low_vel, high_vel) values.
    av_image, av_image_error : array-like
        Av image and its uncertainty, forwarded to ``calc_model_chisq``.
    hi_cube, hi_velocity_axis, hi_noise_cube : array-like
        HI data cube, its velocity axis and its noise cube.
    dgr : float
        Dust-to-gas ratio.
    """
    from scipy.optimize import curve_fit
    from scipy import stats
    from lmfit import minimize, Parameters, report_fit, report_errors
    from pprint import pprint

    params = Parameters()
    params.add('low_vel', value=guesses[0], min=-100, max=100, vary=True)
    params.add('high_vel', value=guesses[1], min=-100, max=100, vary=True)

    result = minimize(
        calc_model_chisq,
        params,
        kws={
            'av_image': av_image,
            'av_image_error': av_image_error,
            'hi_cube': hi_cube,
            'hi_velocity_axis': hi_velocity_axis,
            'hi_noise_cube': hi_noise_cube,
            'dgr': dgr
        },
        #method='BFGS',
        # NOTE(review): 'anneal' was removed from scipy/lmfit long ago;
        # modern installs will reject it -- confirm the intended method.
        method='anneal',
        #method='powell',
        #method='SLSQP',
        options={
            'gtol': 1e-6,
            'disp': True,
            'maxiter': 1e9
        })

    report_fit(params)
    report_errors(params)
    # BUGFIX: the statements below were Python 2 print statements.
    print(result.values)
    print(result.errorbars)
    #print(result.__dict__)
    #print(dir(result))
    #print result.vars
    #print help(result)
    # NOTE(review): view_items()/Bopt are not standard MinimizerResult
    # attributes -- looks like leftover debugging; verify before relying
    # on them.
    print(result.view_items())
    print(result.Bopt)
def fit_circle(x, y, xc=0.0, yc=0.0):
    """
    Fit a circle to an arc given by coordinates ``x``, ``y``.

    Parameters
    ----------
    x, y : array-like
        Coordinates of the arc.
    xc, yc : float, optional
        Initial guesses for the circle center.

    Returns
    -------
    Rc, xc, yc
        Radius derived from the data and the fitted center coordinates.
    """
    # Only the center is a fit parameter; the radius is derived afterwards.
    pars = lmfit.Parameters()
    pars.add("xc", value=xc)
    pars.add("yc", value=yc)
    lmfit.minimize(model_minus_data, pars, args=(x, y))
    lmfit.report_errors(pars)
    best_xc = pars["xc"].value
    best_yc = pars["yc"].value
    return Rc_from_data(x, y, best_xc, best_yc), best_xc, best_yc
def fit_hi_vel_range(guesses=None, av_image=None, av_image_error=None,
        hi_cube=None, hi_velocity_axis=None, hi_noise_cube=None, dgr=None):
    """Fit the HI velocity range that best reproduces the Av image.

    Parameters
    ----------
    guesses : sequence of two floats
        Initial (low_vel, high_vel) values.
    av_image, av_image_error : array-like
        Av image and its uncertainty, forwarded to ``calc_model_chisq``.
    hi_cube, hi_velocity_axis, hi_noise_cube : array-like
        HI data cube, its velocity axis and its noise cube.
    dgr : float
        Dust-to-gas ratio.
    """
    from scipy.optimize import curve_fit
    from scipy import stats
    from lmfit import minimize, Parameters, report_fit, report_errors
    from pprint import pprint

    params = Parameters()
    params.add('low_vel',
               value=guesses[0],
               min=-100,
               max=100,
               vary=True)
    params.add('high_vel',
               value=guesses[1],
               min=-100,
               max=100,
               vary=True)

    result = minimize(calc_model_chisq,
                      params,
                      kws={'av_image': av_image,
                           'av_image_error': av_image_error,
                           'hi_cube': hi_cube,
                           'hi_velocity_axis': hi_velocity_axis,
                           'hi_noise_cube': hi_noise_cube,
                           'dgr': dgr},
                      #method='BFGS',
                      # NOTE(review): 'anneal' was removed from scipy/lmfit
                      # long ago; modern installs will reject it.
                      method='anneal',
                      #method='powell',
                      #method='SLSQP',
                      options={'gtol': 1e-6,
                               'disp': True,
                               'maxiter' : 1e9}
                      )

    report_fit(params)
    report_errors(params)
    # BUGFIX: the statements below were Python 2 print statements.
    print(result.values)
    print(result.errorbars)
    #print(result.__dict__)
    #print(dir(result))
    #print result.vars
    #print help(result)
    # NOTE(review): view_items()/Bopt are not standard MinimizerResult
    # attributes -- looks like leftover debugging.
    print(result.view_items())
    print(result.Bopt)
def fit_hi_vel_range(
    guesses=None, av_image=None, av_image_error=None, hi_cube=None, hi_velocity_axis=None, hi_noise_cube=None, dgr=None
):
    """Fit the HI velocity range that best reproduces the Av image.

    Parameters
    ----------
    guesses : sequence of two floats
        Initial (low_vel, high_vel) values.
    av_image, av_image_error : array-like
        Av image and its uncertainty, forwarded to ``calc_model_chisq``.
    hi_cube, hi_velocity_axis, hi_noise_cube : array-like
        HI data cube, its velocity axis and its noise cube.
    dgr : float
        Dust-to-gas ratio.
    """
    from scipy.optimize import curve_fit
    from scipy import stats
    from lmfit import minimize, Parameters, report_fit, report_errors
    from pprint import pprint

    params = Parameters()
    params.add("low_vel", value=guesses[0], min=-100, max=100, vary=True)
    params.add("high_vel", value=guesses[1], min=-100, max=100, vary=True)

    result = minimize(
        calc_model_chisq,
        params,
        kws={
            "av_image": av_image,
            "av_image_error": av_image_error,
            "hi_cube": hi_cube,
            "hi_velocity_axis": hi_velocity_axis,
            "hi_noise_cube": hi_noise_cube,
            "dgr": dgr,
        },
        # method='BFGS',
        # NOTE(review): 'anneal' was removed from scipy/lmfit long ago;
        # modern installs will reject it.
        method="anneal",
        # method='powell',
        # method='SLSQP',
        options={"gtol": 1e-6, "disp": True, "maxiter": 1e9},
    )

    report_fit(params)
    report_errors(params)
    # BUGFIX: the statements below were Python 2 print statements.
    print(result.values)
    print(result.errorbars)
    # print(result.__dict__)
    # print(dir(result))
    # print result.vars
    # print help(result)
    # NOTE(review): view_items()/Bopt are not standard MinimizerResult
    # attributes -- looks like leftover debugging.
    print(result.view_items())
    print(result.Bopt)
# BUGFIX: the print statements in this section were Python 2 syntax.
print('SNR (weighted) for this flux: ', wsnr)

# Show the mask next to the sky-subtracted mixture image.
fig = pl.figure()
ax = fig.add_subplot(121)
im = ax.imshow(mask)
ax2 = fig.add_subplot(122)
im2 = ax2.imshow(mixImNoSky.array)
pl.show()

#Minimize residual of mixture and fit with params
if PSF:  # was `PSF==True`; plain truth test is equivalent for a bool flag
    out = lm.minimize(lib.residualPSF, params, args=[mixIm])
else:
    out = lm.minimize(lib.residual, params, args=[mixIm])
print('The minimum least squares is:', out.chisqr)
lm.report_errors(params)

#Draw best fit
fitIm = lib.drawFit(params)

#Plot mixture, best fit, and residual
fig = pl.figure()
# Figure title: parameters of the dominant/contaminant galaxies and the fit.
domStr = 'Dominant Galaxy: (' + str(domParams['centX'].value) + ', ' + str(domParams['centY'].value) + '), ' + str(DOMINANT_FLUX) + ', ' + str(DOMINANT_HALF_LIGHT_RADIUS) + ', ' + str(DOMINANT_FLUX_FRACTION) + ', 0, 0'
contStr = 'Contaminant Galaxy: (' + str(dx) + ', ' + str(dy) + '), ' + str(CONTAMINANT_FLUX) + ', ' + str(CONTAMINANT_HALF_LIGHT_RADIUS) + ', ' + str(CONTAMINANT_FLUX_FRACTION) + ', 0, 0'
fitStr = 'Fit: (' + str(np.around(params['fitCentX'].value, decimals=2)) + ', ' + str(np.around(params['fitCentY'].value, decimals=2)) + '), ' + str(np.around(params['fitDiskFlux'].value, decimals=2)) + ', '  + str(np.around(params['fitDiskHLR'].value, decimals=2)) + ', ' + str(np.around(params['fite1'].value, decimals=2)) + ', ' + str(np.around(params['fite2'].value, decimals=2))
titleStr = 'Parameters (centroid, flux, hlr, flux fraction, e1, e2)\n' + domStr + '\n' + contStr + '\n' + fitStr + '\nPixel Scale: ' + str(PIXEL_SCALE) + ' arcsec/pixel'
fig.suptitle(titleStr, fontsize=18)

ax11 = fig.add_subplot(131)
c11 = ax11.imshow(mixIm.array, origin='lower')
ax11.set_title('Sersic Mixture')
Beispiel #10
0
params = lmfit.Parameters()

# Fourier amplitudes of the sidereal modulation (see per-line notes).
params.add('A', value=0, vary=True)  ##cos ### -3 cXZ sin 2 chi: 7 +- 23 mHz
params.add('B', value=0, vary=True)  ##sin ### -3 cYZ sin 2 chi: 32 +- 56 mHz
params.add('C', value=0,
           vary=True)  ##cos2 ### -1.5 (cXX-cYY) sin^2 chi: 15 +- 22 mHz
params.add('D', value=0, vary=True)  ##sin2 ### -3 cXY sin^2 chi: 8 +- 20 mHz
#params.add('offset', value = 0.0)

result = lmfit.minimize(cosine_fit, params, args=(x, y, yerr))

# Rescale residuals back to data units -- assumes cosine_fit returns
# residual / yerr (TODO confirm against cosine_fit's definition).
residual_array = result.residual * yerr

#fit_values  = y + result.residual

lmfit.report_errors(params, min_correl=0)

# BUGFIX: was a Python 2 print statement.
print("Reduced chi-squared = ", result.redchi)

#print result.redchi

#print 1/params['freq'].value/3600

# Dense grid for plotting the fitted model.
x_plot = np.linspace(x.min(), x.max(), 1000)

figure = pyplot.figure(i + 1)
figure.clf()

pyplot.plot(x, y, '-')
pyplot.plot(x_plot, cosine_model(params, x_plot), linewidth=3.0)
#pyplot.errorbar(time_array,freq_array,width_array, linestyle='None',markersize = 4.0,fmt='o',color='black')
Beispiel #11
0
def loop(variable, initial, delta, num):
    """Scan one orbital parameter and plot the chi^2 of a sine-curve fit.

    For ``num`` values of ``variable`` starting at ``initial`` in steps of
    ``delta``, hold that parameter fixed, fit the radial velocities in
    ``op.file`` with a circular-orbit model, and plot reduced chi^2 against
    the scanned value.

    Parameters
    ----------
    variable : str
        One of 'porb', 'hjd0', 'gama' or 'k1'.
    initial, delta : float
        Start value and step size of the scan.
    num : int
        Number of scan points.
    """
    import param as op
    try:
        # BUGFIX: reload() is no longer a builtin in Python 3.
        from importlib import reload
    except ImportError:
        pass  # Python 2: builtin reload
    reload(op)
    runner = initial + np.arange(num) * delta
    chisqr = []
    for kk in runner:
        if not os.path.isfile(op.file):  # was `== False`
            print('Error: No input file')
            print('Check that file is in current folder')
            sys.exit()

        data = np.loadtxt(op.file, usecols=(0, 1))

        hjd, vel, err = [], [], []
        # NOTE(review): usecols=(0, 1) loads only two columns, so the
        # three-column branch below can never trigger -- confirm intent.
        if len(data[0]) == 3:
            for i in data:
                hjd.append(i[0]), vel.append(i[1]), err.append(i[2])
        if len(data[0]) == 2:
            for i in data:
                hjd.append(i[0]), vel.append(i[1])
        hjd, vel = np.array(hjd), np.array(vel)

        # %%%%%%%%%%%%%%%%%%%%%%%% ORBIT PARAMETERS

        # The scanned parameter is fixed at the current grid value kk;
        # the others start from param.py and vary according to op.fix_*.
        params = lm.Parameters()
        if variable == 'porb':
            params.add('porb', value=kk, vary=False)
            var_label = 'P$_{orb}$ / d'
        else:
            params.add('porb', value=op.porb, vary=op.fix_porb)
        if variable == 'hjd0':
            params.add('hjd0', value=kk, vary=False)
            var_label = 'HJD$_{0}$ / d'
        else:
            params.add('hjd0', value=op.hjd0, vary=op.fix_hjd0)
        if variable == 'gama':
            params.add('gama', value=kk, vary=False)
            # Raw string avoids the invalid '\g' escape (same value).
            var_label = r'$\gamma$ / km s$^{-1}$'
        else:
            params.add('gama', value=op.gama, vary=op.fix_gama)
        if variable == 'k1':
            params.add('k1', value=kk, vary=False)
            var_label = 'K$_{1}$ / km s$^{-1}$'
        else:
            params.add('k1', value=op.k1, vary=op.fix_k1)

        if op.errors:
            err = np.loadtxt(op.file)[:, 2]
        else:
            err = op.sigma + np.zeros(len(hjd))

        def res_sin3(pars, x, data=None, sigma=err):
            """Residuals of a circular-orbit (sine) radial-velocity model."""
            porb = pars['porb'].value
            hjd0 = pars['hjd0'].value
            gama = pars['gama'].value
            k1 = pars['k1'].value
            model = gama + k1 * np.sin(2 * np.pi * ((x - hjd0) / porb))
            if data is None:
                return model
            if sigma is None:
                return (model - data)
            return (model - data) / sigma

        results = lm.minimize(res_sin3, params, args=(hjd, vel))
        print('Looping ' + str(variable) + ': ' + str(kk) + '\n')
        lm.report_errors(params, show_correl=False)

        print('DoF = ', results.nfree)
        print('Chi-squared = ', results.chisqr)
        print('--------------')
        chisqr.append(results.chisqr)
    chisqr = np.array(chisqr)
    print('%%%%%%%%%%%%%%%%%%%5%%%')
    print("Minimum Chi^2: ", runner[chisqr == np.min(chisqr)][0])
    print("Porb_min = {:.8f}".format(runner[chisqr == np.min(chisqr)][0]))
    print('%%%%%%%%%%%%%%%%%%%5%%%')
    plt.figure(num='Loop', facecolor='w')
    plt.clf()
    plt.plot(runner, chisqr / results.nfree, 'rs', markersize=6)
    plt.plot(runner, chisqr / results.nfree, 'k-')
    plt.axvline(x=runner[chisqr == np.min(chisqr)][0],
                ls='--',
                color='b',
                alpha=0.4)
    plt.xlabel(var_label)
    # Raw string avoids the invalid '\c' escape (same value).
    plt.ylabel(r'$\chi^2$ / ' + str(results.nfree) + ' dof')
    plt.show()
    plt.tight_layout()
def main():
    """Fit a convolved luminescence decay, bootstrap the parameter errors,
    apply a runs test to the residuals, and plot the results.

    Reads the data file named by the first command-line argument with
    columns: time, measured decay y(t), impulse response g(t); the first
    two rows are skipped.
    """

    #----------------------------------------------------------------------
    # import data
    #----------------------------------------------------------------------

    data = genfromtxt(sys.argv[1])

    t_c = data[2:, 0]  #time
    y_c = data[2:, 1]  #measured decay curve y(t)
    g_c = data[2:, 2]  #impulse g(t)

    #----------------------------------------------------------------------
    # lmfit section
    #----------------------------------------------------------------------

    params = Parameters()
    params.add('I_0', value=1.0)
    params.add('time0', value=1.0)
    # minimize residuals using lmfit
    # with Levenberg-Marquardt method,
    # the first, "signal" fitting procedure

    rezult = minimize(Conv_residuals,
                      params,
                      args=(t_c, y_c, g_c),
                      method='leastsq')
    #--------------------------------------------------------------------
    # leastsq, nelder, lbfgsb, anneal, powell, cg, newton, cobyla, slsqp

    provisional_parameters = rezult  #will be printed at the end of the code

    residuals = Conv_residuals(rezult.params, t_c, y_c, g_c)  #residuals
    y_lmfit = y_c + residuals  #fitted line

    #----------------------------------------------------------------------
    # bootstrapping section
    #----------------------------------------------------------------------

    I_0 = []  # arrays of calculated parameters
    t_0 = []  # during bootstrapping

    iteration = int(input('How many bootstrapping iterations? '))

    # number of boostrap procedures

    for i in range(iteration):

        params = Parameters()  # repeat lmfit minimize using
        params.add('I_0', value=10.0)  # Levenberg-Marquardt method
        params.add('time0', value=1.0)  # with
        # y(t) + randomly sampled residuals

        rezult = minimize(Conv_residuals,
                          params,
                          args=(t_c, bootstrap(y_c, residuals), g_c),
                          method='leastsq')

        I_0.append(rezult.params['I_0'].value)
        t_0.append(rezult.params['time0'].value)

    #----------------------------------------------------------------------
    # quantiles/errors and means
    #----------------------------------------------------------------------

    err_I_0 = stats.mstats.mquantiles(I_0, [0.0, 1.0])
    err_t_0 = stats.mstats.mquantiles(t_0, [0.0, 1.0])

    # 0.25, 0.75 -quantiles may be used instead of the full span

    #----------------------------------------------------------------------
    # runs test
    #----------------------------------------------------------------------

    # n_pos/n_neg: numbers of positive and negative residuals.
    # (Renamed from np/nm -- `np` shadowed the conventional numpy alias.)
    n_pos = n_neg = 0
    nR = 1  # observed number of runs (changes of sign)

    if residuals[0] < 0:
        n_neg += 1

    for i in range(1, len(residuals)):  # loop for calculating
        # n_neg and nR
        if residuals[i] < 0:
            n_neg += 1

            if residuals[i - 1] > 0:
                nR += 1

        elif residuals[i - 1] < 0:
            nR += 1

    n_pos = len(residuals) - n_neg  # number of positive residuals

    R = 1 + (2 * n_pos * n_neg) / (n_pos + n_neg)  #expected number of runs

    sigma_R = sqrt(2 * n_neg * n_pos * (2 * n_neg * n_pos - n_pos - n_neg) /
                   ((n_pos + n_neg - 1) * (n_pos + n_neg)**2))
    #variance of the expected number of runs

    if nR <= R:
        Z = (nR - R + 0.5) / sigma_R
    else:  # estimated standard normal
        Z = (nR - R - 0.5) / sigma_R  # distribution (Z-score)

    #----------------------------------------------------------------------
    #report results of calculations
    #----------------------------------------------------------------------

    print('\nLMFIT report:\n')  # results from the 'signal' fit
    report_errors(provisional_parameters)

    print('\nBootstrapping report:\n\nI_0 =', "%.4f" % median(I_0), '\t(-',
          "%.4f" % (100 * ((median(I_0) - err_I_0[0]) / median(I_0))), '% / +',
          "%.4f" % (100 * ((err_I_0[1] - median(I_0)) / median(I_0))), '%)')

    # NOTE! since the statistical approach
    # has been used, medians are more relevant
    # instead of means

    print('t_0 =', "%.4f" % median(t_0), '\t(-',
          "%.4f" % (100 * ((median(t_0) - err_t_0[0]) / median(t_0))), '% / +',
          "%.4f" % (100 * ((err_t_0[1] - median(t_0)) / median(t_0))), '%)\n')

    print('Runs test:\n\n Numbers of points:\n n_m =', n_neg, '\n n_p =',
          n_pos, '\n\n'
          'Observed number of runs n_R =', nR, '\n'
          'Expected number of runs R =', "%.4f" % R, '+/-', "%.4f" % sigma_R,
          '\n'
          'The standard normal distribution score Z =', "%.4f" % Z)

    #----------------------------------------------------------------------
    # plot section
    #----------------------------------------------------------------------

    f_c = []  # luminescence decay as exp-model

    for i in range(len(t_c)):
        f_c.append(median(I_0) * exp(-(t_c[i]) / median(t_0)))

    suptitle(r'Decay kinetics of BaF$_2$ 78 nm nanoparticles', fontsize=18)

    subplot(211)
    plot(t_c, y_c / max(y_c), 'bo',
         label=r'$y(t)$')  # all graphs are normalized
    plot(t_c, g_c / max(g_c), 'ro', label=r'$g(t)$')
    plot(t_c, y_lmfit / max(y_lmfit), 'm-', label='fitting curve')
    plot(t_c, f_c / max(f_c), 'g.--', label=r'$f(t)$')

    xlabel('Time (ns)', fontsize=15)
    ylabel('Intensity (a.u.)', fontsize=16)
    legend(loc=1)

    subplot(212)
    stem(t_c, residuals, linefmt='g--', markerfmt='bs', basefmt='r-')
    xlabel('Time (ns)', fontsize=15)
    ylabel(r'Residuals $y - y_{model}$', fontsize=16)

    subplots_adjust(hspace=0.3, wspace=0.3, right=0.95, top=0.92)
    show()

    #------------Histograms----------------------------

    suptitle(r'Decay kinetics of BaF$_2$ 78 nm nanoparticles', fontsize=18)

    subplot(121)
    hist(I_0, color='green')
    xlabel(r'$I_0$ (a.u.)', fontsize=16)
    ylabel(r'Frequency', fontsize=16)

    subplot(122)
    hist(t_0, color='green')
    xlabel(r'$t_0$ (ns)', fontsize=16)
    ylabel('Frequency', fontsize=16)

    subplots_adjust(hspace=0.4, left=0.1, right=0.95, top=0.92)
    show()
 def report(self, params):
     """Print lmfit's error report (stderr, correlations) for *params*."""
     lmfit.report_errors(params)
def test_report_errors_deprecated(fitresult):
    """Verify that a DeprecationWarning is shown when calling report_errors.

    ``fitresult`` is a pytest fixture supplying a completed fit whose
    params are handed to the deprecated top-level ``report_errors``.
    """
    with pytest.deprecated_call():
        report_errors(params=fitresult.params)
Beispiel #15
0
        omegataus.pop(i)
        masterchi.pop(i)
        mastererr.pop(i)
# Free the stretching exponent and reset the remaining fit parameters
# before the master-curve fit.
params["beta"].vary = True
params["logK_dd"].value = 0
params["logK_dd"].min = -2
params["logK_dd"].max = 2
params["logtau"].value = 0
params["logtau"].min = -3
params["logtau"].max = 3
omegataus = np.array(omegataus)
out = minimize(residual, params, args=(omegataus, masterchi, mastererr))
# Best-fit curve evaluated at the measured points.
result = omegataus + out.residual
fit = residual(params, omegataus)
# BUGFIX: was a Python 2 print statement.
print("beta " + str(params["beta"].value))
report_errors(params)

# Update tau parameters and rewrite the output file.
# (Was wrapped in a pointless `while True: ... break` loop and the file
# handle was never closed; `with` fixes both.)
with open("tau.dat", "w") as tauout:
    for i in range(len(taus)):
        taus[i] = taus[i] + params["logtau"].value
        tauout.write(str(temps[i]) + " " + str(taus[i]) + "\n")
omegataus = []
masterchi = []

# Reset the axes for the master-curve plot.
ax.cla()
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_title("Masterkurve")
    def extractfeatures_inside(self, DICOMImages, image_pos_pat, image_ori_pat, series_path, phases_series, VOI_mesh):
        """ Start pixVals for collection pixel values at VOI """
        pixVals = []
        deltaS = {}

        # necessary to read point coords
        VOIPnt = [0, 0, 0]
        ijk = [0, 0, 0]
        pco = [0, 0, 0]

        for i in range(len(DICOMImages)):
            abspath_PhaseID = series_path + os.sep + str(phases_series[i])
            print phases_series[i]

            # Get total number of files
            load = Inputs_init()
            [len_listSeries_files, FileNms_slices_sorted_stack] = load.ReadDicomfiles(abspath_PhaseID)
            mostleft_slice = FileNms_slices_sorted_stack.slices[0]

            # Get dicom header, retrieve
            dicomInfo_series = dicom.read_file(abspath_PhaseID + os.sep + str(mostleft_slice))

            # (0008,0031) AT S Series Time            # hh.mm.ss.frac
            seriesTime = str(dicomInfo_series[0x0008, 0x0031].value)
            # (0008,0033) AT S Image Time             # hh.mm.ss.frac
            imageTime = str(dicomInfo_series[0x0008, 0x0033].value)

            # (0008,0032) AT S Acquisition Time       # hh.mm.ss.frac
            ti = str(dicomInfo_series[0x0008, 0x0032].value)

            acquisitionTimepoint = datetime.time(hour=int(ti[0:2]), minute=int(ti[2:4]), second=int(ti[4:6]))
            self.timepoints.append(datetime.datetime.combine(datetime.date.today(), acquisitionTimepoint))

            # find mapping to Dicom space
            [transformed_image, transform_cube] = Display().dicomTransform(DICOMImages[i], image_pos_pat, image_ori_pat)

            ### Get inside of VOI
            [VOI_scalars, VOIdims] = self.createMaskfromMesh(VOI_mesh, transformed_image)
            print "\n VOIdims"
            print VOIdims

            # get non zero elements
            image_scalars = transformed_image.GetPointData().GetScalars()
            numpy_VOI_imagedata = vtk_to_numpy(image_scalars)

            numpy_VOI_imagedata = numpy_VOI_imagedata.reshape(VOIdims[2], VOIdims[1], VOIdims[0])
            numpy_VOI_imagedata = numpy_VOI_imagedata.transpose(2, 1, 0)

            print "Shape of VOI_imagedata: "
            print numpy_VOI_imagedata.shape

            #################### HERE GET IT AND MASK IT OUT
            self.nonzeroVOIextracted = nonzero(VOI_scalars)
            print self.nonzeroVOIextracted

            VOI_imagedata = numpy_VOI_imagedata[self.nonzeroVOIextracted]

            print "shape of VOI_imagedata  Clipped:"
            print VOI_imagedata.shape

            for j in range(len(VOI_imagedata)):
                pixValx = VOI_imagedata[j]
                pixVals.append(pixValx)

            # Now collect pixVals
            print "Saving %s" % "delta" + str(i)
            deltaS["delta" + str(i)] = pixVals
            pixVals = []

        print self.timepoints

        # Collecting timepoints in proper format
        t_delta = []
        t_delta.append(0)
        total_time = 0
        for i in range(len(DICOMImages) - 1):
            current_time = self.timepoints[i + 1]
            previous_time = self.timepoints[i]
            difference_time = current_time - previous_time
            timestop = divmod(difference_time.total_seconds(), 60)
            t_delta.append(t_delta[i] + timestop[0] + timestop[1] * (1.0 / 60))
            total_time = total_time + timestop[0] + timestop[1] * (1.0 / 60)

        # finally print t_delta
        print t_delta
        t = array(t_delta)
        print "total_time"
        print total_time

        ##############################################################
        # Finished sampling deltaS
        # APply lmfit to deltaS
        # first sample the mean
        data_deltaS = []
        t_deltaS = []
        mean_deltaS = []
        sd_deltaS = []
        se_deltaS = []
        n_deltaS = []

        # append So and to
        data_deltaS.append(0)
        t_deltaS.append(0)
        mean_deltaS.append(mean(deltaS["delta0"]))
        sd_deltaS.append(0)
        se_deltaS.append(0)
        n_deltaS.append(len(deltaS["delta0"]))

        for k in range(1, len(DICOMImages)):
            deltaS_i = (mean(array(deltaS["delta" + str(k)]).astype(float)) - mean(deltaS["delta0"])) / mean(
                deltaS["delta0"]
            )
            data_deltaS.append(deltaS_i)
            t_deltaS.append(k)
            print "delta" + str(k)
            print data_deltaS[k]

            ##############################################################
            # Calculate data_error
            # estimate the population mean and SD from our samples to find SE
            # SE tells us the distribution of individual scores around the sampled mean.
            mean_deltaS_i = mean(array(deltaS["delta" + str(k)]))
            std_deltaS_i = std(array(deltaS["delta" + str(k)]))
            n_deltaS_i = len(array(deltaS["delta" + str(k)]))

            sd_deltaS.append(std_deltaS_i)
            mean_deltaS.append(mean_deltaS_i)

            # Standard Error of the mean SE
            # the smaller the variability in the data, the more confident we are that one value (the mean) accurately reflects them.
            se_deltaS.append(std_deltaS_i / sqrt(n_deltaS_i))
            n_deltaS.append(n_deltaS_i)

        # make array for data_deltaS
        data = array(data_deltaS)

        print "\n================\nMean and SE (i.e VOI sample data)"
        print mean_deltaS
        print se_deltaS

        # create a set of Parameters
        params = Parameters()
        params.add("amp", value=10, min=0)
        params.add("alpha", value=1, min=0)
        params.add("beta", value=0.05, min=0.0001, max=0.9)

        # do fit, here with leastsq model
        # define objective function: returns the array to be minimized
        def fcn2min(params, t, data):
            global model, model_res, x
            """ model EMM for Bilateral DCE-MRI, subtract data"""
            # unpack parameters:
            #  extract .value attribute for each parameter
            amp = params["amp"].value  # Upper limit of deltaS
            alpha = params["alpha"].value  # rate of signal increase min-1
            beta = params["beta"].value  # rate of signal decrease min-1

            model = amp * (1 - exp(-alpha * t)) * exp(-beta * t)

            x = linspace(0, t[4], 101)
            model_res = amp * (1 - exp(-alpha * x)) * exp(-beta * x)

            return model - data

        #####
        myfit = Minimizer(fcn2min, params, fcn_args=(t,), fcn_kws={"data": data})
        myfit.prepare_fit()
        myfit.leastsq()

        # On a successful fit using the leastsq method, several goodness-of-fit statistics
        # and values related to the uncertainty in the fitted variables will be calculated
        print "myfit.success"
        print myfit.success
        print "myfit.residual"
        print myfit.residual
        print "myfit.chisqr"
        print myfit.chisqr
        print "myfit.redchi"
        print myfit.redchi

        # calculate final result
        final = data + myfit.residual
        # write error report
        report_errors(params)

        # Calculate R-square
        # R_square = sum( y_fitted - y_mean)/ sum(y_data - y_mean)
        R_square = sum((model - mean(data)) ** 2) / sum((data - mean(data)) ** 2)
        print "R^2"
        print R_square

        self.amp = params["amp"].value
        self.alpha = params["alpha"].value
        self.beta = params["beta"].value

        ##################################################
        # Now Calculate Extract parameters from model
        self.iAUC1 = params["amp"].value * (
            ((1 - exp(-params["beta"].value * t[1])) / params["beta"].value)
            + (exp((-params["alpha"].value + params["beta"].value) * t[1]) - 1)
            / (params["alpha"].value + params["beta"].value)
        )
        print "iAUC1"
        print self.iAUC1

        self.Slope_ini = params["amp"].value * params["alpha"].value
        print "Slope_ini"
        print self.Slope_ini

        self.Tpeak = (1 / params["alpha"].value) * log(1 + (params["alpha"].value / params["beta"].value))
        print "Tpeak"
        print self.Tpeak

        self.Kpeak = -params["amp"].value * params["alpha"].value * params["beta"].value
        print "Kpeak"
        print self.Kpeak

        self.SER = exp((t[4] - t[1]) * params["beta"].value) * (
            (1 - exp(-params["alpha"].value * t[1])) / (1 - exp(-params["alpha"].value * t[4]))
        )
        print "SER"
        print self.SER

        ##################################################
        # Now Calculate enhancement Kinetic based features
        # Based on the course of signal intensity within the lesion
        print "\n Saving %s" % "Crk"
        So = array(deltaS["delta0"]).astype(float)
        Crk = {"Cr0": mean(So)}
        C = {}
        Carray = []

        for k in range(1, len(DICOMImages)):
            Sk = array(deltaS["delta" + str(k)]).astype(float)
            Cr = 0
            for j in range(len(So)):
                # extract average enhancement over the lesion at each time point
                Cr = Cr + (Sk[j] - So[j]) / So[j]
                Carray.append((Sk[j] - So[j]) / So[j])

            # compile
            C["C" + str(k)] = Carray
            Crk["Cr" + str(k)] = Cr / len(Sk)

        # Extract Fii_1
        for k in range(1, 5):
            currentCr = array(Crk["Cr" + str(k)]).astype(float)
            print currentCr
            if self.maxCr < currentCr:
                self.maxCr = float(currentCr)
                self.peakCr = int(k)

        print "Maximum Upate (Fii_1) = %d " % self.maxCr
        print "Peak Cr (Fii_2) = %d " % self.peakCr

        # Uptake rate
        self.UptakeRate = float(self.maxCr / self.peakCr)
        print "Uptake rate (Fii_3) "
        print self.UptakeRate

        # WashOut Rate
        if self.peakCr == 4:
            self.washoutRate = 0
        else:
            self.washoutRate = float((self.maxCr - array(Crk["Cr" + str(4)]).astype(float)) / (4 - self.peakCr))
        print "WashOut rate (Fii_4) "
        print self.washoutRate

        ##################################################
        # Now Calculate enhancement-variance Kinetic based features
        # Based on Crk['Cr'+str(k)] = Cr/len(Sk)
        print "\n Saving %s" % "Vrk"
        Vrk = {}

        for k in range(1, 5):
            Ci = array(C["C" + str(k)]).astype(float)
            Cri = array(Crk["Cr" + str(k)]).astype(float)
            Vr = 0
            for j in range(len(Ci)):
                # extract average enhancement over the lesion at each time point
                Vr = Vr + (Ci[j] - Cri) ** 2
            # compile
            Vrk["Vr" + str(k)] = Vr / (len(Ci) - 1)

        # Extract Fiii_1
        for k in range(1, 5):
            currentVr = array(Vrk["Vr" + str(k)]).astype(float)
            if self.maxVr < currentVr:
                print currentVr
                self.maxVr = float(currentVr)
                self.peakVr = int(k)

        print "Maximum Variation of enhan (Fiii_1) = %d " % self.maxVr
        print "Peak Vr (Fii_2) = %d " % self.peakVr

        # Vr_increasingRate
        self.Vr_increasingRate = self.maxVr / self.peakVr
        print "Vr_increasingRate (Fiii_3)"
        print self.Vr_increasingRate

        # Vr_decreasingRate
        if self.peakVr == 4:
            self.Vr_decreasingRate = 0
        else:
            self.Vr_decreasingRate = float((self.maxVr - array(Vrk["Vr" + str(4)]).astype(float)) / (4 - self.peakVr))
        print "Vr_decreasingRate (Fiii_4) "
        print self.Vr_decreasingRate

        # Vr_post_1
        self.Vr_post_1 = float(array(Vrk["Vr" + str(1)]).astype(float))
        print "Vr_post_1 (Fiii_5)"
        print self.Vr_post_1

        ##################################################
        # orgamize into dataframe
        self.dynamicEMM_inside = DataFrame(
            data=array(
                [
                    [
                        self.amp,
                        self.alpha,
                        self.beta,
                        self.iAUC1,
                        self.Slope_ini,
                        self.Tpeak,
                        self.Kpeak,
                        self.SER,
                        self.maxCr,
                        self.peakCr,
                        self.UptakeRate,
                        self.washoutRate,
                        self.maxVr,
                        self.peakVr,
                        self.Vr_increasingRate,
                        self.Vr_decreasingRate,
                        self.Vr_post_1,
                    ]
                ]
            ),
            columns=[
                "A.inside",
                "alpha.inside",
                "beta.inside",
                "iAUC1.inside",
                "Slope_ini.inside",
                "Tpeak.inside",
                "Kpeak.inside",
                "SER.inside",
                "maxCr.inside",
                "peakCr.inside",
                "UptakeRate.inside",
                "washoutRate.inside",
                "maxVr.inside",
                "peakVr.inside",
                "Vr_increasingRate.inside",
                "Vr_decreasingRate.inside",
                "Vr_post_1.inside",
            ],
        )

        #############################################################
        # try to plot results
        pylab.figure()
        pylab.errorbar(t, data, yerr=se_deltaS, fmt="ro", label="data+SE")  # data 'ro' red dots as markers
        pylab.plot(t, final, "b+", label="data+residuals")  # data+residuals 'b+' blue pluses
        pylab.plot(t, model, "b", label="model")  # model fit 'b' blue
        pylab.plot(x, model_res, "k", label="model fit")  # model fit 'k' blakc
        pylab.xlabel(" post-contrast time (min)")
        pylab.ylabel("delta S(t)")
        pylab.legend()

        return self.dynamicEMM_inside
    def extractfeatures_contour(self, DICOMImages, image_pos_pat, image_ori_pat, series_path, phases_series, VOI_mesh):
        """Extract EMM kinetic features sampled along the VOI contour.

        For each DCE-MRI phase, samples pixel values at the VOI mesh
        points, then fits the empirical mathematical model (EMM)
            deltaS(t) = amp * (1 - exp(-alpha*t)) * exp(-beta*t)
        to the mean relative enhancement over time and derives
        model-based and enhancement/variance kinetic features.

        Parameters (assumed from usage -- confirm against callers):
            DICOMImages   -- list of VTK image volumes, one per phase
            image_pos_pat -- DICOM ImagePositionPatient of the series
            image_ori_pat -- DICOM ImageOrientationPatient of the series
            series_path   -- filesystem path to the series directory
            phases_series -- per-phase subdirectory names
            VOI_mesh      -- VTK mesh whose points delimit the VOI contour

        Returns self.dynamicEMM_contour, a one-row DataFrame with the
        '*.contour' feature columns.  Side effects: mutates many self.*
        attributes, sets module-level globals (model, model_res, x) via
        fcn2min, prints diagnostics, and opens a pylab figure.
        """
        # Start pixVals for collection pixel values at VOI
        pixVals = []
        deltaS = {}

        # necessary to read point coords (reused as VTK output buffers)
        VOIPnt = [0,0,0]
        ijk = [0,0,0]
        pco = [0,0,0]

        for i in range(len(DICOMImages)):
            abspath_PhaseID = series_path+os.sep+str(phases_series[i]) 
            print phases_series[i]
             
            # Get total number of files
            [len_listSeries_files, FileNms_slices_sorted_stack] = processDicoms.ReadDicomfiles(abspath_PhaseID)
            mostleft_slice = FileNms_slices_sorted_stack.slices[0]
            
            # Get dicom header, retrieve timing tags from the first slice
            dicomInfo_series = dicom.read_file(abspath_PhaseID+os.sep+str(mostleft_slice)) 
            
            # (0008,0031) AT S Series Time            # hh.mm.ss.frac
            seriesTime = str(dicomInfo_series[0x0008,0x0031].value) 
            # (0008,0033) AT S Image Time             # hh.mm.ss.frac
            imageTime = str(dicomInfo_series[0x0008,0x0033].value)
            
            # (0008,0032) AT S Acquisition Time       # hh.mm.ss.frac
            ti = str(dicomInfo_series[0x0008,0x0032].value) 
            
            # combine with today's date only to allow datetime subtraction later
            acquisitionTimepoint = datetime.time(hour=int(ti[0:2]), minute=int(ti[2:4]), second=int(ti[4:6]))
            self.timepoints.append( datetime.datetime.combine(datetime.date.today(), acquisitionTimepoint) )
            
            # find mapping to Dicom space  
            [transformed_image, transform_cube] = Display().dicomTransform(DICOMImages[i], image_pos_pat, image_ori_pat)
 
            for j in range( VOI_mesh.GetNumberOfPoints() ):
                VOI_mesh.GetPoint(j, VOIPnt)      
                
                # extract pixID at location VOIPnt
                pixId = transformed_image.FindPoint(VOIPnt[0], VOIPnt[1], VOIPnt[2])
                im_pt = [0,0,0]
                
                transformed_image.GetPoint(pixId,im_pt)           
                inorout = transformed_image.ComputeStructuredCoordinates( im_pt, ijk, pco)
                # inorout == 0 means the point falls outside the image extent; skip it
                if(inorout == 0):
                    pass
                else:
                    pixValx = transformed_image.GetScalarComponentAsFloat( ijk[0], ijk[1], ijk[2], 0)
                    pixVals.append(pixValx)
                        
            # Now collect pixVals for this phase and reset for the next one
            print "Saving %s" % 'delta'+str(i)
            deltaS['delta'+str(i)] = pixVals
            pixVals = []
                    
        print self.timepoints
        
        # Collecting timepoints in proper format:
        # cumulative minutes since the first acquisition (t_delta[0] == 0)
        t_delta = []
        t_delta.append(0)
        total_time = 0
        for i in range(len(DICOMImages)-1):
            current_time = self.timepoints[i+1]
            previous_time = self.timepoints[i]
            difference_time =current_time - previous_time
            # divmod(seconds, 60) -> (whole minutes, leftover seconds)
            timestop = divmod(difference_time.total_seconds(), 60)
            t_delta.append( t_delta[i] + timestop[0]+timestop[1]*(1./60))
            total_time = total_time+timestop[0]+timestop[1]*(1./60)
            
        # finally print t_delta
        print t_delta
        t = array(t_delta)
        print "total_time"
        print total_time
        
        ##############################################################
        # Finished sampling deltaS
        # APply lmfit to deltaS
        # first sample the mean
        data_deltaS = []; t_deltaS = []; mean_deltaS = []; sd_deltaS = []; se_deltaS = []; n_deltaS = []
        
        # append So and to (baseline phase contributes zero enhancement)
        data_deltaS.append( 0 )       
        t_deltaS.append(0)
        mean_deltaS.append( mean(deltaS['delta0']) )
        sd_deltaS.append(0)
        se_deltaS.append(0)
        n_deltaS.append( len(deltaS['delta0']) )
        
        for k in range(1,len(DICOMImages)):
            # relative enhancement: (mean(S_k) - mean(S_0)) / mean(S_0)
            deltaS_i =  ( mean(array(deltaS['delta'+str(k)]).astype(float)) -  mean(deltaS['delta0']) )/  mean(deltaS['delta0'])
            data_deltaS.append( deltaS_i )
            t_deltaS.append(k)
            print 'delta'+str(k)
            print  data_deltaS[k]
            
            ##############################################################
            # Calculate data_error
            # estimate the population mean and SD from our samples to find SE
            # SE tells us the distribution of individual scores around the sampled mean.
            mean_deltaS_i = mean(array(deltaS['delta'+str(k)]))
            std_deltaS_i = std(array(deltaS['delta'+str(k)]))
            n_deltaS_i = len(array(deltaS['delta'+str(k)]))
                
            sd_deltaS.append( std_deltaS_i )
            mean_deltaS.append( mean_deltaS_i )
            
            # Standard Error of the mean SE
            # the smaller the variability in the data, the more confident we are that one value (the mean) accurately reflects them.
            se_deltaS.append(std_deltaS_i/sqrt(n_deltaS_i))
            n_deltaS.append(n_deltaS_i)
                        
        # make array for data_deltaS
        data = array(data_deltaS)
        
        print "\n================\nMean and SE (i.e VOI sample data)"
        print mean_deltaS
        print se_deltaS
        
        # create a set of Parameters (initial guesses + bounds for the EMM fit)
        params = Parameters()
        params.add('amp',   value= 10,  min=0)
        params.add('alpha', value= 1, min=0) 
        params.add('beta', value= 0.05, min=0.0001, max=0.9)
        
        # do fit, here with leastsq model
        # define objective function: returns the array to be minimized
        def fcn2min(params, t, data):
            # NOTE(review): publishes model/model_res/x as module globals so the
            # plotting code after the fit can reuse the last evaluation.
            global model, model_res, x
            """ model EMM for Bilateral DCE-MRI, subtract data"""
            # unpack parameters:
            #  extract .value attribute for each parameter
            amp = params['amp'].value    # Upper limit of deltaS
            alpha = params['alpha'].value    # rate of signal increase min-1
            beta = params['beta'].value        # rate of signal decrease min-1
                    
            model = amp * (1- exp(-alpha*t)) * exp(-beta*t)
            
            # dense time grid up to t[4] for a smooth model curve
            # (assumes at least 5 time points -- TODO confirm)
            x = linspace(0, t[4], 101)
            model_res = amp * (1- exp(-alpha*x)) * exp(-beta*x)
        
            return model - data
        
        ##### run the least-squares fit; params is updated in place
        myfit = Minimizer(fcn2min,  params, fcn_args=(t,), fcn_kws={'data':data})
        myfit.prepare_fit()
        myfit.leastsq()
            
        # On a successful fit using the leastsq method, several goodness-of-fit statistics
        # and values related to the uncertainty in the fitted variables will be calculated
        print "myfit.success"
        print myfit.success
        print "myfit.residual"
        print myfit.residual
        print "myfit.chisqr"
        print myfit.chisqr
        print "myfit.redchi"
        print myfit.redchi
            
        # calculate final result
        #final = data + myfit.residual
        # write error report
        report_errors(params)
        
        # Calculate R-square
        # R_square = sum( y_fitted - y_mean)/ sum(y_data - y_mean)
        # (model is the last evaluation left in the global by fcn2min)
        R_square = sum( (model - mean(data))**2 )/ sum( (data - mean(data))**2 )
        print "R^2"
        print R_square
        
        # fitted EMM parameters
        self.amp = params['amp'].value
        self.alpha = params['alpha'].value
        self.beta = params['beta'].value
        
        ##################################################
        # Now Calculate Extract parameters from model
        # iAUC1: integral of the EMM curve over [0, t[1]] (closed form)
        self.iAUC1 = params['amp'].value *( ((1-exp(-params['beta'].value*t[1]))/params['beta'].value) + (exp((-params['alpha'].value+params['beta'].value)*t[1])-1)/(params['alpha'].value+params['beta'].value) )
        print "iAUC1"
        print self.iAUC1
        
        # initial slope of the EMM curve at t=0
        self.Slope_ini = params['amp'].value*params['alpha'].value
        print "Slope_ini"
        print self.Slope_ini
    
        # time at which the EMM curve peaks
        self.Tpeak = (1/params['alpha'].value)*log(1+(params['alpha'].value/params['beta'].value))
        print "Tpeak"
        print self.Tpeak
    
        # curvature-related feature at the peak
        self.Kpeak = -params['amp'].value * params['alpha'].value * params['beta'].value
        print "Kpeak"
        print self.Kpeak
    
        # signal enhancement ratio between first and last post-contrast points
        self.SER = exp( (t[4]-t[1])*params['beta'].value) * ( (1-exp(-params['alpha'].value*t[1]))/(1-exp(-params['alpha'].value*t[4])) )
        print "SER"
        print self.SER
        
        ##################################################
        # Now Calculate enhancement Kinetic based features
        # Based on the course of signal intensity within the lesion
        print "\n Saving %s" % 'Crk'
        So = array(deltaS['delta0']).astype(float)
        Crk = {'Cr0': mean(So)}  
        C = {}
        
        for k in range(1,len(DICOMImages)):
            Sk = array(deltaS['delta'+str(k)]).astype(float)
            print Sk
            Cr = 0
            Carray = []
            for j in range( len(So) ):
                # extract average enhancement over the lesion at each time point
                Cr = Cr + (Sk[j] - So[j])/So[j]
                Carray.append((Sk[j] - So[j])/So[j])
                
            # compile: per-pixel enhancements and their lesion average
            C['C'+str(k)] = Carray
            Crk['Cr'+str(k)] = float(Cr/len(Sk))
        
        # Extract Fii_1: maximum average enhancement and the phase where it occurs
        # NOTE(review): hardcoded range(1,5) assumes exactly 5 phases -- confirm
        for k in range(1,5):
            currentCr = array(Crk['Cr'+str(k)]).astype(float)
            print currentCr
            if( self.maxCr < currentCr):
                self.maxCr = float(currentCr)
                self.peakCr = int(k)
                
        print "Maximum Upate (Fii_1) = %d " %  self.maxCr
        print "Peak Cr (Fii_2) = %d " %  self.peakCr
        
        # Uptake rate
        self.UptakeRate = float(self.maxCr/self.peakCr)    
        print "Uptake rate (Fii_3) "
        print self.UptakeRate
        
        # WashOut Rate (zero when the peak is at the last phase)
        if( self.peakCr == 4):
            self.washoutRate = 0
        else:
            self.washoutRate = float( (self.maxCr - array(Crk['Cr'+str(4)]).astype(float))/(4-self.peakCr) )
        print "WashOut rate (Fii_4) "
        print self.washoutRate


        ##################################################
        # Now Calculate enhancement-variance Kinetic based features
        # Based on Crk['Cr'+str(k)] = Cr/len(Sk)
        print "\n Saving %s" % 'Vrk'
        Vrk = {}
        
        for k in range(1,5):
            Ci = array(C['C'+str(k)]).astype(float)    
            Cri = array(Crk['Cr'+str(k)]).astype(float)
            Vr = 0
            for j in range( len(Ci) ):
                # sum of squared deviations from the lesion-average enhancement
                Vr = Vr + (Ci[j] - Cri)**2
            # compile: sample variance (n-1 denominator)
            Vrk['Vr'+str(k)] = Vr/(len(Ci)-1)
        
        # Extract Fiii_1: maximum enhancement variance and its phase
        for k in range(1,5):
            currentVr = array(Vrk['Vr'+str(k)]).astype(float)
            if( self.maxVr < currentVr):
                print currentVr
                self.maxVr = float(currentVr)
                self.peakVr = int(k)
        
        print "Maximum Variation of enhan (Fiii_1) = %d " %  self.maxVr
        print "Peak Vr (Fii_2) = %d " %  self.peakVr
        
        # Vr_increasingRate 
        self.Vr_increasingRate = self.maxVr/self.peakVr    
        print "Vr_increasingRate (Fiii_3)" 
        print self.Vr_increasingRate
        
        # Vr_decreasingRate (zero when the variance peak is at the last phase)
        if( self.peakVr == 4):
            self.Vr_decreasingRate = 0
        else:
            self.Vr_decreasingRate = float((self.maxVr - array(Vrk['Vr'+str(4)]).astype(float))/(4-self.peakVr))
        print "Vr_decreasingRate (Fiii_4) "
        print self.Vr_decreasingRate
        
        # Vr_post_1 
        self.Vr_post_1 = float( array(Vrk['Vr'+str(1)]).astype(float))
        print "Vr_post_1 (Fiii_5)"
        print self.Vr_post_1
 
        ##################################################
        # orgamize into dataframe: one row, one column per feature
        self.dynamicEMM_contour = DataFrame( data=array([[ self.amp, self.alpha, self.beta, self.iAUC1, self.Slope_ini, self.Tpeak, self.Kpeak, self.SER, self.maxCr, self.peakCr, self.UptakeRate, self.washoutRate, self.maxVr, self.peakVr, self.Vr_increasingRate, self.Vr_decreasingRate, self.Vr_post_1]]), 
                                columns=['A.contour', 'alpha.contour', 'beta.contour', 'iAUC1.contour', 'Slope_ini.contour', 'Tpeak.contour', 'Kpeak.contour', 'SER.contour', 'maxCr.contour', 'peakCr.contour', 'UptakeRate.contour', 'washoutRate.contour', 'maxVr.contour', 'peakVr.contour','Vr_increasingRate.contour', 'Vr_decreasingRate.contour', 'Vr_post_1.contour'])

        #############################################################
        # try to plot results
        pylab.figure()
        pylab.errorbar(t, data, yerr=se_deltaS, fmt='ro', label='data+SE') # data 'ro' red dots as markers
        pylab.plot(t, model, 'b', label='model')    # model fit 'b' blue
        pylab.plot(x, model_res, 'k', label='model fit')    # model fit 'k' blakc
        pylab.xlabel(" post-contrast time (min)")
        pylab.ylabel("delta S(t)")
        pylab.legend()
        
        return self.dynamicEMM_contour
Beispiel #18
0
    #print phase
    if np.size(hjd) == 1:
        if phase < 0.0:
            return phase + 1.0
        else:
            return phase
    else:
        ss = phase < 0.0
        phase[ss] = phase[ss] + 1.0
        return phase


# Fit the sinusoidal radial-velocity model to (HJD, velocity) data.
# res_sin3, params, hjd, vel, and op are defined earlier in the file.
# HJD values are shifted by the integer part of the reference epoch to
# keep the abscissa small for the fit.
results = lm.minimize(res_sin3, params, args=(hjd - int(op.hjd0), vel))
print('Best fit for ' + op.object)
print('--------------')
lm.report_errors(results.params, show_correl=False)
print('--------------')
# goodness-of-fit diagnostics
print('DoF = ', results.nfree)
print('Chi-squared = ', results.chisqr)
print('Red_chi^2 = ', results.redchi)
print('RMS of resiudals = ', np.std(results.residual))
print('--------------')
# Optionally inflate error bars so that reduced chi^2 becomes 1
# (sqrt(chi^2 / DoF) is the standard rescale factor).
if op.scale_errors:
    rescale = np.sqrt(results.chisqr / results.nfree)
else:
    rescale = 1.0

print('Rescale factor of errorbars = ', rescale)

phase1 = []
phase = phaser(hjd, results.params['hjd0'].value + int(op.hjd0),
Beispiel #19
0
    porb = pars['porb'].value
    hjd0 = pars['hjd0'].value
    gama = pars['gama'].value
    k1 = pars['k1'].value
    phaseoff=pars['phaseoff'].value
    model=gama+k1*n.sin(2*n.pi*((x-hjd0)/porb+phaseoff))
    if data is None:
        return model
    if sigma is  None:
        return (model - data)
    return (model - data)/sigma

# Fit the sinusoidal radial-velocity model to (HJD, velocity) data and
# report the result.  res_sin3, params, hjd, vel, and op are defined
# earlier in the file; params is updated in place by the fit.
results=lm.minimize(res_sin3,params, args=(hjd,vel))
print 'Best fit for '+op.object
print '--------------'
lm.report_errors(params,show_correl=False)
print '--------------'
print 'DoF = ',results.nfree
print 'Chi-squared = ',results.chisqr
# Fold each HJD into orbital phase [0, 1) using the fitted epoch (hjd0)
# and period (porb); n.fix truncates toward zero, so negative fractional
# parts are wrapped back into range by adding 1.
phase1=[]
for i in hjd:
	tmp=(i-params['hjd0'].value)/params['porb'].value-n.fix((i-params['hjd0'].value)/params['porb'].value)
	if tmp < 0.0:
		tmp = tmp+1.
	phase1.append(tmp)
phase=n.array(phase1)
    
# Set up the phased radial-velocity plot (top 3/4 of a 4x1 grid).
fig=plt.figure(1,facecolor='w')
plt.clf() 
ax1=plt.subplot2grid((4, 1), (0, 0),rowspan=3)
#ffpar=cv.orbital(phase,vel,[70.0,30.0,saved[3][0],porb])
Beispiel #20
0
    def extractfeatures_inside(self, DICOMImages, image_pos_pat, image_ori_pat, series_path, phases_series, VOI_mesh):
        """ Start pixVals for collection pixel values at VOI """
        pixVals = []
        deltaS = {}
        
        # necessary to read point coords
        VOIPnt = [0,0,0]
        ijk = [0,0,0]
        pco = [0,0,0]
        
        for i in range(len(DICOMImages)):
            abspath_PhaseID = series_path+os.sep+str(phases_series[i]) 
            print phases_series[i]
            
            # Get total number of files
            load = Inputs_init()
            [len_listSeries_files, FileNms_slices_sorted_stack] = load.ReadDicomfiles(abspath_PhaseID)
            mostleft_slice = FileNms_slices_sorted_stack.slices[0]
            
            # Get dicom header, retrieve
            dicomInfo_series = dicom.read_file(abspath_PhaseID+os.sep+str(mostleft_slice)) 
            
            # (0008,0031) AT S Series Time            # hh.mm.ss.frac
            seriesTime = str(dicomInfo_series[0x0008,0x0031].value) 
            # (0008,0033) AT S Image Time             # hh.mm.ss.frac
            imageTime = str(dicomInfo_series[0x0008,0x0033].value)
            
            # (0008,0032) AT S Acquisition Time       # hh.mm.ss.frac
            ti = str(dicomInfo_series[0x0008,0x0032].value) 
            
            acquisitionTimepoint = datetime.time(hour=int(ti[0:2]), minute=int(ti[2:4]), second=int(ti[4:6]))
            self.timepoints.append( datetime.datetime.combine(datetime.date.today(), acquisitionTimepoint) )
            
            # find mapping to Dicom space  
            [transformed_image, transform_cube] = Display().dicomTransform(DICOMImages[i], image_pos_pat, image_ori_pat)
            
            ### Get inside of VOI            
            [VOI_scalars, VOIdims] = self.createMaskfromMesh(VOI_mesh, transformed_image)
            print "\n VOIdims"
            print VOIdims
            
            # get non zero elements
            image_scalars = transformed_image.GetPointData().GetScalars()
            numpy_VOI_imagedata = vtk_to_numpy(image_scalars)     
            
            numpy_VOI_imagedata = numpy_VOI_imagedata.reshape(VOIdims[2], VOIdims[1], VOIdims[0]) 
            numpy_VOI_imagedata = numpy_VOI_imagedata.transpose(2,1,0)
            
            print "Shape of VOI_imagedata: "
            print numpy_VOI_imagedata.shape
            
            #################### HERE GET IT AND MASK IT OUT
            self.nonzeroVOIextracted = nonzero(VOI_scalars)
            print self.nonzeroVOIextracted
            
            VOI_imagedata = numpy_VOI_imagedata[self.nonzeroVOIextracted]     
            
            print "shape of VOI_imagedata  Clipped:"
            print VOI_imagedata.shape
        
            for j in range( len(VOI_imagedata) ):
                pixValx = VOI_imagedata[j]
                pixVals.append(pixValx)
                        
            # Now collect pixVals
            print "Saving %s" % 'delta'+str(i)
            deltaS['delta'+str(i)] = pixVals
            pixVals = []
                    
        print self.timepoints
        
        # Collecting timepoints in proper format
        t_delta = []
        t_delta.append(0)
        total_time = 0
        for i in range(len(DICOMImages)-1):
            current_time = self.timepoints[i+1]
            previous_time = self.timepoints[i]
            difference_time =current_time - previous_time
            timestop = divmod(difference_time.total_seconds(), 60)
            t_delta.append( t_delta[i] + timestop[0]+timestop[1]*(1./60))
            total_time = total_time+timestop[0]+timestop[1]*(1./60)
            
        # finally print t_delta
        print t_delta
        t = array(t_delta)
        print "total_time"
        print total_time
        
        ##############################################################
        # Finished sampling deltaS
        # APply lmfit to deltaS
        # first sample the mean
        data_deltaS = []; t_deltaS = []; mean_deltaS = []; sd_deltaS = []; se_deltaS = []; n_deltaS = []
        
        # append So and to
        data_deltaS.append( 0 )       
        t_deltaS.append(0)
        mean_deltaS.append( mean(deltaS['delta0']) )
        sd_deltaS.append(0)
        se_deltaS.append(0)
        n_deltaS.append( len(deltaS['delta0']) )
        
        for k in range(1,len(DICOMImages)):
            deltaS_i =  ( mean(array(deltaS['delta'+str(k)]).astype(float)) -  mean(deltaS['delta0']) )/  mean(deltaS['delta0'])
            data_deltaS.append( deltaS_i )
            t_deltaS.append(k)
            print 'delta'+str(k)
            print  data_deltaS[k]
            
            ##############################################################
            # Calculate data_error
            # estimate the population mean and SD from our samples to find SE
            # SE tells us the distribution of individual scores around the sampled mean.
            mean_deltaS_i = mean(array(deltaS['delta'+str(k)]))
            std_deltaS_i = std(array(deltaS['delta'+str(k)]))
            n_deltaS_i = len(array(deltaS['delta'+str(k)]))
                
            sd_deltaS.append( std_deltaS_i )
            mean_deltaS.append( mean_deltaS_i )
            
            # Standard Error of the mean SE
            # the smaller the variability in the data, the more confident we are that one value (the mean) accurately reflects them.
            se_deltaS.append(std_deltaS_i/sqrt(n_deltaS_i))
            n_deltaS.append(n_deltaS_i)
                        
        # make array for data_deltaS
        data = array(data_deltaS)
        
        print "\n================\nMean and SE (i.e VOI sample data)"
        print mean_deltaS
        print se_deltaS
        
        # create a set of Parameters
        params = Parameters()
        params.add('amp',   value= 10,  min=0)
        params.add('alpha', value= 1, min=0) 
        params.add('beta', value= 0.05, min=0.0001, max=0.9)
        
        # do fit, here with leastsq model
        # define objective function: returns the array to be minimized
        def fcn2min(params, t, data):
            global model, model_res, x
            """ model EMM for Bilateral DCE-MRI, subtract data"""
            # unpack parameters:
            #  extract .value attribute for each parameter
            amp = params['amp'].value    # Upper limit of deltaS
            alpha = params['alpha'].value    # rate of signal increase min-1
            beta = params['beta'].value        # rate of signal decrease min-1
                    
            model = amp * (1- exp(-alpha*t)) * exp(-beta*t)
            
            x = linspace(0, t[4], 101)
            model_res = amp * (1- exp(-alpha*x)) * exp(-beta*x)
        
            return model - data
        
        #####
        myfit = Minimizer(fcn2min,  params, fcn_args=(t,), fcn_kws={'data':data})
        myfit.prepare_fit()
        myfit.leastsq()
            
        # On a successful fit using the leastsq method, several goodness-of-fit statistics
        # and values related to the uncertainty in the fitted variables will be calculated
        print "myfit.success"
        print myfit.success
        print "myfit.residual"
        print myfit.residual
        print "myfit.chisqr"
        print myfit.chisqr
        print "myfit.redchi"
        print myfit.redchi
            
        # calculate final result
        final = data + myfit.residual
        # write error report
        report_errors(params)
        
        # Calculate R-square
        # R_square = sum( y_fitted - y_mean)/ sum(y_data - y_mean)
        R_square = sum( (model - mean(data))**2 )/ sum( (data - mean(data))**2 )
        print "R^2"
        print R_square
        
        self.amp = params['amp'].value
        self.alpha = params['alpha'].value
        self.beta = params['beta'].value
        
        ##################################################
        # Now Calculate Extract parameters from model
        self.iAUC1 = params['amp'].value *( ((1-exp(-params['beta'].value*t[1]))/params['beta'].value) + (exp((-params['alpha'].value+params['beta'].value)*t[1])-1)/(params['alpha'].value+params['beta'].value) )
        print "iAUC1"
        print self.iAUC1
        
        self.Slope_ini = params['amp'].value*params['alpha'].value
        print "Slope_ini"
        print self.Slope_ini
    
        self.Tpeak = (1/params['alpha'].value)*log(1+(params['alpha'].value/params['beta'].value))
        print "Tpeak"
        print self.Tpeak
    
        self.Kpeak = -params['amp'].value * params['alpha'].value * params['beta'].value
        print "Kpeak"
        print self.Kpeak
    
        self.SER = exp( (t[4]-t[1])*params['beta'].value) * ( (1-exp(-params['alpha'].value*t[1]))/(1-exp(-params['alpha'].value*t[4])) )
        print "SER"
        print self.SER
        
        ##################################################
        # Now Calculate enhancement Kinetic based features
        # Based on the course of signal intensity within the lesion
        print "\n Saving %s" % 'Crk'
        So = array(deltaS['delta0']).astype(float)
        Crk = {'Cr0': mean(So)}  
        C = {}
        Carray = []
        
        for k in range(1,len(DICOMImages)):
            Sk = array(deltaS['delta'+str(k)]).astype(float)
            Cr = 0
            for j in range( len(So) ):
                # extract average enhancement over the lesion at each time point
                Cr = Cr + (Sk[j] - So[j])/So[j]
                Carray.append((Sk[j] - So[j])/So[j])
                
            # compile
            C['C'+str(k)] = Carray
            Crk['Cr'+str(k)] = Cr/len(Sk)
        
        # Extract Fii_1
        for k in range(1,5):
            currentCr = array(Crk['Cr'+str(k)]).astype(float)
            print currentCr
            if( self.maxCr < currentCr):
                self.maxCr = float(currentCr)
                self.peakCr = int(k)
                
        print "Maximum Upate (Fii_1) = %d " %  self.maxCr
        print "Peak Cr (Fii_2) = %d " %  self.peakCr
        
        # Uptake rate
        self.UptakeRate = float(self.maxCr/self.peakCr)    
        print "Uptake rate (Fii_3) "
        print self.UptakeRate
        
        # WashOut Rate
        if( self.peakCr == 4):
            self.washoutRate = 0
        else:
            self.washoutRate = float( (self.maxCr - array(Crk['Cr'+str(4)]).astype(float))/(4-self.peakCr) )
        print "WashOut rate (Fii_4) "
        print self.washoutRate


        ##################################################
        # Now Calculate enhancement-variance Kinetic based features
        # Based on Crk['Cr'+str(k)] = Cr/len(Sk)
        print "\n Saving %s" % 'Vrk'
        Vrk = {}
        
        for k in range(1,5):
            Ci = array(C['C'+str(k)]).astype(float)    
            Cri = array(Crk['Cr'+str(k)]).astype(float)
            Vr = 0
            for j in range( len(Ci) ):
                # extract average enhancement over the lesion at each time point
                Vr = Vr + (Ci[j] - Cri)**2
            # compile
            Vrk['Vr'+str(k)] = Vr/(len(Ci)-1)
        
        # Extract Fiii_1
        for k in range(1,5):
            currentVr = array(Vrk['Vr'+str(k)]).astype(float)
            if( self.maxVr < currentVr):
                print currentVr
                self.maxVr = float(currentVr)
                self.peakVr = int(k)
        
        print "Maximum Variation of enhan (Fiii_1) = %d " %  self.maxVr
        print "Peak Vr (Fii_2) = %d " %  self.peakVr
        
        # Vr_increasingRate 
        self.Vr_increasingRate = self.maxVr/self.peakVr    
        print "Vr_increasingRate (Fiii_3)" 
        print self.Vr_increasingRate
        
        # Vr_decreasingRate
        if( self.peakVr == 4):
            Vr_decreasingRate = 0
        else:
            Vr_decreasingRate = float((self.maxVr - array(Vrk['Vr'+str(4)]).astype(float))/(4-self.peakVr))
        print "Vr_decreasingRate (Fiii_4) "
        print Vr_decreasingRate
        
        # Vr_post_1 
        self.Vr_post_1 = float( array(Vrk['Vr'+str(1)]).astype(float))
        print "Vr_post_1 (Fiii_5)"
        print self.Vr_post_1
 
        ##################################################
        # orgamize into dataframe
        self.dynamicEMM_inside = DataFrame( data=array([[ self.amp, self.alpha, self.beta, self.iAUC1, self.Slope_ini, self.Tpeak, self.Kpeak, self.SER, self.maxCr, self.peakCr, self.UptakeRate, self.washoutRate, self.maxVr, self.peakVr, self.Vr_increasingRate, self.Vr_post_1]]), 
                                columns=['A.inside', 'alpha.inside', 'beta.inside', 'iAUC1.inside', 'Slope_ini.inside', 'Tpeak.inside', 'Kpeak.inside', 'SER.inside', 'maxCr.inside', 'peakCr.inside', 'UptakeRate.inside', 'washoutRate.inside', 'maxVr.inside', 'peakVr.inside','Vr_increasingRate.inside', 'Vr_post_1.inside'])

        #############################################################
        # try to plot results
        pylab.figure()
        pylab.errorbar(t, data, yerr=se_deltaS, fmt='ro', label='data+SE') # data 'ro' red dots as markers
        pylab.plot(t, final, 'b+', label='data+residuals')    # data+residuals 'b+' blue pluses
        pylab.plot(t, model, 'b', label='model')    # model fit 'b' blue
        pylab.plot(x, model_res, 'k', label='model fit')    # model fit 'k' blakc
        pylab.xlabel(" post-contrast time (min)")
        pylab.ylabel("delta S(t)")
        pylab.legend()
        
        return self.dynamicEMM_inside
Beispiel #21
0
    def _findDB_old(self, directions=(45, 135, -45, -135)):
        '''Determine the direct beam position on the current scattering image.

        Refines the beam centre stored in ``self.Parameters`` by least-squares
        fitting azimuthal cuts of ``self.static`` with ``resid_db`` via lmfit,
        then writes the refined centre back to the instrument input file.

        **Arguments:**

        *directions*: 4 or 3 element sequence
            Azimuthal angles (deg) used for azimuthal integration and fitting.

        All other inputs -- initial centre estimate, detector image, mask,
        azimuthal averaging width, pixel size, sample-detector distance,
        fit ROI bounds and wavelength -- are read from ``self.Parameters``,
        ``self.static`` and ``self.mask``.

        **Side effects:** updates ``self.Parameters['cenx'/'ceny']`` and
        rewrites ``self.inputFileName`` through ``self.config``.
        '''
        xi = self.Parameters['cenx']
        yi = self.Parameters['ceny']
        img = self.static
        mask = self.mask
        if self.Parameters['oldInputFormat']:
            # Old input format stores the mask inverted -> flip 0 <-> 1.
            mask = (mask + 1) % 2
        dchi = self.Parameters['dchi']
        pixSize = self.Parameters['pixSize'] * 1e-3   # mm -> m (SI, as pyFAI expects)
        sdDist = self.Parameters['sdDist'] * 1e-3     # mm -> m
        rmin = self.Parameters['rmin']
        rmax = self.Parameters['rmax']
        wavelength = self.Parameters['wavelength'] * 1e-10  # Angstrom -> m
        params = lmfit.Parameters()
        params.add('xi', value = xi, vary = True)
        params.add('yi', value = yi, vary = True)
        # NOTE(review): this relies on the pre-0.9 lmfit API where minimize()
        # returned a Minimizer exposing .leastsq(); on modern lmfit the extra
        # .leastsq() call would fail -- confirm the pinned lmfit version.
        fitOut = lmfit.minimize(resid_db,params,\
                args=(img,mask,dchi,pixSize,wavelength,sdDist,rmin,rmax,directions),full_output=1)
        fitOut.leastsq()
        lmfit.report_errors(fitOut.params)
        # Updating parameters:
        self.Parameters['cenx'] = round(fitOut.params['xi'].value, 2)
        self.Parameters['ceny'] = round(fitOut.params['yi'].value, 2)
        # Adding the new direct beam position to the input file: older input
        # files keep the centre in section 'Main', newer ones in 'Beam'.
        try:
            self.config.set('Main','cenx',value = '%.2f' % fitOut.params['xi'].value)
            self.config.set('Main','ceny',value = '%.2f' % fitOut.params['yi'].value)
        except Exception:
            self.config.set('Beam','cenx',value = '%.2f' % fitOut.params['xi'].value)
            self.config.set('Beam','ceny',value = '%.2f' % fitOut.params['yi'].value)
        f = open(self.inputFileName,'w')
        self.config.write(f)
        f.close()
    
    # --- Cosine fit of frequency-vs-angle data (four harmonic amplitudes).
    # NOTE(review): this fragment uses the surrounding names cosine_fit, x, y
    # and yerr defined elsewhere -- confirm the intended scope.
    params = lmfit.Parameters()
    
    params.add('A', value = 0, vary = True) ##cos ### -3 cXZ sin 2 chi: 7 +- 23 mHz
    params.add('B', value = 0, vary = True) ##sin ### -3 cYZ sin 2 chi: 32 +- 56 mHz
    params.add('C', value = 0, vary = True) ##cos2 ### -1.5 (cXX-cYY) sin^2 chi: 15 +- 22 mHz
    params.add('D', value = 0, vary = True) ##sin2 ### -3 cXY sin^2 chi: 8 +- 20 mHz
    #params.add('offset', value = 0.0)
    
    # Least-squares fit of the cosine model to (x, y) with errors yerr.
    result = lmfit.minimize(cosine_fit, params, args = (x, y, yerr))
    
    # lmfit returns error-weighted residuals; multiply by yerr to undo the weighting.
    residual_array = result.residual*yerr
    
    #fit_values  = y + result.residual
    
    lmfit.report_errors(params, min_correl=0)
    
    print "Reduced chi-squared = ", result.redchi
    
    #print result.redchi
    
    #print 1/params['freq'].value/3600
    
    # Dense abscissa for plotting the fitted model curve.
    x_plot = np.linspace(x.min(),x.max(),1000)
    
    
    pyplot.plot(x,y,'-')
    #pyplot.plot(x_plot,cosine_model(params,x_plot),linewidth = 3.0)
#pyplot.errorbar(time_array,freq_array,width_array, linestyle='None',markersize = 4.0,fmt='o',color='black')

    
def main():
    """Fit a luminescence decay curve and bootstrap its parameter errors.

    Reads a three-column data file (time, decay y(t), impulse g(t)) whose
    path is given as ``sys.argv[1]``, fits ``Conv_residuals`` with lmfit's
    Levenberg-Marquardt solver, estimates parameter spreads by residual
    bootstrapping, runs a Wald-Wolfowitz runs test on the residuals, prints
    a report, and shows diagnostic plots and histograms.
    """

    # ----------------------------------------------------------------------
    # import data
    # ----------------------------------------------------------------------

    data = genfromtxt(sys.argv[1])

    # First two rows are skipped -- presumably a header; TODO confirm.
    t_c = data[2:, 0]  # time
    y_c = data[2:, 1]  # measured decay curve y(t)
    g_c = data[2:, 2]  # impulse g(t)

    # ----------------------------------------------------------------------
    # lmfit section
    # ----------------------------------------------------------------------

    params = Parameters()
    params.add("I_0", value=1.0)
    params.add("time0", value=1.0)
    # minimize residuals using lmfit
    # with Levenberg-Marquardt method,
    # the first, "signal" fitting procedure

    rezult = minimize(Conv_residuals, params, args=(t_c, y_c, g_c), method="leastsq")
    # --------------------------------------------------------------------
    # leastsq, nelder, lbfgsb, anneal, powell, cg, newton, cobyla, slsqp

    provisional_parameters = rezult  # will be printed at the end of the code

    residuals = Conv_residuals(rezult.params, t_c, y_c, g_c)  # residuals
    y_lmfit = y_c + residuals  # fitted line

    # ----------------------------------------------------------------------
    # bootstrapping section
    # ----------------------------------------------------------------------

    I_0 = []  # arrays of calculated parameters
    t_0 = []  # during bootstrapping

    iteration = int(input("How many bootstrapping iterations? "))

    # number of boostrap procedures

    for i in range(iteration):

        params = Parameters()  # repeat lmfit minimize using
        params.add("I_0", value=10.0)  # Levenberg-Marquardt method
        params.add("time0", value=1.0)  # with
        # y(t) + randomly sampled residuals

        rezult = minimize(Conv_residuals, params, args=(t_c, bootstrap(y_c, residuals), g_c), method="leastsq")

        I_0.append(rezult.params["I_0"].value)
        t_0.append(rezult.params["time0"].value)

    # ----------------------------------------------------------------------
    # quantiles/errors and means
    # ----------------------------------------------------------------------

    # [0.0, 1.0] quantiles give the full min/max span of the bootstrap sample.
    err_I_0 = stats.mstats.mquantiles(I_0, [0.0, 1.0])
    err_t_0 = stats.mstats.mquantiles(t_0, [0.0, 1.0])

    # 0.25, 0.75 -quantiles may be used instead of the full span

    # ----------------------------------------------------------------------
    # runs test
    # ----------------------------------------------------------------------

    # NOTE: local name 'np' (count of positive residuals) shadows any
    # module-level numpy import from here on.
    np = nm = 0  # number of positive and negative residuals, respectively
    nR = 1  # observed number of runs (changes of sign)

    if residuals[0] < 0:
        nm += 1

    for i in range(1, len(residuals)):  # loop for calculating
        # nm and nR
        if residuals[i] < 0:
            nm += 1

            if residuals[i - 1] > 0:
                nR += 1

        elif residuals[i - 1] < 0:
            nR += 1

    np = len(residuals) - nm  # np - number of positive residuals

    R = 1 + (2 * np * nm) / (np + nm)  # expected number of runs

    sigma_R = sqrt(2 * nm * np * (2 * nm * np - np - nm) / ((np + nm - 1) * (np + nm) ** 2))
    # variance of the expected number of runs

    # Continuity-corrected Z-score for the observed number of runs.
    if nR <= R:
        Z = (nR - R + 0.5) / sigma_R
    else:  # estimated standard normal
        Z = (nR - R - 0.5) / sigma_R  # distribution (Z-score)

    # ----------------------------------------------------------------------
    # report results of calculations
    # ----------------------------------------------------------------------

    print("\nLMFIT report:\n")  # results from the 'signal' fit
    report_errors(provisional_parameters)

    print(
        "\nBootstrapping report:\n\nI_0 =",
        "%.4f" % median(I_0),
        "\t(-",
        "%.4f" % (100 * ((median(I_0) - err_I_0[0]) / median(I_0))),
        "% / +",
        "%.4f" % (100 * ((err_I_0[1] - median(I_0)) / median(I_0))),
        "%)",
    )

    # NOTE! since the statistical approach
    # has been used, medians are more relevant
    # instead of means

    print(
        "t_0 =",
        "%.4f" % median(t_0),
        "\t(-",
        "%.4f" % (100 * ((median(t_0) - err_t_0[0]) / median(t_0))),
        "% / +",
        "%.4f" % (100 * ((err_t_0[1] - median(t_0)) / median(t_0))),
        "%)\n",
    )

    print(
        "Runs test:\n\n Numbers of points:\n n_m =",
        nm,
        "\n n_p =",
        np,
        "\n\n" "Observed number of runs n_R =",
        nR,
        "\n" "Expected number of runs R =",
        "%.4f" % R,
        "+/-",
        "%.4f" % sigma_R,
        "\n" "The standard normal distribution score Z =",
        "%.4f" % Z,
    )

    # ----------------------------------------------------------------------
    # plot section
    # ----------------------------------------------------------------------

    f_c = []  # luminescence decay as exp-model

    # Reconstruct the single-exponential model from the bootstrap medians.
    for i in range(len(t_c)):
        f_c.append(median(I_0) * exp(-(t_c[i]) / median(t_0)))

    suptitle(r"Decay kinetics of BaF$_2$ 78 nm nanoparticles", fontsize=18)

    subplot(211)
    plot(t_c, y_c / max(y_c), "bo", label=r"$y(t)$")  # all graphs are normalized
    plot(t_c, g_c / max(g_c), "ro", label=r"$g(t)$")
    plot(t_c, y_lmfit / max(y_lmfit), "m-", label="fitting curve")
    plot(t_c, f_c / max(f_c), "g.--", label=r"$f(t)$")

    xlabel("Time (ns)", fontsize=15)
    ylabel("Intensity (a.u.)", fontsize=16)
    legend(loc=1)

    subplot(212)
    stem(t_c, residuals, linefmt="g--", markerfmt="bs", basefmt="r-")
    xlabel("Time (ns)", fontsize=15)
    ylabel(r"Residuals $y - y_{model}$", fontsize=16)

    subplots_adjust(hspace=0.3, wspace=0.3, right=0.95, top=0.92)
    show()

    # ------------Histograms----------------------------

    suptitle(r"Decay kinetics of BaF$_2$ 78 nm nanoparticles", fontsize=18)

    subplot(121)
    hist(I_0, color="green")
    xlabel(r"$I_0$ (a.u.)", fontsize=16)
    ylabel(r"Frequency", fontsize=16)

    subplot(122)
    hist(t_0, color="green")
    xlabel(r"$t_0$ (ns)", fontsize=16)
    ylabel("Frequency", fontsize=16)

    subplots_adjust(hspace=0.4, left=0.1, right=0.95, top=0.92)
    show()
Beispiel #24
0
                 ])

# --- Fit of RMS velocity vs. time, with an assumed 2 % relative uncertainty.
x = time
y = v_rms
yerr = y*0.02  # uniform 2 % relative errors

params = lmfit.Parameters()
 
params.add('A', value = 9.0)
params.add('B', value = 0.65)
 
# Least-squares fit; fit_fit is the residual function for lmfit.
result = lmfit.minimize(fit_fit, params, args = (x, y, yerr))
 
# Reconstruct the fitted values from data + residuals.
fit_values  = y + result.residual
 
lmfit.report_errors(result.params)

# Dense abscissa for plotting the smooth model curve.
x_plot = np.linspace(x.min(),x.max(),200)

figure = pyplot.figure(0)

#print x_plot
#print fit_model(result.params,x_plot)
  
pyplot.errorbar(x,y,yerr, linestyle='None',markersize = 4.0,fmt='o',color='black')
pyplot.plot(x_plot,fit_model(result.params,x_plot),linewidth = 3.0)

#pyplot.xscale('log')
pyplot.yscale('log',basey = 10,subsy=[2, 3, 4, 5, 6, 7, 8, 9])   

pyplot.show()
Beispiel #25
0
                                         width[t_selection]))

# Fit the width trend vs. the percent time axis within the selected window.
params2 = start_params(time_w_pct[t_selection], width[t_selection])
lmfit.minimize(residuals, params2, args=(time_w_pct[t_selection],
                                         width[t_selection]))

# Same fit for the center data, with a bounded background and zero slope start.
params_center = start_params(time_w_pct[t_selection], center[t_selection])
params_center['background'].value = np.nanmean(width)
params_center['background'].min = 0
params_center['background'].max = 110
params_center['slope'].value = 0
lmfit.minimize(residuals, params_center, args=(time_w_pct[t_selection],
                                               center[t_selection]))

print '\nparams1:'
lmfit.report_errors(params1)
print '\nparams2:'
lmfit.report_errors(params2)
print '\nparams_center:'
lmfit.report_errors(params_center)

# 2D histograms of width/center vs. the two time axes.
img1 = aolPyModules.plotting.center_histogram_2d(time_w_dither, width,
                                                 time_ax, width_ax)
img2 = aolPyModules.plotting.center_histogram_2d(time_w_pct, width,
                                                 time_ax, width_ax)
img_center = aolPyModules.plotting.center_histogram_2d(time_w_pct, center,
                                                       time_ax, center_ax)

# Histogram-weighted column means (mean width/center per time bin).
mean1 = img1.T.dot(width_ax) / img1.sum(0)
mean2 = img2.T.dot(width_ax) / img2.sum(0)
mean_center = img_center.T.dot(center_ax) / img_center.sum(0)
Beispiel #26
0
#print omegataus
#print masterchi
# Master-curve fit: stretched-relaxation parameters with box constraints.
params['beta'].vary=True
params['logK_dd'].value=0
params['logK_dd'].min=-2
params['logK_dd'].max=2
params['logtau'].value=0
params['logtau'].min=-3
params['logtau'].max=3
omegataus=np.array(omegataus)
out=minimize(residual,params,args=(omegataus,masterchi))
result=omegataus+out.residual
fit=residual(params,omegataus)
print 'beta '+str(params['beta'].value)

report_errors(params)
#### update parameters and adapt the output

# Shift the stored correlation times by the fitted logtau and rewrite tau.dat.
while True:
	tauout=open('tau.dat','w')
	for i in range(0,taus.__len__()):
		taus[i]=taus[i]+params['logtau'].value
		tauout.write(str(temps[i])+' '+str(taus[i])+'\n')
	break
omegataus=[]
masterchi=[]

# Reset the axes for the next master-curve plot (log-log).
ax.cla()
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_title('Masterkurve')
                params.add('x0_b',value=p0[4])
                params.add('y0_b',value=p0[5])
            
                galtype = galsim.Gaussian
                imsize = 49
                pixel_scale = 0.2


                print " ************** About to fit"

                result = lmfit.minimize(mssg_drawLibrary.resid_2obj,   params,   args=(blend, imsize,imsize,pixel_scale, galtype, galtype ))

                ipdb.set_trace()
            
            # Report the parameters to the interpreter screen                        
                lmfit.report_errors(result.params)
            
                sys.exit()










######################################################### Deblend
# Use deblending code to separate them
#    templates = for each img
Beispiel #28
0
define how to compare data to the function
'''
def hanle_fit(params , x, data, err):
    """Error-weighted residuals of the Hanle model, for lmfit.minimize."""
    deviation = hanle_model(params, x) - data
    return deviation / err




# --- Hanle-effect fit: Lorentzian amplitude/width/offset vs. magnetic field.
params = lmfit.Parameters()
params.add('amplitude', value = 172764)
params.add('gamma', value = 10)
params.add('offset', value = 8264)

# Measured field points (shifted by +0.825), counts and count errors.
x_data = (np.array([12.00,10.97,10.03,9.29,8.29,7.10,6.09,5.10,4.05]))+0.825 #11.1
y_err = np.array([434,442,435,434,439,429,438,437,435])
y_data = np.array([30917,30590,31248,31314,31346,32141,31766,32516,33161]) #4120

result = lmfit.minimize(hanle_fit, params, args = (x_data, y_data, y_err))

# Reconstruct fitted values from data + residuals (old-lmfit idiom).
fit_values  = y_data + result.residual
lmfit.report_errors(params)

# Fractional contrast of the Hanle signal.
contrast = params['amplitude'].value/params['offset'].value/params['gamma'].value/2
print 'contrast = ',contrast


#lmfit.report_errors(params)
pyplot.errorbar(x_data,y_data,y_err,ls='None',markersize = 3.0,fmt='o')
pyplot.plot(np.arange(-40,40,0.1),hanle_model(params,np.arange(-40,40,0.1)))
pyplot.show()
def fit_conic(xx, yy, Rh, thh, PAh, xxh, yyh,
              symmetrical=True, freeze_theta=False, full=False, Rmin=0.0):
    """Fit conic section to the data xx, yy

    The hyperbola is described by 5 parameters:

    Rh : radius of curvature on the axis
    thh: asymptotic angle of wings from axis
    PAh: Position Angle of axis (pegged to xhh, yhh if symmetrical=True)
    xxh: xx position of center-of-curvature
    yyh: yy position of center-of-curvature

    If symmetrical=True, then the axis must pass through the star (origin of xx, yy frame)
    => tan(PAh) = xxh/yyh

    The hyperbola is given as y(x), where y goes along the hyperbola
    axis and x is perpendicular to the hyperbola axis.

    The origin of the xy frame (x = y = 0) is the "nose" of the
    hyperbola, with coordinates in the xx-yy frame of 

    xx0 = xxh + Rh sin(PAh)
    yy0 = yyh + Rh cos(PAh)

    The y-axis is along the [xx, yy] unit vector: 

    yhat = [-sin(PAh), -cos(PAh)]

    The x-axis is along the [xx, yy] unit vector: 

    xhat = [-cos(PAh), -sin(PAh)]

    Unit distance in the xy frame corresponds to Rh in the xx-yy frame

    So that x = ((xx - xx0) (-cos(PAh)) + (yy - yy0) (-sin(PAh)))/Rh
            y = ((xx - xx0) (-sin(PAh)) + (yy - yy0) (-cos(PAh)))/Rh

    Returns (R, th, PA, xx, yy) best-fit values; with full=True a dict of
    residuals and transformed coordinates is appended.
    """
    
    def model_minus_data(params, xx, yy, full=False):
        """Function to minimize - gives equal weight to all points and is in units of arcsec"""
        # Unpack parameters
        PAh = params["PA"].value
        Rh = params["R"].value
        xxh = params["xx"].value
        yyh = params["yy"].value
        thh = params["th"].value
        # Transform from (xx,yy) to (x,y) frame
        sPA, cPA = np.sin(np.radians(PAh)), np.cos(np.radians(PAh))
        xx0, yy0 = xxh + Rh*sPA, yyh + Rh*cPA
        # NOTE(review): the sign of the first term of x differs from the
        # docstring formula -- harmless if yconic_th is even in x; confirm.
        x = (-(xx0 - xx)*cPA + (yy0 - yy)*sPA)/Rh
        y = ((xx0 - xx)*sPA + (yy0 - yy)*cPA)/Rh
        residual = Rh*(yconic_th(x, thh) - y)
        if full:
            return {"residual": residual, "x": x, "y": y}
        else:
            return residual

    # Rough size of the data footprint, used to bound the curvature radius.
    approx_scale = abs(xx[0]) + abs(xx[-1]) + abs(yy[0]) + abs(yy[-1])

    # Pack arguments into parameters for the fit
    params = lmfit.Parameters()
    params.add("PA", value=PAh)
    params.add("R", value=Rh, min=Rmin, max=2*approx_scale)    
    params.add("xx", value=xxh)
    params.add("yy", value=yyh)
    params.add("th", value=thh, min=-90.0, max=90.0, vary=not freeze_theta)
    if symmetrical:
        # Constrain the axis to pass through the origin (the star).
        params["xx"].expr = "yy * tan(PA*pi/180.0)"
    # Old lmfit API: minimize() updates `params` in place, so the return
    # value is not needed here.
    lmfit.minimize(model_minus_data, params, args=(xx, yy))
    lmfit.report_errors(params)

    # Unpack parameters again for results to return.
    # Tuple parenthesized: the bare `for k in "R", "th", ...` form was
    # Python-2-only syntax (SyntaxError under Python 3).
    results = [params[k].value for k in ("R", "th", "PA", "xx", "yy")]
    if full:
        return tuple(results + [model_minus_data(params, xx, yy, True)])
    else:
        return tuple(results)
Beispiel #30
0
def grid_fit(nparray, params, barr, plotflag = True):
    """ Fit a grid to a 2D data set

    Fits the spacing, origin and amplitude of a regular line grid to the
    2D array `nparray` by least squares, against the background image
    `barr`.  `params` must provide 's', 'ov', 'oh' and 'a' and is updated
    in place by the fit; it is also returned.  If `plotflag` is True the
    fitted grid is overlaid on an image and saved to 'gridfit.jpg'.
    """
    def grid(ov, oh, s, h, w, a):
        """ Return a 2d grid
        
        h = height of image
        w = width of image
        ov = origin vertical
        oh = origin horizontal
        s = spacing
        a = amplitude
        
        """
        g = np.zeros((h,w)) #nparray full of zeros
        nv = int(w/s +2) #+2 just to make sure
        nh = int(h/s+2)
        
        #calculate grid origin offset
        _hp = ov
        while _hp >= s:
            _hp = _hp - s

        #print _hp
        for _nv in xrange(nv): # for every vertical line
            #calculate horizontal position   
            _pos = _nv * s + _hp
            #print _pos
            for _h in xrange(h): #for every height
                try:
                    # Linear sub-pixel interpolation: split the amplitude
                    # between the two neighbouring integer columns.
                    g[_h, int(_pos)] = (1-(_pos - int(_pos))) *a
                    g[_h, int(_pos+1)] = (1-(int(_pos+1)-_pos)) *a
                except IndexError:
                    pass
        
        _vp = oh
        while _vp >= s:
            _vp = _vp -s
            
        for _nh in xrange(nh):
            _pos = _nh * s + _vp
            for _v in xrange(w):
                try:
                    # Same sub-pixel split for the horizontal lines.
                    g[int(_pos), _v] = (1-(_pos - int(_pos))) *a
                    g[int(_pos+1), _v] = (1-(int(_pos+1)-_pos)) *a
                except IndexError:
                    pass
           
        return g
    
    def res(params, nparray):
        # Residuals for lmfit: data minus (background minus grid model).
        s = params['s'].value
        ov = params['ov'].value
        oh = params['oh'].value
        a = params['a'].value
        h = nparray.shape[0]
        w = nparray.shape[1]
        #b = params['b'].value
        #barr = np.ones((h,w))*b
        
        model = grid(ov, oh, s, h, w, a)
        
        err = nparray - (barr-model)
        err = err.flatten()
        
        return err
    
    #do fit
    minimize(res, params, args=(nparray,))
    #comma is important, since a tuple is expected!
    report_errors(params)
    
    if plotflag == True:
        s = params['s'].value
        ov = params['ov'].value
        oh = params['oh'].value
        a = params['a'].value
        h = nparray.shape[0]
        w = nparray.shape[1]
        #b = params['b'].value
        #barr = np.ones((h,w))*b
        
        g = grid(ov, oh, s, h, w, a)
        #sp.misc.imsave('grid.jpg', g)
        #fit = nparray+g-barr
        plt.cla()
        plt.clf()
        # NOTE(review): `img` is not defined in this function -- presumably a
        # module-level global; should this be `nparray`? Verify before reuse.
        plt.imshow(img)
        #plt.hold(True)
        plt.imshow(g, alpha=0.5)
        #plt.colorbar()
        plt.savefig('gridfit.jpg')
        sp.misc.imsave('grid.jpg', g)
        plt.show()
        
        
        
    return params
Beispiel #31
0
# do fit, here with leastsq model
result1 = minimize(quarterPowerFunction, params1, args=(x, data))


# Second fit: free power-law model for comparison with the quarter-power fit.
params2 = Parameters()
params2.add('alpha', value= 1,  min=0)
params2.add('power', value= 1)
result2 = minimize(fcn2min, params2, args=(x, data))

# calculate final result
final1 = data + result1.residual

final2 = data + result2.residual

# write error report
report_errors(params1)

# Dump the parameter set and the standard error of alpha.
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(params1)
print params1.get('alpha')
p=params1.get('alpha')
print "standard error ", p.stderr

total= np.sum(result1.residual)

print "sum of residuals: ", total
#print "xtol: ", result1.xtol
print "reduced chi-square: ", result1.redchi
#print "asteval", result1.asteval
print "message:", result1.message
print "ier:", result1.ier
    mcmc_fit.flatchain, axis=0)

mcmc_model = generate_spiderman_model(times, planet_info, nrg_ratio,
                                      temp_night, delta_T, T_star,
                                      spider_params)

plot_now = True
if plot_now:
    ax = plot_model(times, data, fpfs, label='STARRY', ax=None)
    ax = plot_model(times, init_model, fpfs, label='Init Model', ax=ax)
    ax = plot_model(times, fit_model, fpfs, label='MLE Model', ax=ax)
    ax = plot_model(times, mcmc_model, fpfs, label='MCMC Model', ax=ax)

corner_plot = False
if corner_plot:
    report_errors(mcmc_fit.params)

    res = mcmc_fit
    res_var_names = np.array(res.var_names)
    res_flatchain = np.array(res.flatchain)
    res_df = DataFrame(res_flatchain.copy(), columns=res_var_names)
    res_df = DataFrame(res_flatchain.copy(), columns=res_var_names)
    res_df.sort_index('columns', inplace=True)

    def add_newline(label):
        """Return *label* with a trailing newline (for corner-plot axis labels)."""
        return label + '\n'

    color = 'indigo'

    n_sigma = 3
    levels = [
Beispiel #33
0
 def report(self, params):
     """Print an lmfit fit/error report for *params* (delegates to lmfit)."""
     lmfit.report_errors(params)
Beispiel #34
0
                            ind_kdtree=ind_kdtree,
                            pld_intensities=pld_intensities,
                            method=method.lower(),
                            gw_kdtree=gw_kdtree,
                            transit_indices=transit_indices,
                            x_bin_size=x_bin_size,
                            y_bin_size=y_bin_size)

print('Fitting the Model')
# Setup up the call to minimize the residuals (i.e. ChiSq)
mle0 = Minimizer(partial_residuals, initialParams)
start = time()  # wall-clock timing of the fit
fitResult = mle0.leastsq()  # Go-Go Gadget Fitting Routine
print("LMFIT operation took {} seconds".format(time() - start))

# Print best-fit values and uncertainties for every parameter.
report_errors(fitResult.params)

print('Establishing the Best Fit Solution')

bf_model_set = skywalker.generate_best_fit_solution(
    fitResult.params,
    times=times,
    xcenters=xcenters,
    ycenters=ycenters,
    fluxes=fluxes,
    knots=knots,
    keep_inds=keep_inds,
    method=method,
    nearIndices=nearIndices,
    ind_kdtree=ind_kdtree,
    gw_kdtree=gw_kdtree,
Beispiel #35
0
def test_report_errors_deprecated(fitresult):
    """Verify that a FutureWarning is shown when calling report_errors."""
    # report_errors is deprecated (use fit_report); the warning must fire.
    with pytest.warns(FutureWarning):
        report_errors(params=fitresult.params)
Beispiel #36
0
	amp = pars['amp'].value
	per = pars['period'].value
	shift = pars['shift'].value
	decay = pars['decay'].value
	
	if abs(shift) > np.pi/2:
		shift = shift - np.sign(shift)*np.pi
	model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
	if data is None:
		return model
	return (model - data)

# Synthetic data: n noisy samples of the true damped-sine model on [xmin, xmax].
n = 20
xmin = 10.
xmax = 250.0
noise = np.random.normal(scale=0.7215, size=n)
x = np.linspace(xmin, xmax, n)
data  = residual(p_true, x) + noise

# Deliberately rough starting values to exercise the fitter.
fit_params = Parameters()
fit_params.add('amp', value=13.0)
fit_params.add('period', value=2)
fit_params.add('shift', value=0.0)
fit_params.add('decay', value=0.02)
out = minimize(residual, fit_params, args=(x,), kws={'data':data})
fit = residual(fit_params, x)
report_errors(fit_params)
plt.plot(x,fit)
plt.plot(x,data,linestyle='None',marker='o')
plt.show()
#pyplot.plot(data_x,data_y)

# --- Microwave-resonance fit: Lorentzian-type model with sidebands.
params = lmfit.Parameters()
params.add('amplitude', value=33453, min=0)
params.add('gamma', value=24.0)
params.add('offset', value=0.05)
params.add('beta', value=1.60, min=0)
params.add('Omega', value=30.704)
params.add('B', value=1.20)
params.add('center', value=227)

result = lmfit.minimize(micro_fit, params, args=(data_x, data_y, y_err))

# Reconstruct fitted values from data + residuals (old-lmfit idiom).
fit_values = data_y + result.residual

lmfit.report_errors(params)

# Normalize to the peak height amplitude / (gamma/2)^2.
normalization = params['amplitude'] / (params['gamma'] / 2.0)**2

# Plot model and data centred on the fitted resonance position.
pyplot.plot(np.arange(120, 340, 0.1) - params['center'],
            micro_model(params, np.arange(120, 340, 0.1)) / normalization,
            linewidth=1.5)
pyplot.errorbar(data_x - params['center'],
                data_y / normalization,
                data_yerr / normalization,
                linestyle='None',
                markersize=4.0,
                fmt='o',
                color='black')
pyplot.axis([-110, 110, 0.04, 0.95])
Beispiel #38
0
# Synthetic data from the true parameters plus Gaussian noise.
data = residual(p_true, x) + noise

fit_params = Parameters()
fit_params.add('amp', value=13.0)
fit_params.add('period', value=2)
fit_params.add('shift', value=0.0)
fit_params.add('decay', value=0.02)

out = minimize(residual, fit_params, args=(x, ), kws={'data': data})

fit = residual(fit_params, x)

print(' N fev = ', out.nfev)
print(out.chisqr, out.redchi, out.nfree)

report_errors(fit_params)
#ci=calc_ci(out)
# Explicit (F-test based) confidence intervals, with parameter traces.
ci, tr = conf_interval(out, trace=True)
report_ci(ci)

if HASPYLAB:
    names = fit_params.keys()
    i = 0
    gs = pylab.GridSpec(4, 4)
    sx = {}
    sy = {}
    for fixed in names:
        j = 0
        for free in names:
            if j in sx and i in sy:
                ax = pylab.subplot(gs[i, j], sharex=sx[j], sharey=sy[i])