import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as iofits
from scipy.stats import norm
from astropy.modeling import models, fitting

# Params
fmax, fmin, nbin = 3000, 0, 300

fits = iofits.open('data/fits/fpC-001729-r3-0083.fit.gz')
img = fits[0].data

# Fit a normal distribution to the pixel value distribution
param = norm.fit(img.flatten())
print(param[0], param[1])
print('median: {}, mean: {}'.format(np.median(img), img.mean()))

# Evaluate the fitted PDF at the bin centres
x = np.linspace(fmin, fmax, nbin + 1) + (fmax - fmin) / nbin / 2
x = x[:-1]
pdf_fitted = norm.pdf(x, loc=param[0], scale=param[1])

# Cross-check with a least-squares Gaussian1D fit to the evaluated PDF
fit = fitting.LevMarLSQFitter()
gauss_init = models.Gaussian1D(mean=x[np.argmax(pdf_fitted)], stddev=100,
                               amplitude=max(pdf_fitted))
result = fit(gauss_init, x, pdf_fitted)
print(result, result.mean[0])
print(gauss_init)

plt.plot(x, pdf_fitted, 'r-')
plt.plot(x, result(x), 'b.')
plt.xlim([0, fmax])
plt.show()
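# A more direct variant of the pattern above: fit the Gaussian1D to the
# binned histogram itself rather than to the evaluated scipy PDF. This is a
# minimal, self-contained sketch on synthetic data (no FITS file needed; all
# values below are illustrative).
import numpy as np
from scipy.stats import norm
from astropy.modeling import models, fitting

rng = np.random.default_rng(0)
pixels = rng.normal(loc=1100.0, scale=40.0, size=100_000)  # fake sky pixels

# moment-based fit of a normal distribution to the pixel values
mu, sigma = norm.fit(pixels)

# least-squares Gaussian1D fit to the binned histogram as a cross-check
hist, edges = np.histogram(pixels, bins=300, density=True)
centers = 0.5 * (edges[:-1] + edges[1:])
g_init = models.Gaussian1D(amplitude=hist.max(), mean=mu, stddev=sigma)
g_fit = fitting.LevMarLSQFitter()(g_init, centers, hist)
print(mu, sigma, g_fit.mean.value, g_fit.stddev.value)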
def find_source(im, guesspos=None, searchbox=None, fitbox=None, guessmeth='max', smooth=0, searchsmooth=3, guessFWHM=None, guessamp=None, guessbg=None, method='fast', sign=1, verbose=False, fixFWHM=False, fixpos=False, minamp=0.01, maxamp=None, plot=False, maxFWHM=None, minFWHM=None, posradius=None, silent=False): """ find the point source in an image and provide its best fit parameters from a Gaussian fit Parameters: - im: image to be searched - guesspos: 2D array with the (y, x) guessed position for the point source position. If None, use the middle of the whole image - searchbox: int or 2D array with (y, x) size, size of box to be searched centered on the guesspos. If None, search the whole image - fitbox: int or 2D array with (y, x) size, size of box used for the fit. If None, use searchbox - guessmeth: method for guesstimating the point source position (currently only 'max' and other). For 'max' use the maximum brightness pixel in the searchbox. Other: use centre of the searchbox - smooth: optional smoothing with a Gaussian. The value specifies the width of the Gaussian used for the smoothing - searchsmooth: optional smoothing only for guesstimating source position - guessFWHM: guess for the FWHM of the source. If None, use middle between minFWHM and maxFWHM or 5 if the former are not provided - guessamp: guess for the amplitude of the source. If None, use the pixel brightness at the guess position - guessbg: guess for the background level in the image. If None, use the median of the image. - method: method used for the fitting: 'fast': use LevMarLSQFitter from astropy, 'mpfit': use the MPFIT package - sign: sign of the point source to be searched - fixFWHM: fix the FWHM of the source to the guess value - fixpos: fix the position of the source to the guess value - minamp, maxamp: minimum and maximum allowed values for the amplitude of the source - minFWHM, maxFWHM: minimum and maximum allowed values for the FWHM of the source - posradius: maximum allowed radius in pix around the guess position for the source location. 
If None, the whole searchbox is allowed """ s2f = (2.0 * np.sqrt(2.0 * np.log(2.0))) f2s = 1.0 / s2f s = np.shape(im) if sign < 0: im = -im if smooth > 0: im = gaussian_filter(im, sigma=smooth) if guesspos is None: guesspos = 0.5 * np.array(s) if verbose: print("GET_POINTSOURCE: method: ", method) print("GET_POINTSOURCE: s: ", s) print("GET_POINTSOURCE: sign: ", sign) print("GET_POINTSOURCE: searchsmooth: ", searchsmooth) print("GET_POINTSOURCE: guessmeth: ", guessmeth) print("GET_POINTSOURCE: initial guessamp: ", guessamp) print("GET_POINTSOURCE: initial guessbg: ", guessbg) print("GET_POINTSOURCE: intial guesspos: ", guesspos) print("GET_POINTSOURCE: intial searchbox: ", searchbox) # --- define the search box if searchbox is not None: # test if the box provided is an integer, in which case blow up to array if not hasattr(searchbox, "__len__"): searchbox = np.array([searchbox, searchbox]) searchbox = np.array(searchbox, dtype=int) sx0 = np.max([0, int(np.round(guesspos[1] - 0.5 * searchbox[1]))]) sx1 = np.min([s[1], int(np.round(guesspos[1] + 0.5 * searchbox[1]))]) sy0 = np.max([0, int(np.round(guesspos[0] - 0.5 * searchbox[0]))]) sy1 = np.min([s[0], int(np.round(guesspos[0] + 0.5 * searchbox[0]))]) searchim = im[sy0:sy1, sx0:sx1] if verbose: print("GET_POINTSOURCE: sy0, sy1, sx0, sx1 ", sy0, sy1, sx0, sx1) searchbox = np.array(np.shape(searchim)) # print("GET_POINTSOURCE: ss: ", np.shape(searchim)) else: searchbox = np.array(s, dtype=int) sx0 = 0 sy0 = 0 searchim = im if verbose: print("GET_POINTSOURCE: final searchbox: ", searchbox) # --- should the first guess be based on the max or on the position? if guessmeth == 'max': if searchsmooth > 0: # smoothing is quick and thus on by default ssim = gaussian_filter(searchim, sigma=searchsmooth, mode='nearest') guesspos = np.array(np.unravel_index(np.nanargmax(ssim), searchbox)) if plot is True: plt.figure(1, figsize=(3, 3)) plt.imshow(ssim, origin='bottom', interpolation='nearest') plt.title('Smoothed Search image') plt.show() # print("GET_POINTSOURCE: guesspos: ", guesspos) else: guesspos = np.array( np.unravel_index(np.nanargmax(searchim), searchbox)) else: guesspos = 0.5 * searchbox if verbose: print("GET_POINTSOURCE: guesspos in searchbox: ", guesspos) print("GET_POINTSOURCE: guesspos in total image: ", guesspos[0] + sy0, guesspos[1] + sx0) # print("GET_POINTSOURCE: guesspos: ", guesspos) guesspos = np.array([guesspos[0] + sy0, guesspos[1] + sx0]) if plot is True: plt.clf() plt.close(1) plt.figure(1, figsize=(3, 3)) plt.imshow(searchim, origin='bottom', interpolation='nearest') plt.title('Search image') plt.show() if verbose: print("GET_POINTSOURCE: intial fitbox: ", fitbox) # --- define the fit box if fitbox is not None: # test if the box provided is an integer, in which case blow up to array if not hasattr(fitbox, "__len__"): fitbox = np.array([fitbox, fitbox]) fitbox = np.array(fitbox, dtype=int) fx0 = int(np.round(guesspos[1] - 0.5 * fitbox[1])) fx1 = int(np.round(guesspos[1] + 0.5 * fitbox[1])) fy0 = int(np.round(guesspos[0] - 0.5 * fitbox[0])) fy1 = int(np.round(guesspos[0] + 0.5 * fitbox[0])) guesspos = 0.5 * fitbox # print('guesspos, fx0, fx1, fy0, fy1 ', guesspos, fx0, fx1, fy0, fy1) # for the new guess position, we have to take into account if the # fitbox is smaller than expected because being close to the edge if fx0 < 0: guesspos[1] = guesspos[1] + fx0 fx0 = 0 # if fx1 > s[1]: # guesspos[1] = guesspos[1] - (fx1 - s[1]) # fx1 = s[1] if fy0 < 0: guesspos[0] = guesspos[0] + fy0 fy0 = 0 # if fy1 > s[0]: # guesspos[0] = 
guesspos[0] - (fy1 - s[0]) # fy1 = s[0] fitim = im[fy0:fy1, fx0:fx1] fs = np.array(np.shape(fitim)) else: fitim = im fx0 = 0 fy0 = 0 fs = np.shape(fitim) fitbox = fs if verbose: print("GET_POINTSOURCE: final fitbox: ", fitbox) print("GET_POINTSOURCE: final guesspos in fitbox: ", guesspos) if plot is True: plt.figure(1, figsize=(3, 3)) plt.imshow(fitim, origin='bottom', interpolation='nearest') plt.title('(Sub)image to be fitted') plt.show() if guessFWHM is None: if maxFWHM is not None and minFWHM is not None: guessFWHM = 0.5 * (maxFWHM + minFWHM) elif maxFWHM is not None: guessFWHM = 0.5 * maxFWHM elif minFWHM is not None: guessFWHM = 2 * minFWHM else: guessFWHM = 5 # --- estimate the BG with ignoring central source (use either 3*FWHM or # 80% of image whatever is smaller). First generate a background image # of sufficient size bgbox = int(np.round(6 * guessFWHM)) if verbose: print('bgbox:', bgbox) print("bgcenpos: ", [fy0 + 0.5 * fitbox[0], fx0 + 0.5 * fitbox[1]]) bgim = _crop_image(im, box=bgbox, cenpos=[fy0 + 0.5 * fitbox[0], fx0 + 0.5 * fitbox[1]], exact=False) ignore_aper = np.min([3 * guessFWHM, 0.8 * np.max(s)]) bgval, bgstd = _measure_bkg(bgim, ignore_aper=ignore_aper) if guessbg is None: guessbg = bgval if guessamp is None: guessamp = fitim[int(guesspos[0]), int(guesspos[1])] - guessbg if maxFWHM is None: maxFWHM = np.max(s) if minFWHM is None: minFWHM = 1 maxsigma = maxFWHM * f2s minsigma = minFWHM * f2s if posradius is not None: minx = guesspos[1] - posradius maxx = guesspos[1] + posradius miny = guesspos[0] - posradius maxy = guesspos[0] + posradius else: minx = 0 maxx = fs[1] miny = 0 maxy = fs[0] sigma = guessFWHM * f2s guess = [guessbg, guessamp, guesspos[1], guesspos[0], sigma, sigma, 0] if verbose: print(' - GET_POINTSOURCE: Guess: ', guess) print(' - GET_POINTSOURCE: minFWHM: ', minFWHM) print(' - GET_POINTSOURCE: maxFWHM: ', maxFWHM) print(' - GET_POINTSOURCE: minsigma: ', minsigma) print(' - GET_POINTSOURCE: maxsigma: ', maxsigma) print(' - GET_POINTSOURCE: minamp: ', minamp) print(' - GET_POINTSOURCE: maxamp: ', maxamp) print(' - GET_POINTSOURCE: minx,maxx, miny,maxy: ', minx, maxx, miny, maxy) y, x = np.mgrid[:fs[0], :fs[1]] g_init = models.Gaussian2D(amplitude=guessamp, x_mean=guesspos[1], y_mean=guesspos[0], x_stddev=sigma, y_stddev=sigma) c_init = models.Const2D(amplitude=guessbg) init = g_init + c_init gim = init(x, y) if plot is True: plt.figure(1, figsize=(3, 3)) plt.imshow(gim, origin='bottom', interpolation='nearest') plt.title('Guess') plt.show() if np.isnan(fitim).any(): if not silent: print( "GET_POINTSOURCE: WARNING: image to be cropped contains NaNs!") fitim[np.isnan(fitim)] = guessbg # set any NaNs to 0 for crop to work if ('mpfit' in method): # params=[] - initial input parameters for Gaussian function. 
# (height, amplitude, x, y, width_x, width_y, rota) # parameter limits minpars = [0, minamp, minx, miny, minsigma, minsigma, 0] maxpars = [0, maxamp, maxx, maxy, maxsigma, maxsigma, 0] limitedmin = [False, True, True, True, True, True, False] limitedmax = [False, False, True, True, True, True, False] # ensure that the fit is positive if the sign is 1 # (or negative if the sign is -1) if minamp is None: limitedmin[1] = False if maxamp: limitedmax[1] = True if fixFWHM: limitedmin[4] = True limitedmax[4] = True minpars[4] = sigma - 0.001 maxpars[4] = sigma + 0.001 limitedmin[5] = True limitedmax[5] = True minpars[5] = sigma - 0.001 maxpars[5] = sigma + 0.001 if fixpos: limitedmin[2] = True limitedmax[2] = True minpars[2] = guesspos[1] - 0.001 maxpars[2] = guesspos[1] + 0.001 limitedmin[3] = True limitedmax[3] = True minpars[3] = guesspos[0] - 0.001 maxpars[3] = guesspos[0] + 0.001 res = _gaussfit(fitim, err=None, params=guess, returnfitimage=True, return_all=1, minpars=minpars, maxpars=maxpars, limitedmin=limitedmin, limitedmax=limitedmax) params = res[0][0] perrs = res[0][1] if perrs is None: perrs = np.full(6, -1, dtype=float) fit = res[1] elif 'fast' in method: # ensure that the fit is positive init.amplitude_0.bounds = (minamp, maxamp) init.x_mean_0.bounds = (minx, maxx) init.y_mean_0.bounds = (miny, maxy) init.x_stddev_0.bounds = (minsigma, maxsigma) # --- ensure that angle stays in useful pounds #init.theta_0.bounds = (-2*np.pi, 2*np.pi) # somehow fixing the angle does not work if fixFWHM: init.x_stddev_0.fixed = True init.y_stddev_0.fixed = True if fixpos: init.x_mean_0.fixed = True init.y_mean_0.fixed = True fit_meth = fitting.LevMarLSQFitter() # fit_meth = fitting.SimplexLSQFitter() # very slow # fit_meth = fitting.SLSQPLSQFitter() # not faster than LevMar g_fit = fit_meth(init, x, y, fitim, acc=1e-8) fit = g_fit(x, y) params = np.array([ g_fit.amplitude_1.value, g_fit.amplitude_0.value, g_fit.x_mean_0.value, g_fit.y_mean_0.value, g_fit.x_stddev_0.value, g_fit.y_stddev_0.value, g_fit.theta_0.value ]) perrs = params * 0 # this method does not provide uncertainty estimates # --- convert theta to deg: params[6] = params[6] / np.pi * 180.0 # --- theta measures the angle from the x-axis, so we need to add 90 params[6] = params[6] + 90.0 # print(init.x_stddev_0.fixed, init.x_stddev_0.bounds) # print(g_fit.x_stddev_0.fixed, g_fit.x_stddev_0.bounds) else: print("GET_POINTSOURCE: ERROR: non-valid method requested: " + method + "\n returning None") return (None, None, None) # --- use the STD of the BG for the BG level uncertainty if larger than # error estimate if bgstd > perrs[0]: perrs[0] = bgstd if verbose: print("GET_POINTSOURCE: uncorrected fit params: ", params) print("GET_POINTSOURCE: uncorrected fit errs: ", perrs) # --- compute the position in the total image and switch x and y to agree # with the numpy convention temp = np.copy(params) params[2] = temp[3] + fy0 params[3] = temp[2] + fx0 temp = np.copy(perrs) perrs[2] = temp[3] perrs[3] = temp[2] # --- if the y FWHM is larger than the one in x direction, switch them so # that the first FWHM is the major axis one. 
if params[5] > params[4]: temp = params[4] params[4] = params[5] params[5] = temp temp = perrs[4] perrs[4] = perrs[5] perrs[5] = temp params[6] = params[6] + 90.0 # --- normalise the angle params[6] = params[6] % 180 if params[6] < 0: params[6] = params[6] + 180 # if sign < 0: params[0] = -params[0] params[1] = -params[1] fit = -fit fitim = -fitim if verbose: print(" - GET_POINTSOURCE: fitted params: ", params) # convert sigma to FWHM for the output: params[4:6] = params[4:6] * s2f if plot is True: plt.figure(1, figsize=(3, 3)) plt.imshow(fit, origin='bottom', interpolation='nearest') plt.title('Fit with sign') plt.show() plt.close(1) plt.figure(1, figsize=(3, 3)) plt.imshow(fitim - fit, origin='bottom', interpolation='nearest') plt.title('Residual') plt.show() plt.close(1) ims = [fitim, fit, fitim - fit] return (params, perrs, ims)
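# A minimal usage sketch for find_source on a synthetic image. It assumes the
# definition above and its module-level helpers (gaussian_filter, _crop_image,
# _measure_bkg) are in scope; all parameter values here are illustrative, not
# defaults recommended by the author.
import numpy as np
from astropy.modeling import models

yy, xx = np.mgrid[:128, :128]
im = models.Gaussian2D(amplitude=50.0, x_mean=70.5, y_mean=60.2,
                       x_stddev=2.0, y_stddev=2.0)(xx, yy)
im = im + np.random.default_rng(0).normal(0.0, 1.0, im.shape)  # sky noise

# params come back as [bg, amplitude, y, x, major FWHM, minor FWHM, angle]
params, perrs, ims = find_source(im, searchbox=40, fitbox=21,
                                 guessFWHM=5.0, method='fast')
print(params[2], params[3], params[4])  # y, x, major-axis FWHM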
def _fit_lines(spectrum, model, fitter=fitting.LevMarLSQFitter(), exclude_regions=None, weights=None, window=None, ignore_units=False, **kwargs): """ Fit the input model (initial conditions) to the spectrum. Output will be the same model with the parameters set based on the fitting. spectrum, model -> model """ # # If we are to exclude certain regions, then remove them. # if exclude_regions is not None: spectrum = excise_regions(spectrum, exclude_regions) if isinstance(weights, str): if weights == 'unc': uncerts = spectrum.uncertainty if uncerts is not None: weights = uncerts.array ** -2 else: logging.warning("Uncertainty values are not defined, but are " "trying to be used in model fitting.") else: raise ValueError("Unrecognized value `%s` in keyword argument.", weights) elif weights is not None: # Assume that the weights argument is list-like weights = np.array(weights) dispersion = spectrum.spectral_axis dispersion_unit = spectrum.spectral_axis.unit flux = spectrum.flux flux_unit = spectrum.flux.unit # # Determine the window if it is not None. There # are several options here: # window = 4 * u.Angstrom -> Quantity # window = (4*u.Angstrom, 6*u.Angstrom) -> tuple # window = (4, 6)*u.Angstrom -> Quantity # # # Determine the window if there is one # # In this case the window defines the area around the center of each model if window is not None and isinstance(window, (float, int)): center = model.mean indices = np.nonzero((spectrum.spectral_axis >= center-window) & (spectrum.spectral_axis < center+window)) dispersion = dispersion[indices] flux = flux[indices] if weights is not None: weights = weights[indices] # In this case the window is the start and end points of where we # should fit elif window is not None and isinstance(window, tuple): indices = np.nonzero((dispersion >= window[0]) & (dispersion < window[1])) dispersion = dispersion[indices] flux = flux[indices] if weights is not None: weights = weights[indices] elif window is not None and isinstance(window, SpectralRegion): try: idx1, idx2 = window.bounds if idx1 == idx2: raise Exception("Bad selected region.") extracted_regions = extract_region(spectrum, window) dispersion, flux = _combined_region_data(extracted_regions) dispersion = dispersion * dispersion_unit flux = flux * flux_unit except ValueError as e: return if flux is None or len(flux) == 0: raise Exception("Spectrum flux is empty or None.") input_spectrum = spectrum spectrum = Spectrum1D( flux=flux.value * flux_unit, spectral_axis=dispersion.value * dispersion_unit, wcs=input_spectrum.wcs, velocity_convention=input_spectrum.velocity_convention, rest_value=input_spectrum.rest_value) # # Compound models with units can not be fit. # # Convert the model initial guess to the spectral # units and then remove the units # model_unitless, dispersion_unitless, flux_unitless = \ _strip_units_from_model(model, spectrum, convert=not ignore_units) # # Do the fitting of spectrum to the model. # fit_model_unitless = fitter(model_unitless, dispersion_unitless, flux_unitless, weights=weights, **kwargs) # # Now add the units back onto the model.... # if not ignore_units: fit_model = _add_units_to_model(fit_model_unitless, model, spectrum) else: fit_model = QuantityModel(fit_model_unitless, spectrum.spectral_axis.unit, spectrum.flux.unit) return fit_model
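# _fit_lines is the private workhorse behind specutils' public fit_lines
# wrapper. A short sketch of the calling pattern it supports, on a synthetic
# emission line (assumes specutils is installed; line parameters are
# illustrative).
import numpy as np
import astropy.units as u
from astropy.modeling import models
from specutils import Spectrum1D
from specutils.fitting import fit_lines

wav = np.linspace(6530, 6600, 200) * u.AA
flux = (5 * np.exp(-0.5 * ((wav.value - 6563) / 3) ** 2) + 1) * u.Jy
spec = Spectrum1D(flux=flux, spectral_axis=wav)

g_init = models.Gaussian1D(amplitude=5 * u.Jy, mean=6563 * u.AA,
                           stddev=3 * u.AA)
# a (lower, upper) tuple for window restricts the fit to that spectral range
g_fit = fit_lines(spec, g_init, window=(6540 * u.AA, 6590 * u.AA))
print(g_fit.mean, g_fit.stddev)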
def gen_center_g2d(image, center_x, center_y, box_width, amp, x_std, y_std,
                   Theta, model_plotting=False):
    """
    PARAMETERS:
    image = 2D image containing the source; Type = Array
    center_x = x coordinate of the circular aperture; Type = float
    center_y = y coordinate of the circular aperture; Type = float
    box_width = half-width of the box around (center_x, center_y) used for
                the fit; Type = int
    amp = amplitude of the gaussian. Find from the projection curve along
          the center; Type = float
    x_std = Standard deviation of the Gaussian in x before rotating by
            theta; Type = float
    y_std = Standard deviation of the Gaussian in y before rotating by
            theta; Type = float
    Theta = Rotation angle in radians. The rotation angle increases
            counterclockwise; Type = float
    model_plotting = if True, show the data, model and residual; Type = bool

    RETURNS:
    new_xCen = fitted x centroid; Type = float
    new_yCen = fitted y centroid; Type = float
    fwhm_x = FWHM of the fitted Gaussian in x; Type = float
    fwhm_y = FWHM of the fitted Gaussian in y; Type = float
    """
    # Creating a mesh grid with the shape of image to create the model
    y_pos, x_pos = np.mgrid[:image.shape[0], :image.shape[1]]

    # defining starting and stopping points for drawing a box to fit the
    # gaussian to
    xA, yA = int(center_x - box_width), int(center_y - box_width)
    xB, yB = int(center_x + box_width), int(center_y + box_width)

    # fitting the gaussian model twice, the second time starting from the
    # first fit's parameters
    fit_g = fitting.LevMarLSQFitter()
    gauss2D = models.Gaussian2D(amplitude=amp, x_mean=center_x,
                                y_mean=center_y, x_stddev=x_std,
                                y_stddev=y_std, theta=Theta)
    g = fit_g(gauss2D, x_pos[yA:yB, xA:xB], y_pos[yA:yB, xA:xB],
              image[yA:yB, xA:xB])
    g1 = fit_g(g, x_pos[yA:yB, xA:xB], y_pos[yA:yB, xA:xB],
               image[yA:yB, xA:xB])
    #pdb.set_trace()
    new_xCen = g1.x_mean[0]
    new_yCen = g1.y_mean[0]
    fwhm_x = g1.x_fwhm
    fwhm_y = g1.y_fwhm

    if model_plotting:
        plt.subplot(131)
        plt.imshow(image[yA:yB, xA:xB])
        plt.title('Data')
        plt.subplot(132)
        plt.imshow(g1(x_pos[yA:yB, xA:xB], y_pos[yA:yB, xA:xB]))
        plt.title('Model')
        plt.subplot(133)
        plt.imshow(image[yA:yB, xA:xB]
                   - g1(x_pos[yA:yB, xA:xB], y_pos[yA:yB, xA:xB]))
        plt.title('Residual')

    # Results
    return new_xCen, new_yCen, fwhm_x, fwhm_y
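# Quick usage sketch for gen_center_g2d on a synthetic star; assumes the
# function and its imports (numpy, matplotlib, astropy.modeling) are in
# scope, and all values are illustrative.
import numpy as np
from astropy.modeling import models

yy, xx = np.mgrid[:100, :100]
star = models.Gaussian2D(amplitude=300.0, x_mean=48.7, y_mean=52.3,
                         x_stddev=1.8, y_stddev=2.2, theta=0.3)(xx, yy)
star = star + np.random.default_rng(1).normal(0.0, 3.0, star.shape)

xc, yc, fx, fy = gen_center_g2d(star, center_x=49, center_y=52, box_width=10,
                                amp=300.0, x_std=2.0, y_std=2.0, Theta=0.0)
print(xc, yc, fx, fy)  # refined centroid and FWHMs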
def test_deriv_2D(self, model_class, test_parameters): """ Test the derivative of a model by fitting with an estimated and analytical derivative. """ x_lim = test_parameters['x_lim'] y_lim = test_parameters['y_lim'] if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase): return if "log_fit" in test_parameters: if test_parameters['log_fit']: x = np.logspace(x_lim[0], x_lim[1], self.N) y = np.logspace(y_lim[0], y_lim[1], self.M) x_test = np.logspace(x_lim[0], x_lim[1], self.N * 10) y_test = np.logspace(y_lim[0], y_lim[1], self.M * 10) else: x = np.linspace(x_lim[0], x_lim[1], self.N) y = np.linspace(y_lim[0], y_lim[1], self.M) x_test = np.linspace(x_lim[0], x_lim[1], self.N * 10) y_test = np.linspace(y_lim[0], y_lim[1], self.M * 10) xv, yv = np.meshgrid(x, y) xv_test, yv_test = np.meshgrid(x_test, y_test) try: model_with_deriv = create_model(model_class, test_parameters, use_constraints=False, parameter_key='deriv_initial') model_no_deriv = create_model(model_class, test_parameters, use_constraints=False, parameter_key='deriv_initial') model = create_model(model_class, test_parameters, use_constraints=False, parameter_key='deriv_initial') except KeyError: model_with_deriv = create_model(model_class, test_parameters, use_constraints=False) model_no_deriv = create_model(model_class, test_parameters, use_constraints=False) model = create_model(model_class, test_parameters, use_constraints=False) # add 10% noise to the amplitude rsn = np.random.default_rng(0) amplitude = test_parameters['parameters'][0] n = 0.1 * amplitude * (rsn.random((self.M, self.N)) - 0.5) data = model(xv, yv) + n fitter_with_deriv = fitting.LevMarLSQFitter() new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv, data) fitter_no_deriv = fitting.LevMarLSQFitter() new_model_no_deriv = fitter_no_deriv(model_no_deriv, xv, yv, data, estimate_jacobian=True) assert_allclose(new_model_with_deriv(xv_test, yv_test), new_model_no_deriv(xv_test, yv_test), rtol=1e-2) if model_class != Gaussian2D: assert_allclose(new_model_with_deriv.parameters, new_model_no_deriv.parameters, rtol=0.1)
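# The mechanism under test, isolated: LevMarLSQFitter uses a model's analytic
# fit_deriv by default and a finite-difference Jacobian when
# estimate_jacobian=True. A 1D sketch of the same comparison:
import numpy as np
from numpy.testing import assert_allclose
from astropy.modeling import models, fitting

x = np.linspace(-5, 5, 100)
data = models.Gaussian1D(3.0, 0.2, 1.1)(x) \
    + np.random.default_rng(2).normal(0.0, 0.05, x.size)

fit_analytic = fitting.LevMarLSQFitter()(models.Gaussian1D(2.5, 0.0, 1.0),
                                         x, data)  # uses Gaussian1D.fit_deriv
fit_numeric = fitting.LevMarLSQFitter()(models.Gaussian1D(2.5, 0.0, 1.0),
                                        x, data, estimate_jacobian=True)
assert_allclose(fit_analytic.parameters, fit_numeric.parameters, rtol=1e-3)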
linepad_left = 25
if i == 47 and j == 26:
    linepad_left = 75
if i == 48 and j == 26:
    linepad_left = 75

line_y_arr_comp1 = line_comp1[line_idx-linepad_left:line_idx+linepad_right, i, j]
line_x_arr_comp1 = np.linspace(line_idx-linepad_left, line_idx+linepad_right,
                               len(line_y_arr_comp1))
line_y_arr_comp2 = line_comp2[line_idx-linepad_left:line_idx+linepad_right, i, j]
line_x_arr_comp2 = np.linspace(line_idx-linepad_left, line_idx+linepad_right,
                               len(line_y_arr_comp2))

# fitting
gauss_init_lowcomp = models.Gaussian1D(amplitude=5.0, mean=line_idx-10, stddev=5.0)
gauss_init_highcomp = models.Gaussian1D(amplitude=5.0, mean=line_idx+10, stddev=5.0)
fit_gauss = fitting.LevMarLSQFitter()
g1 = fit_gauss(gauss_init_lowcomp, line_x_arr_comp1, line_y_arr_comp1)
g2 = fit_gauss(gauss_init_highcomp, line_x_arr_comp2, line_y_arr_comp2)

# save lzifu total fit to array for plotting
line_y_arr_total = line_total[line_idx-linepad_left:line_idx+linepad_right, i, j]

# also fit raw data by a single gaussian
if (i, j) in force_onecomp_arr:
    linepad_left = 25
    linepad_right = 25
    line_y_arr_data = obs_data[line_idx-linepad_left:line_idx+linepad_right, i, j]
    line_x_arr_data = np.linspace(line_idx-linepad_left, line_idx+linepad_right,
                                  len(line_y_arr_data))
def calc_rv_todcor(spect,wave,sig, template_fns,bad_intervals=[],fig_fn='',\ smooth_distance=201,convolve_template=True, alpha=0.3,\ nwave_log=int(1e4),ncor=1000, return_fitted=False,jd=0.0,out_fn='',\ heliocentric_correction=0, plotit=False): """Compute a radial velocity based on an best fitting template spectrum. Teff is estimated at the same time. Parameters ---------- spect: array-like The reduced WiFeS spectrum wave: array-like The wavelengths corresponding to the reduced WiFeS spectrum template_fns: string Spectral template for star 1 and star 2 that can be read in by np.loadtxt bad_intervals: List of wavelength intervals where e.g. telluric absorption is bad. For todcor, These can only be smoothed over. smooth_distance: float Distance to smooth for "continuum" correction Returns ------- rv1: float Radial velocity of star 1 in km/s rv_sig1: float Uncertainty in radial velocity (NB assumes good model fit) rv2: float Radial velocity of star 2 in km/s rv_sig2: float Uncertainty in radial velocity (NB assumes good model fit) corpeak: float Correlation peak """ (wave_log, spect_int, sig_int, template_ints) = \ interpolate_spectra_onto_log_grid(spect,wave,sig, template_fns,\ bad_intervals=bad_intervals, smooth_distance=smooth_distance, \ convolve_template=convolve_template, nwave_log=nwave_log) rvs = np.zeros(len(template_fns)) peaks = np.zeros(len(template_fns)) drv = np.log(wave_log[1] / wave_log[0]) * 2.998e5 #*** Next (hopefully with two templates only!) we continue and apply the TODCOR algorithm. window_width = nwave_log // 20 ramp = np.arange(1, window_width + 1, dtype=float) / window_width window = np.ones(nwave_log) window[:window_width] *= ramp window[-window_width:] *= ramp[::-1] template_ints[0] *= window template_ints[1] *= window spect_int *= window norm1 = np.sqrt(np.sum(template_ints[0]**2)) norm2 = np.sqrt(np.sum(template_ints[1]**2)) norm_tgt = np.sqrt(np.sum(spect_int**2)) #pdb.set_trace() c1 = np.fft.irfft( np.conj(np.fft.rfft(template_ints[0] / norm1)) * np.fft.rfft(spect_int / norm_tgt)) c1 = np.roll(c1, ncor // 2)[:ncor] c2 = np.fft.irfft( np.conj(np.fft.rfft(template_ints[1] / norm2)) * np.fft.rfft(spect_int / norm_tgt)) c2 = np.roll(c2, ncor // 2)[:ncor] #Unclear which way around this line should be. ix_c12 sign was corrected in order to #give the right result with simulated data. c12 = np.fft.irfft( np.fft.rfft(template_ints[1] / norm2) * np.conj(np.fft.rfft(template_ints[0] / norm1))) c12 = np.roll(c12, ncor // 2)[:ncor] ix = np.arange(ncor).astype(int) xy = np.meshgrid(ix, ix) #Correct the flux ratio for the RMS spectral variation. Is this needed??? alpha_norm = alpha * norm2 / norm1 ix_c12 = np.minimum(np.maximum(xy[0] - xy[1] + ncor // 2, 0), ncor - 1) #!!!This was the old line !!! #ix_c12 = np.minimum(np.maximum(xy[1]-xy[0]+ncor//2,0),ncor-1) #XXX New (temporary?) 
    # line XXX
    todcor = (c1[xy[0]] + alpha_norm * c2[xy[1]]) / \
        np.sqrt(1 + 2 * alpha_norm * c12[ix_c12] + alpha_norm**2)
    #print("Max correlation: {0:5.2f}".format(np.max(todcor)))
    #print(alpha_norm)
    #plt.plot(drv*(np.arange(nwave_log)-nwave_log//2),np.roll(c1,nwave_log//2))
    #Figure like TODCOR paper:
    #fig = plt.figure()
    #ax = fig.gca(projection='3d')
    #ax.plot_surface(xy[0],xy[1],todcor)
    plt.clf()
    plt.imshow(todcor, cmap=cm.gray, interpolation='nearest',
               extent=[-drv * ncor / 2, drv * ncor / 2,
                       -drv * ncor / 2, drv * ncor / 2])
    xym = np.unravel_index(np.argmax(todcor), todcor.shape)
    hw_fit = 2
    if (xym[0] < hw_fit) | (xym[1] < hw_fit) | (xym[0] >= ncor - hw_fit) | \
            (xym[1] >= ncor - hw_fit):
        print("Error: TODCOR peak too close to edge!")
        raise UserWarning
    ix_fit = np.arange(-hw_fit, hw_fit + 1).astype(int)
    xy_fit = np.meshgrid(ix_fit, ix_fit)
    p_init = models.Gaussian2D(amplitude=np.max(todcor), x_mean=0, y_mean=0,
                               x_stddev=50.0 / drv, y_stddev=50.0 / drv)
    fit_p = fitting.LevMarLSQFitter()
    p = fit_p(p_init, xy_fit[0], xy_fit[1],
              todcor[xym[0] - hw_fit:xym[0] + hw_fit + 1,
                     xym[1] - hw_fit:xym[1] + hw_fit + 1])
    #import pdb; pdb.set_trace()
    rv_x = drv * ((p.parameters[1] + xym[1]) - ncor // 2)
    rv_y = drv * ((p.parameters[2] + xym[0]) - ncor // 2)
    model_spect = rv_shift_binary(rv_x / drv, rv_y / drv, alpha,
                                  np.fft.rfft(template_ints[0]),
                                  np.fft.rfft(template_ints[1]))
    if plotit:
        (wave_log, spect_int_norm, sig_int, template_int_norm) = \
            interpolate_spectra_onto_log_grid(spect, wave, sig, template_fns,
                bad_intervals=bad_intervals, smooth_distance=smooth_distance,
                convolve_template=convolve_template, nwave_log=nwave_log,
                subtract_smoothed=False)
        model_spect_norm = rv_shift_binary(rv_x/drv, rv_y/drv, alpha, \
            np.fft.rfft(template_int_norm[0]), np.fft.rfft(template_int_norm[1]))
        model_spect_prim = rv_shift_binary(rv_x/drv, rv_y/drv, 0, \
            np.fft.rfft(template_int_norm[0]), np.fft.rfft(template_int_norm[1]))
        model_spect_sec = rv_shift_binary(rv_x/drv, rv_y/drv, 1e6, \
            np.fft.rfft(template_int_norm[0]), np.fft.rfft(template_int_norm[1]))
        ss = np.ones(int(5e2)) / 5e2
        model_ss = np.convolve(model_spect_norm, ss, mode='same')
        spect_ss = np.convolve(spect_int_norm, ss, mode='same')
        plt.clf()
        plt.plot(wave_log, model_spect_norm / model_ss, label='Joint Model')
        plt.plot(wave_log, model_spect_prim / model_ss / (1 + alpha),
                 label='Primary')
        plt.plot(wave_log, model_spect_sec / model_ss * alpha / (1 + alpha),
                 label='Secondary')
        plt.plot(wave_log, spect_int_norm / spect_ss, label='Data')
        plt.legend()
        plt.axis([3810, 5610, 0, 1.45])
        plt.xlabel(r'Wavelength ($\AA$)')
        plt.ylabel('Flux (normalised)')
        plt.draw()
        #pdb.set_trace() #XXX
    #Compute theoretical RV uncertainties from the "Q" factors...
    errors = []
    for i, template_int in enumerate(template_ints):
        if (i == 0):
            ti = template_int / (1 + alpha)
        else:
            ti = template_int * alpha / (1 + alpha)
        model_spect_deriv = (ti[1:] - ti[:-1]) / (wave_log[1:] - wave_log[:-1])
        wave2_on_s = (0.5 * (wave_log[1:] + wave_log[:-1]))**2 / \
            (0.5 * (ti[1:] + ti[:-1] + 2))
        q_factor = np.sqrt(np.mean(wave2_on_s * model_spect_deriv**2))
        photon_rv_error = 3e5 / q_factor * np.median(sig_int) / np.sqrt(
            len(spect))
        errors.append(photon_rv_error)
    #ISSUES:
    #1) Error (below) not computed.
    #errors = np.sqrt(np.diag(fit_p.fit_info['cov_x']))
    if len(out_fn) > 0:
        outfile = open(out_fn, 'a')
        outfile.write(
            '{0:12.4f}, {1:8.2f}, {2:8.2f}, {3:8.2f}, {4:8.2f}, {5:8.3f}\n'.
            format(jd, rv_x + heliocentric_correction, errors[0],
                   rv_y + heliocentric_correction, errors[1],
                   np.max(todcor)))
        outfile.close()
    if return_fitted:
        return wave_log, spect_int, model_spect
    else:
        return rv_x, errors[0], rv_y, errors[1], np.max(todcor)
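# The sub-pixel peak refinement above generalizes to any correlation-like
# map: fit a Gaussian2D over a small window around the argmax. A stand-alone
# sketch (all sizes illustrative):
import numpy as np
from astropy.modeling import models, fitting

xg, yg = np.meshgrid(np.arange(24), np.arange(20))
cc = models.Gaussian2D(1.0, 12.3, 9.7, 2.0, 2.0)(xg, yg)  # fake peak map

ym, xm = np.unravel_index(np.argmax(cc), cc.shape)
hw = 2
yy, xx = np.mgrid[-hw:hw + 1, -hw:hw + 1]
p_init = models.Gaussian2D(amplitude=cc[ym, xm], x_mean=0, y_mean=0,
                           x_stddev=2.0, y_stddev=2.0)
p = fitting.LevMarLSQFitter()(p_init, xx, yy,
                              cc[ym - hw:ym + hw + 1, xm - hw:xm + hw + 1])
print(xm + p.x_mean.value, ym + p.y_mean.value)  # sub-pixel (x, y) peak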
def findHalfLightSersicdf(pgcs,df1,df2): #df1 is galbasedf with including r25- get pgc names from this file #df2 is pickle file halflights = [] amps = [] ns = [] mses = [] #print('0') for i in np.arange(len(pgcs)): print(len(pgcs)-i) galmask = df2.PGC.isin([pgcs[i]]) rp = df2.loc[galmask] #print(rp) pgc=pgcs[i] if np.isnan(rp.r_arcsec).all()==True: halflights.append(np.nan) amps.append(np.nan) ns.append(np.nan) mses.append(np.nan) #print('1') elif rp.r_arcsec.min()>200: halflights.append(np.nan) amps.append(np.nan) ns.append(np.nan) mses.append(np.nan) #print('2') else: try: #print('3') rp.r_arcsec/=3600. r25 = df1.loc[i].R25_DEG #print(r25)`=-0 #r25= r25.tolist()[0] #print(r25) mask = rp.r_arcsec<2*r25 #print(mask) rp = rp[mask] #mask = rp.I>0 #rp = rp[mask] ind = np.where(rp.r_arcsec<.5*r25)[0][-1] sersic = models.Sersic1D(bounds = {'n':(0,14)}) outlier_fit = fitting.FittingWithOutlierRemoval(fitting.LevMarLSQFitter(),sigma_clip, niter=3, sigma=2.5) fitted_model,filtered_data = outlier_fit(sersic,rp.r_arcsec,rp.I)#,weights=0.1*rp.I) filtered_data[:ind]=False fit = fitting.LevMarLSQFitter() fitted_model = fit(sersic,rp.r_arcsec[~filtered_data],rp.I[~filtered_data])#,weights=(0.1*rp.I[~filtered_data])) mse = np.nanmean((rp.I[~filtered_data] - fitted_model(rp.I[~filtered_data]))**2) print('') print(pgc) #print(np.round(mse,decimals=2)) """ if mse>3.: ns_mse = [] for n in nrange: sersic = models.Sersic1D(bounds = {'n':(n,n+1)}) outlier_fit = fitting.FittingWithOutlierRemoval(fitting.LevMarLSQFitter(),sigma_clip, niter=3, sigma=3) fitted_model,filtered_data_chisq = outlier_fit(sersic,rp.r_arcsec[~filtered_data],rp.I[~filtered_data])#,weights=(0.1*rp.I[~filtered_data])) m = np.nanmean((rp.I[~filtered_data] - fitted_model(rp.I[~filtered_data]))**2) if m<mse: mse=m ns_mse.append(n) print(mse,n) else: pass if len(ns_mse)==0: print(ns_mse) print('none better') pass else: sersic = models.Sersic1D(bounds = {'n':(ns_mse[-1]-1,ns_mse[-1]+1)}) fitted_model = fit(sersic,rp.r_arcsec[~filtered_data],rp.I[~filtered_data]) """ re = np.round(fitted_model.r_eff.value*3600*0.9,decimals=3) #re*=0.9 #re = np.round(re,decimals=3) n = np.round(fitted_model.n.value,decimals=3) #print(re,saloratio,mm15ratio) #print(n,mmres.n[i],np.round(mmres['T'][i],decimals=2)) print(re) if re>250.: halflights.append(np.nan) amps.append('fit fail') ns.append('fit fail') mses.append('fit fail') elif re<5.: halflights.append(np.nan) amps.append('fit fail') ns.append('fit fail') mses.append('fit fail') else: halflights.append(re) amps.append(fitted_model.amplitude.value) ns.append(n) mses.append(mse) except: halflights.append(np.nan) amps.append('fit fail') ns.append('fit fail') mses.append('fit fail') redf = pd.DataFrame({'PGC':pgcs,'re':halflights,'amp': amps,'n':ns}) #pgcs = np.asarray(pgcs) #halflights = np.asarray(halflights,dtype='float') #amps = np.asarray(amps) #ns = np.asarray(ns) #return(pgcs,halflights,amps,ns,datamasks,nummasked) return(redf)
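# The core pattern above, isolated: FittingWithOutlierRemoval wraps a fitter
# with sigma clipping so deviant profile points are rejected between
# iterations. Minimal sketch on a synthetic Sersic profile; the
# (fitted_model, mask) return order assumes astropy >= 3.1.
import numpy as np
from astropy.modeling import models, fitting
from astropy.stats import sigma_clip

r = np.linspace(0.01, 3.0, 200)
profile = models.Sersic1D(amplitude=10.0, r_eff=0.8, n=2.0)(r)
profile[::17] += 20.0  # sprinkle in outliers

sersic = models.Sersic1D(bounds={'n': (0, 14)})
or_fit = fitting.FittingWithOutlierRemoval(fitting.LevMarLSQFitter(),
                                           sigma_clip, niter=3, sigma=2.5)
fitted, mask = or_fit(sersic, r, profile)
print(fitted.r_eff.value, fitted.n.value)  # ~0.8 and ~2.0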
def photometry(image_paths, master_dark_path, master_flat_path, target_centroid, comparison_flux_threshold, aperture_radii, centroid_stamp_half_width, psf_stddev_init, aperture_annulus_radius, output_path): """ Parameters ---------- master_dark_path : str Path to master dark frame master_flat_path :str Path to master flat field target_centroid : `~numpy.ndarray` position of centroid, with shape (2, 1) comparison_flux_threshold : float Minimum fraction of the target star flux required to accept for a comparison star to be included aperture_radii : `~numpy.ndarray` Range of aperture radii to use centroid_stamp_half_width : int Centroiding is done within image stamps centered on the stars. This parameter sets the half-width of the image stamps. psf_stddev_init : float Initial guess for the width of the PSF stddev parameter, used for fitting 2D Gaussian kernels to the target star's PSF. aperture_annulus_radius : int For each aperture in ``aperture_radii``, measure the background in an annulus ``aperture_annulus_radius`` pixels bigger than the aperture radius output_path : str Path to where outputs will be saved. """ master_dark = fits.getdata(master_dark_path) master_flat = fits.getdata(master_flat_path) star_positions = init_centroids(image_paths[0], master_flat, master_dark, target_centroid, plots=True, min_flux=comparison_flux_threshold).T # Initialize some empty arrays to fill with data: times = np.zeros(len(image_paths)) fluxes = np.zeros( (len(image_paths), len(star_positions), len(aperture_radii))) errors = np.zeros( (len(image_paths), len(star_positions), len(aperture_radii))) xcentroids = np.zeros((len(image_paths), len(star_positions))) ycentroids = np.zeros((len(image_paths), len(star_positions))) airmass = np.zeros(len(image_paths)) airpress = np.zeros(len(image_paths)) humidity = np.zeros(len(image_paths)) telfocus = np.zeros(len(image_paths)) psf_stddev = np.zeros(len(image_paths)) medians = np.zeros(len(image_paths)) with ProgressBar(len(image_paths)) as bar: for i in range(len(image_paths)): bar.update() # Subtract image by the dark frame, normalize by flat field #imagedata = (rebin_image(fits.getdata(image_paths[i]), 2) - master_dark[:-1, :-1]) / master_flat[:-1, :-1] imagedata = (fits.getdata(image_paths[i]) - master_dark) / master_flat # Collect information from the header imageheader = fits.getheader(image_paths[i]) exposure_duration = imageheader['EXPTIME'] times[i] = Time(imageheader['DATE-OBS'], format='isot', scale='utc').jd medians[i] = np.median(imagedata) airmass[i] = imageheader['AIRMASS'] airpress[i] = imageheader['AIRPRESS'] humidity[i] = imageheader['HUMIDITY'] telfocus[i] = imageheader['TELFOCUS'] # Initial guess for each stellar centroid informed by previous centroid for j in range(len(star_positions)): if i == 0: init_x = star_positions[j][0] init_y = star_positions[j][1] else: init_x = ycentroids[i - 1][j] init_y = xcentroids[i - 1][j] # Cut out a stamp of the full image centered on the star image_stamp = imagedata[init_y - centroid_stamp_half_width:init_y + centroid_stamp_half_width, init_x - centroid_stamp_half_width:init_x + centroid_stamp_half_width] # Measure stellar centroid with 2D gaussian fit x_stamp_centroid, y_stamp_centroid = centroid_com(image_stamp) y_centroid = x_stamp_centroid + init_x - centroid_stamp_half_width x_centroid = y_stamp_centroid + init_y - centroid_stamp_half_width xcentroids[i, j] = x_centroid ycentroids[i, j] = y_centroid # import matplotlib.pyplot as plt # plt.figure() # plt.imshow(np.log(image_stamp), origin='lower', 
cmap=plt.cm.viridis) # plt.scatter(x_stamp_centroid, y_stamp_centroid, s=30) # plt.show() # # plt.figure() # s = np.std(imagedata) # m = np.median(imagedata) # plt.imshow(imagedata, origin='lower', cmap=plt.cm.viridis, # vmin=m-2*s, vmax=m+2*s) # plt.show() # For the target star, measure PSF: if j == 0: psf_model_init = models.Gaussian2D( amplitude=np.max(image_stamp), x_mean=centroid_stamp_half_width, y_mean=centroid_stamp_half_width, x_stddev=psf_stddev_init, y_stddev=psf_stddev_init) fit_p = fitting.LevMarLSQFitter() y, x = np.mgrid[:image_stamp.shape[0], :image_stamp. shape[1]] best_psf_model = fit_p( psf_model_init, x, y, image_stamp - np.median(image_stamp)) psf_stddev[i] = 0.5 * (best_psf_model.x_stddev.value + best_psf_model.y_stddev.value) positions = np.vstack([ycentroids[i, :], xcentroids[i, :]]) for k, aperture_radius in enumerate(aperture_radii): target_apertures = CircularAperture(positions, aperture_radius) background_annuli = CircularAnnulus( positions, r_in=aperture_radius + aperture_annulus_radius, r_out=aperture_radius + 2 * aperture_annulus_radius) flux_in_annuli = aperture_photometry( imagedata, background_annuli)['aperture_sum'].data background = flux_in_annuli / background_annuli.area() flux = aperture_photometry( imagedata, target_apertures)['aperture_sum'].data background_subtracted_flux = ( flux - background * target_apertures.area()) fluxes[i, :, k] = background_subtracted_flux / exposure_duration errors[i, :, k] = np.sqrt(flux) ## Save some values results = PhotometryResults(times, fluxes, errors, xcentroids, ycentroids, airmass, airpress, humidity, medians, psf_stddev, aperture_radii) results.save(output_path) return results
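# The annulus background-subtraction step above, isolated, with photutils;
# note that `area` is a method in the (older) photutils used above but an
# attribute in recent releases, as assumed here. Positions and radii are
# illustrative.
import numpy as np
from photutils.aperture import (CircularAperture, CircularAnnulus,
                                aperture_photometry)

img = np.random.default_rng(3).normal(100.0, 5.0, (64, 64))
img[30:34, 30:34] += 500.0  # fake star

pos = [(31.5, 31.5)]
aper = CircularAperture(pos, r=5.0)
annulus = CircularAnnulus(pos, r_in=8.0, r_out=12.0)
bkg_per_pix = (aperture_photometry(img, annulus)['aperture_sum'].data
               / annulus.area)
flux = (aperture_photometry(img, aper)['aperture_sum'].data
        - bkg_per_pix * aper.area)
print(flux)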
# Build psf basis
N_psf_basis = abs(cut)
lambdas = valh[cut:]
xs = vech[:, cut:]
psf_basis = []
for i in range(N_psf_basis):
    psf_basis.append(np.tensordot(xs[:, i], renders, axes=[0, 0]))

# =============================================================================
# Manual test
# =============================================================================
runtest = False  #input('Run Manual test?')
if runtest:
    prf_model = models.Gaussian2D(x_stddev=1, y_stddev=1)
    fitter = fitting.LevMarLSQFitter()
    indices = np.indices(sim.bkg_sub_img.shape)
    model_fits = []
    best_big = srcs['tnpix'] >= p_sizes[0]**2.
    best_small = srcs['tnpix'] <= p_sizes[2]**2.
    best_flag = srcs['flag'] < 31
    best_srcs = srcs[best_big & best_flag & best_small]
    fitshape = (4 * FWHM, 4 * FWHM)
    prf_model.x_mean = fitshape[0] / 2.
    prf_model.y_mean = fitshape[1] / 2.
    for row in best_srcs:
        position = (row['y'], row['x'])
        y = extract_array(indices[0], fitshape, position)
        x = extract_array(indices[1], fitshape, position)
        sub_array_data = extract_array(sim.bkg_sub_img,
def ajusta(x_cube, y_cube, imagen_in, l_min_izq, l_max_izq, l_min_der, l_max_der, guess_line, guess_FWHM, orden_pol): #def ajusta(x_cube,y_cube,orden_pol): #scientific packages import pyfits import scipy from scipy.optimize import curve_fit, leastsq import numpy as np from numpy import random, exp, sqrt import matplotlib.pyplot as plt from astropy.modeling import models, fitting, polynomial # global imagen_in # imagen_in=sys.argv[1] # l_min_izq=float(sys.argv[2]) # l_max_izq=float(sys.argv[3]) # l_min_der=float(sys.argv[4]) # l_max_der=float(sys.argv[5]) # guess_line=float(sys.argv[6]) # guess_FWHM=float(sys.argv[7]) # orden_pol=float(sys.argv[8]) def Lee_cubo(spectra, XX, YY): global imagen imagen = pyfits.getdata(spectra, header=False) header = pyfits.getheader(spectra) #print len(imagen) #empty array Lambda_t = [] Flux_t = [] for i in range(len(imagen)): y = imagen[i][XX][YY] # x=i*header['CDELT1']+header['CRVAL1'] x = i * header['CD3_3'] + header['CRVAL3'] Lambda_t.append(float(imagen[i][XX][YY])) #Flux_t.append(float(i*header['CDELT1']+header['CRVAL1'])) Flux_t.append(float(i * header['CD3_3'] + header['CRVAL3'])) #print x,y Flux = np.array(Lambda_t) Lambda = np.array(Flux_t) x = Lambda y = Flux return x, y ########################## ## ## Funcion Region ## Toma una region de un espectro entre ## un minimo lambda y un maximo lambda ## x e y corresponden a lamba y cuentas o flujo ## ########################### # def region(minimo, maximo, x, y): xar = [] yar = [] for i in range(len(x)): if (x[i] > minimo) and (x[i] < maximo): xar.append(float(x[i])) yar.append(float(y[i])) xar = np.array(xar) yar = np.array(yar) return xar, yar ######################### # # Funcion Region_discontinuo # Toma dos regiones de un espectro separadoas entre # un minimo lambda y un maximo lambda a la izquierda y un # un minimo lambda y un maximo lambda a la deracha de la emission o obsorption # x e y corresponden a lamba y cuentas (o flujo) # ########################## def region_discontinua(minimo1, maximo1, minimo2, maximo2, x, y): xar = [] yar = [] for i in range(len(x)): if ((x[i] > minimo1) and (x[i] < maximo1)) or ((x[i] > minimo2) and (x[i] < maximo2)): xar.append(float(x[i])) yar.append(float(y[i])) xar = np.array(xar) yar = np.array(yar) return xar, yar ####### # poly_fit, fitea un polinomio a datos # xp e yp correspondend a x e y a ser fiteado # # en este caso corresponden al x e y del output de la # region discontinua # # ####### def poly_fit(xp, yp, grado_pol): t_init = polynomial.Polynomial1D(degree=int(grado_pol)) fit_t = fitting.LevMarLSQFitter() t = fit_t(t_init, xp, yp) return t #calclulo original x_sci, y_sci = Lee_cubo(imagen_in, x_cube, y_cube) x = x_sci y = y_sci xspec_o, yspec_o = region(l_min_izq, l_max_der, x, y) ################# ###Fitting regions with a polynomio x_cont_o, y_cont_o = region_discontinua(l_min_izq, l_max_izq, l_min_der, l_max_der, x, y) #cont1=poly_fit(xa1,ya1,12) #cont2=poly_fit(xa2,ya2,12) cont3_o = poly_fit(x_cont_o, y_cont_o, orden_pol) #print cont1 #print cont2 #res1= -cont1(xa1)+ ya1 #res2= -cont2(xa2)+ ya2 res3_o = -cont3_o(x_cont_o) + y_cont_o #se aplica el polinomio al espectro en la zona de interes res4_o = -cont3_o(xspec_o) + yspec_o ##################### # # Normalization!!! 
# ###################### res4_oN = yspec_o / cont3_o(xspec_o) ###################### ################ #iteracion 1 t_init4_o = models.Gaussian1D(amplitude=1, mean=guess_line, stddev=guess_FWHM) fit_t4_o = fitting.LevMarLSQFitter() t4_o = fit_t4_o(t_init4_o, xspec_o, res4_o) a_science = t4_o.mean.value b_science = t4_o.stddev.value Amplitud = t4_o.amplitude.value # Redefiniendo: de acuerdo al FWHM #xspec_o,yspec_o=region(l_min_izq,l_max_der,x,y) #x_cont_o,y_cont_o=region_discontinua(l_min_izq,l_max_izq,l_min_der,l_max_der,x,y) #cont3_o=poly_fit(x_cont_o,y_cont_o,orden_pol) #res4_o= -cont3_o(xspec_o)+yspec_o import numpy from scipy.optimize import curve_fit import matplotlib.pyplot as plt import math CWt = t4_o.mean.value FWHMt = 2 * sqrt(2 * math.log(2)) * t4_o.stddev.value At = t4_o.amplitude.value xspec_o, yspec_o = region(CWt - 5 * FWHMt, CWt + 5 * FWHMt, x, y) x_cont_o, y_cont_o = region_discontinua(CWt - 5 * FWHMt, CWt - 3 * FWHMt, CWt + 3 * FWHMt, CWt + 5 * FWHMt, x, y) cont3_o = poly_fit(x_cont_o, y_cont_o, orden_pol) res4_o = -cont3_o(xspec_o) + yspec_o #iteracion 1 t_init4_o = models.Gaussian1D(amplitude=Amplitud, mean=a_science, stddev=b_science) fit_t4_o = fitting.LevMarLSQFitter() t4_o = fit_t4_o(t_init4_o, xspec_o, res4_o) a_science = t4_o.mean.value b_science = t4_o.stddev.value Amplitud = t4_o.amplitude.value #iteracion 2 para gaussiana t_init4_o = models.Gaussian1D(amplitude=Amplitud, mean=a_science, stddev=b_science) fit_t4_o = fitting.LevMarLSQFitter() t4_o = fit_t4_o(t_init4_o, xspec_o, res4_o) residuo_o = -t4_o(xspec_o) + res4_o #print "resultados",t_init4,t4 #print "resultados",t4_o a_science = t4_o.mean.value b_science = t4_o.stddev.value c_science_amplitude = t4_o.amplitude.value #Aplicando FWHM guess guess_FWHM_gauss = -float(-b_science) #print guess_FWHM_gauss #exit(0) ##print t4.mean Lambda_gauss_fit_sci = "{:10.3f}".format(a_science) Sigma_gauss_fit_sci = "{:10.3f}".format(b_science) #print Lambda_gauss_fit_sci, Sigma_gauss_fit_sci #print a ,b central_wavelenght = t4_o.mean.value FWHM = t4_o.stddev.value Amplitude = t4_o.amplitude.value import numpy from scipy.optimize import curve_fit import matplotlib.pyplot as plt import math # Define model function to be used to fit to the data above: def gauss(x, *p): A, mu, sigma = p return A * numpy.exp(-(x - mu)**2 / (2. * sigma**2)) CW = t4_o.mean.value FWHM = 2 * sqrt(2 * math.log(2)) * t4_o.stddev.value A = t4_o.amplitude.value # def Momentos(x,*p) #INTEGRAL from scipy import integrate #def myfunc(x, a, b): # return (x**b) + a # ## These are the arguments that will be passed as a and b to myfunc() args = A, CW, FWHM #print args # ## Integrate myfunc() from 0.5 to 1.5 #print CW,CW-4*FWHM, CW+4*FWHM,FWHM #results = integrate.quad(gauss, min(x_sci), max(x_sci), args) results = integrate.romberg(gauss, CW - 5 * FWHM, CW + 5 * FWHM, args) #print results, gauss(CW-4*FWHM,A,CW,FWHM), gauss(CW+4*FWHM,A,CW,FWHM), gauss(CW+4*FWHM,A,CW,FWHM)*(CW+4*FWHM-(CW-4*FWHM)) #print x_cube,y_cube,A,CW,FWHM,results # print Lambda_gauss_fit_sci, Sigma_gauss_fit_sci #plt.plot(xspec_o,res4_o, 'c-',lw=1,label='gaus') #plt.plot(xspec_o,t4_o(xspec_o),'r-', lw=2,label='gauss') #plt.pause(0.005) #plt.clf() RESULTADO = int(x_cube), int(y_cube), float(A), float(CW), float( FWHM), float(results) return RESULTADO[0], RESULTADO[1], RESULTADO[2], RESULTADO[3], RESULTADO[ 4], RESULTADO[5]
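# The core pattern of ajusta, isolated: fit a polynomial continuum on two
# windows flanking the line, subtract it, then fit a Gaussian to the
# residual and integrate. Self-contained sketch on a synthetic line (all
# wavelengths illustrative):
import numpy as np
from astropy.modeling import models, fitting, polynomial

wav = np.linspace(6500, 6620, 600)
flux = 0.01 * (wav - 6560) + 10 + 4 * np.exp(-0.5 * ((wav - 6563) / 2.5)**2)

side = ((wav > 6510) & (wav < 6540)) | ((wav > 6585) & (wav < 6615))
cont = fitting.LevMarLSQFitter()(polynomial.Polynomial1D(degree=2),
                                 wav[side], flux[side])
core = (wav > 6540) & (wav < 6585)
resid = flux[core] - cont(wav[core])
g = fitting.LevMarLSQFitter()(models.Gaussian1D(amplitude=4.0, mean=6563.0,
                                                stddev=2.5),
                              wav[core], resid)
area = g.amplitude.value * g.stddev.value * np.sqrt(2 * np.pi)  # line flux
print(g.mean.value, 2.3548 * g.stddev.value, area)  # centre, FWHM, flux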
def Flux_line( source_name, instru, line, row, column ): #row/colonne définit la position d'un pixel instru=MAPS/SPIRE_Map fit_g = fitting.LevMarLSQFitter() if instru == 'PACS': Wave_red, Flux_red = flux_PACS(source_name, 'R')[2], flux_PACS(source_name, 'R')[4] Wave_blue, Flux_blue = flux_PACS(source_name, 'B')[2], flux_PACS(source_name, 'B')[4] fit_g = fitting.LevMarLSQFitter() if line == 'NII_122_em' or line == 'OH_119' or line == 'OI_145' or line == 'NII_122_abs' or line == 'Thing_123': cond = np.where(np.isfinite(Flux_red[:, row, column])) Wave_R, Flux_R = Wave_red[cond], Flux_red[:, row, column][cond] if line == 'NIII_57' or line == 'OI_63': cond = np.where(np.isfinite(Flux_blue[:, row, column])) Wave_B, Flux_B = Wave_blue[cond], Flux_blue[:, row, column][cond] if np.shape(cond)[1] != 0: if line == 'NII_122_em': NII = Flux_R[np.where((Wave_R > 121.5) & (Wave_R < 122.5))] fit_line = fit_lines_gauss(Wave_R, Flux_R, NII, 'Em')[2] if line == 'NII_122_abs': NII = Flux_R[np.where((Wave_R > 121.5) & (Wave_R < 122.5))] fit_line = fit_lines_gauss(Wave_R, Flux_R, NII, 'Abs')[2] if line == 'Thing_123': Thing_123 = Flux_R[np.where((Wave_R > 122.5) & (Wave_R < 123.5))] fit_line = fit_lines_gauss(Wave_R, Flux_R, Thing_123, 'Em')[2] if line == 'NIII_57': NIII = Flux_B[np.where((Wave_B > 56) & (Wave_B < 58))] fit_line = fit_lines_gauss(Wave_B, Flux_B, NIII, 'Em')[2] if line == 'OH_119': OH_1 = Flux_R[np.where((Wave_R > 119.21) & (Wave_R < 119.25))] OH_2 = Flux_R[np.where((Wave_R > 119.42) & (Wave_R < 119.46))] g_init_OH_1, g_init_OH_2 = fit_lines_gauss( Wave_R, Flux_R, OH_1, 'Abs')[1], fit_lines_gauss(Wave_R, Flux_R, OH_2, 'Abs')[1] l_init_continuum = models.Polynomial1D(degree=2) g_line_OH = g_init_OH_1 + g_init_OH_2 + l_init_continuum fit_line = fit_g(g_line_OH, Wave_R, Flux_R) if line == 'OI_63': OI_63 = Flux_B[np.where((Wave_B > 62) & (Wave_B < 64))] fit_line = fit_lines_gauss(Wave_B, Flux_B, OI_63, 'Em')[2] if line == 'OI_145': OI_145 = Flux_R[np.where((Wave_R > 144.8) & (Wave_R < 145.8))] fit_line = fit_lines_gauss(Wave_R, Flux_R, OI_145, 'Em')[2] amplitude, std = abs(fit_line.amplitude_0[0]), fit_line.stddev_0[ 0] #On retrouve les paramètres A et sigma pour le calcul du flux des lines Line_flux = amplitude * std * np.sqrt(2 * np.pi) else: Line_flux = 0 if instru == 'SPIRE_Map': Wave_red, Flux_red = plot_image_flux_SPIRE_Map( source_name, 'HR', 'SSW')[2], plot_image_flux_SPIRE_Map(source_name, 'HR', 'SSW')[4] Wave_blue, Flux_blue = plot_image_flux_SPIRE_Map( source_name, 'HR', 'SLW')[2], plot_image_flux_SPIRE_Map(source_name, 'HR', 'SLW')[4] fit_g = fitting.LevMarLSQFitter() if line == 'NII_1461' or line == 'OH_971' or line == 'OH_1033' or line == 'H2O_1113' or line == 'H2O_1115': cond = np.where(np.isfinite(Flux_red[:, row, column])) Wave_R, Flux_R = Wave_red[cond], Flux_red[:, row, column][cond] if line == 'CI_10' or line == 'CI_21' or line == 'CO_43' or line == 'CO_54' or line == 'CO_65' or line == 'CO_76' or line == 'CO_87' or line == 'CH_835' or line == 'OH_909': cond = np.where(np.isfinite(Flux_blue[:, row, column])) Wave_B, Flux_B = Wave_blue[cond], Flux_blue[:, row, column][cond] if np.shape(cond)[1] != 0: #High frequencies if line == 'NII_1461': NII = Flux_R[np.where((Wave_R > 1450) & (Wave_R < 1466))] fit_line = fit_lines_sinc(Wave_R, Flux_R, NII, 'Em')[2] if line == 'HF_10': HF_10 = Flux_R[np.where((Wave_R > 1230) & (Wave_R < 1234))] fit_line = fit_lines_sinc(Wave_R, Flux_R, HF_10, 'Abs')[2] if line == 'H2O_1113': H2O_1113 = Flux_R[np.where((Wave_R > 1112) & (Wave_R < 1114))] 
                fit_line = fit_lines_sinc(Wave_R, Flux_R, H2O_1113, 'Abs')[2]
            if line == 'H2O_1115':
                H2O_1115 = Flux_R[np.where((Wave_R > 1114) & (Wave_R < 1116))]
                fit_line = fit_lines_sinc(Wave_R, Flux_R, H2O_1115, 'Abs')[2]
            if line == 'OH_971':
                OH_971 = Flux_R[np.where((Wave_R > 970) & (Wave_R < 974))]
                fit_line = fit_lines_sinc(Wave_R, Flux_R, OH_971, 'Abs')[2]
            if line == 'OH_1033':
                OH_1033 = Flux_R[np.where((Wave_R > 1031) & (Wave_R < 1034))]
                fit_line = fit_lines_sinc(Wave_R, Flux_R, OH_1033, 'Abs')[2]
            # Low frequencies
            if line == 'CI_10':
                CI_10 = Flux_B[np.where((Wave_B > 490) & (Wave_B < 493.8))]
                fit_line = fit_lines_sinc(Wave_B, Flux_B, CI_10, 'Em')[2]
            if line == 'CI_21':
                #if source_name=='MGE_4121':
                CI_21 = Flux_B[np.where((Wave_B > 807) & (Wave_B < 812))]
                fit_line = fit_lines_sinc(Wave_B, Flux_B, CI_21, 'Em')[2]
            if line == 'CO_43':
                CO_43 = Flux_B[np.where((Wave_B > 459) & (Wave_B < 463))]
                fit_line = fit_lines_sinc(Wave_B, Flux_B, CO_43, 'Em')[2]
            if line == 'CO_54':
                CO_54 = Flux_B[np.where((Wave_B > 573.5) & (Wave_B < 580))]
                fit_line = fit_lines_sinc(Wave_B, Flux_B, CO_54, 'Em')[2]
            if line == 'CO_65':
                CO_65 = Flux_B[np.where((Wave_B > 689) & (Wave_B < 693))]
                fit_line = fit_lines_sinc(Wave_B, Flux_B, CO_65, 'Em')[2]
            if line == 'CO_76':
                CO_76 = Flux_B[np.where((Wave_B > 804) & (Wave_B < 807))]
                fit_line = fit_lines_sinc(Wave_B, Flux_B, CO_76, 'Em')[2]
            if line == 'CO_87':
                CO_87 = Flux_B[np.where((Wave_B > 920) & (Wave_B < 922))]
                fit_line = fit_lines_sinc(Wave_B, Flux_B, CO_87, 'Em')[2]
            if line == 'CH_835':
                CH_835 = Flux_B[np.where((Wave_B > 834) & (Wave_B < 837))]
                fit_line = fit_lines_sinc(Wave_B, Flux_B, CH_835, 'Abs')[2]
            if line == 'OH_909':
                OH_909 = Flux_B[np.where((Wave_B > 907) & (Wave_B < 911))]
                fit_line = fit_lines_sinc(Wave_B, Flux_B, OH_909, 'Abs')[2]
            # Recover the A and sigma parameters to compute the line flux
            amplitude, std = abs(fit_line.amplitude_0[0]), abs(
                fit_line.sigma_0[0])
            Line_flux = amplitude * std * np.pi
        else:
            Line_flux = 0
    return Line_flux
def test_deriv_2D(self, model_class, test_parameters): """ Test the derivative of a model by fitting with an estimated and analytical derivative. """ x_lim = test_parameters['x_lim'] y_lim = test_parameters['y_lim'] if model_class.fit_deriv is None: pytest.skip("Derivative function is not defined for model.") if issubclass(model_class, PolynomialBase): pytest.skip("Skip testing derivative of polynomials.") if "log_fit" in test_parameters: if test_parameters['log_fit']: x = np.logspace(x_lim[0], x_lim[1], self.N) y = np.logspace(y_lim[0], y_lim[1], self.M) else: x = np.linspace(x_lim[0], x_lim[1], self.N) y = np.linspace(y_lim[0], y_lim[1], self.M) xv, yv = np.meshgrid(x, y) try: model_with_deriv = create_model(model_class, test_parameters, use_constraints=False, parameter_key='deriv_initial') model_no_deriv = create_model(model_class, test_parameters, use_constraints=False, parameter_key='deriv_initial') model = create_model(model_class, test_parameters, use_constraints=False, parameter_key='deriv_initial') except KeyError: model_with_deriv = create_model(model_class, test_parameters, use_constraints=False) model_no_deriv = create_model(model_class, test_parameters, use_constraints=False) model = create_model(model_class, test_parameters, use_constraints=False) # add 10% noise to the amplitude rsn = np.random.RandomState(1234567890) amplitude = test_parameters['parameters'][0] n = 0.1 * amplitude * (rsn.rand(self.M, self.N) - 0.5) data = model(xv, yv) + n fitter_with_deriv = fitting.LevMarLSQFitter() new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv, data) fitter_no_deriv = fitting.LevMarLSQFitter() new_model_no_deriv = fitter_no_deriv(model_no_deriv, xv, yv, data, estimate_jacobian=True) assert_allclose(new_model_with_deriv.parameters, new_model_no_deriv.parameters, rtol=0.1)
def test_nircam_coron_wfe_offset(fov_pix=15, oversample=2, fit_gaussian=True): """ Test offset of LW coronagraphic PSF w.r.t. wavelength due to optical wedge dispersion. Option to fit a Gaussian to PSF core in order to better determine peak position. Difference from 2.5 to 3.3 um should be ~0.015mm. Difference from 3.3 to 5.0 um should be ~0.030mm. """ # Disable Gaussian fit if astropy not installed if fit_gaussian: try: from astropy.modeling import models, fitting except ImportError: fit_gaussian = False # Ensure oversample to >1 no Gaussian fitting if fit_gaussian == False: oversample = 2 if oversample < 2 else oversample rtol = 0.2 else: rtol = 0.1 # Set up an off-axis coronagraphic PSF inst = webbpsf_core.NIRCam() inst.filter = 'F335M' inst.pupil_mask = 'CIRCLYOT' inst.image_mask = None inst.include_si_wfe = True inst.options['jitter'] = None # size of an oversampled pixel in mm (detector pixels are 18um) mm_per_pix = 18e-3 / oversample # Investigate the differences between three wavelengths warr = np.array([2.5, 3.3, 5.0]) # Find PSF position for each wavelength yloc = [] for w in warr: hdul = inst.calc_psf(monochromatic=w * 1e-6, oversample=oversample, add_distortion=False, fov_pixels=fov_pix) # Vertical image cross section of oversampled PSF im = hdul[0].data sh = im.shape xvals = mm_per_pix * (np.arange(sh[0]) - sh[0] / 2) yvals = im[:, int(sh[1] / 2)] # Fit 1D Gaussian to vertical cross section of PSF if fit_gaussian: # Create Gaussian model fit of PSF core to determine y offset g_init = models.Gaussian1D(amplitude=yvals.max(), mean=0, stddev=0.01) fit_g = fitting.LevMarLSQFitter() g = fit_g(g_init, xvals, yvals) yloc.append(g.mean.value) else: # Just use PSF max location yloc.append(xvals[yvals == yvals.max()][0]) yloc = np.array(yloc) # Difference from 2.5 to 3.3 um should be ~0.015mm diff_25_33 = np.abs(yloc[0] - yloc[1]) assert np.allclose( diff_25_33, 0.016, rtol=rtol ), "PSF shift between {:.2f} and {:.2f} um of {:.3f} mm does not match expected value (~0.016 mm).".format( warr[1], warr[0], diff_25_33) # Difference from 3.3 to 5.0 um should be ~0.030mm diff_50_33 = np.abs(yloc[2] - yloc[1]) assert np.allclose( diff_50_33, 0.032, rtol=rtol ), "PSF shift between {:.2f} and {:.2f} um of {:.3f} mm does not match expected value (~0.032 mm).".format( warr[1], warr[2], diff_50_33)
def test_gaussian2d_positive_stddev(): # This is 2D Gaussian with noise to be fitted, as provided by @ysBach test = [ [ -54.33, 13.81, -34.55, 8.95, -143.71, -0.81, 59.25, -14.78, -204.9, -30.87, -124.39, 123.53, 70.81, -109.48, -106.77, 35.64, 18.29 ], [ -126.19, -89.13, 63.13, 50.74, 61.83, 19.06, 65.7, 77.94, 117.14, 139.37, 52.57, 236.04, 100.56, 242.28, -180.62, 154.02, -8.03 ], [ 91.43, 96.45, -118.59, -174.58, -116.49, 80.11, -86.81, 14.62, 79.26, 7.56, 54.99, 260.13, -136.42, -20.77, -77.55, 174.52, 134.41 ], [ 33.88, 7.63, 43.54, 70.99, 69.87, 33.97, 273.75, 176.66, 201.94, 336.34, 340.54, 163.77, -156.22, 21.49, -148.41, 94.88, 42.55 ], [ 82.28, 177.67, 26.81, 17.66, 47.81, -31.18, 353.23, 589.11, 553.27, 242.35, 444.12, 186.02, 140.73, 75.2, -87.98, -18.23, 166.74 ], [ 113.09, -37.01, 134.23, 71.89, 107.88, 198.69, 273.88, 626.63, 551.8, 547.61, 580.35, 337.8, 139.8, 157.64, -1.67, -26.99, 37.35 ], [ 106.47, 31.97, 84.99, -125.79, 195.0, 493.65, 861.89, 908.31, 803.9, 781.01, 532.59, 404.67, 115.18, 111.11, 28.08, 122.05, -58.36 ], [ 183.62, 45.22, 40.89, 111.58, 425.81, 321.53, 545.09, 866.02, 784.78, 731.35, 609.01, 405.41, -19.65, 71.2, -140.5, 144.07, 25.24 ], [ 137.13, -86.95, 15.39, 180.14, 353.23, 699.01, 1033.8, 1014.49, 814.11, 647.68, 461.03, 249.76, 94.8, 41.17, -1.16, 183.76, 188.19 ], [ 35.39, 26.92, 198.53, -37.78, 638.93, 624.41, 816.04, 867.28, 697.0, 491.56, 378.21, -18.46, -65.76, 98.1, 12.41, -102.18, 119.05 ], [ 190.73, 125.82, 311.45, 369.34, 554.39, 454.37, 755.7, 736.61, 542.43, 188.24, 214.86, 217.91, 7.91, 27.46, -172.14, -82.36, -80.31 ], [ -55.39, 80.18, 267.19, 274.2, 169.53, 327.04, 488.15, 437.53, 225.38, 220.94, 4.01, -92.07, 39.68, 57.22, 144.66, 100.06, 34.96 ], [ 130.47, -4.23, 46.3, 101.49, 115.01, 217.38, 249.83, 115.9, 87.36, 105.81, -47.86, -9.94, -82.28, 144.45, 83.44, 23.49, 183.9 ], [ -110.38, -115.98, 245.46, 103.51, 255.43, 163.47, 56.52, 33.82, -33.26, -111.29, 88.08, 193.2, -100.68, 15.44, 86.32, -26.44, -194.1 ], [ 109.36, 96.01, -124.89, -16.4, 84.37, 114.87, -65.65, -58.52, -23.22, 42.61, 144.91, -209.84, 110.29, 66.37, -117.85, -147.73, -122.51 ], [ 10.94, 45.98, 118.12, -46.53, -72.14, -74.22, 21.22, 0.39, 86.03, 23.97, -45.42, 12.05, -168.61, 27.79, 61.81, 84.07, 28.79 ], [ 46.61, -104.11, 56.71, -90.85, -16.51, -66.45, -141.34, 0.96, 58.08, 285.29, -61.41, -9.01, -323.38, 58.35, 80.14, -101.22, 145.65 ] ] g_init = models.Gaussian2D(x_mean=8, y_mean=8) fitter = fitting.LevMarLSQFitter() y, x = np.mgrid[:17, :17] g_fit = fitter(g_init, x, y, test) # Compare with @ysBach original result: # - x_stddev was negative, so its abs value is used for comparison here. # - theta is beyond (-90, 90) deg, which doesn't make sense, so ignored. assert_allclose([g_fit.amplitude.value, g_fit.y_stddev.value], [984.7694929790363, 3.1840618351417307], rtol=1.5e-6) assert_allclose(g_fit.x_mean.value, 7.198391516587464) assert_allclose(g_fit.y_mean.value, 7.49720660088511, rtol=5e-7) assert_allclose(g_fit.x_stddev.value, 1.9840185107597297, rtol=2e-6)
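# Related technique: instead of taking abs() of a negative best-fit width
# after the fact, the stddevs can be bounded to stay positive during the
# fit. A small sketch (noise level and seed are illustrative; the fitter
# enforces bounds by clipping):
import numpy as np
from astropy.modeling import models, fitting

y, x = np.mgrid[:17, :17]
data = models.Gaussian2D(1000.0, 8.0, 8.0, 2.0, 3.0)(x, y) \
    + np.random.default_rng(4).normal(0.0, 50.0, (17, 17))

g_init = models.Gaussian2D(x_mean=8, y_mean=8,
                           bounds={'x_stddev': (0, None),
                                   'y_stddev': (0, None)})
g_fit = fitting.LevMarLSQFitter()(g_init, x, y, data)
print(g_fit.x_stddev.value, g_fit.y_stddev.value)  # both positive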
def fitting(wave_range, flux_range, z, plot=0):
    set_params = set_parameters(z)
    elines = set_params[0]
    redshift = set_params[1]
    z = redshift[0]
    z_min, z_max = redshift[1], redshift[2]
    disp = set_params[2]
    fit_sigma, sigma, sigma_min, sigma_max = disp[0], disp[1], disp[2], disp[3]
    link = np.asarray(set_params[3])
    eline_link = set_params[4]
    type = set_params[5]
    n_elines = np.count_nonzero(~np.isnan(elines))

    if n_elines == 1:

        def G_model(x, A0=1, sigma0=2, z00=z):
            l_0 = elines[0] * (1 + z00)
            model0 = A0 * np.exp(-0.5 * (x - l_0)**2 / (sigma0**2))
            return model0

        def G_deriv(x, A0=1, sigma0=2, z00=z):
            # Jacobian of G_model
            l_0 = elines[0] * (1 + z00)
            y0 = (x - l_0) / (sigma0)
            model0 = A0 * np.exp(-0.5 * y0**2)
            d_A0 = np.exp(-0.5 * y0**2)
            d_sigma0 = A0 * d_A0 * (x - l_0)**2 / sigma0**3
            d_z00 = elines[0] * A0 * d_A0 * (x - l_0) / sigma0**2
            return [d_A0, d_sigma0, d_z00]

        # initialize fitters
        from astropy.modeling import models, fitting
        fit = fitting.LevMarLSQFitter()
        or_fit = fitting.FittingWithOutlierRemoval(fit, sigma_clip,
                                                   niter=3, sigma=3.0)
        GaussModel = custom_model(G_model, fit_deriv=G_deriv)
        model = GaussModel()
        # in recent astropy, FittingWithOutlierRemoval returns
        # (fitted_model, outlier_mask)
        or_fitted_model, mask = or_fit(model, wave_range, flux_range)
        A0_best = or_fitted_model.A0.value
        sigma_best = or_fitted_model.sigma0.value
        z_best = or_fitted_model.z00.value

        if plot == 1:
            plt.plot(wave_range, flux_range, 'k+')
            plt.plot(wave_range,
                     G_model(wave_range, A0_best, sigma_best, z_best), "k-")
        return [sigma_best, [A0_best], z_best, or_fitted_model]  # best_vals

    if n_elines == 2:

        def G_model(x, A0=1, A1=1, sigma0=2, z00=z):
            l_0 = elines[0] * (1 + z00)
            l_1 = elines[1] * (1 + z00)
            model0 = A0 * np.exp(-0.5 * (x - l_0)**2 / (sigma0**2))
            model1 = A1 * np.exp(-0.5 * (x - l_1)**2 / (sigma0**2))
            return model0 + model1

        def G_deriv(x, A0=1, A1=1, sigma0=2, z00=z):
            # Jacobian of G_model
            l_0 = elines[0] * (1 + z00)
            l_1 = elines[1] * (1 + z00)
            y0 = (x - l_0) / (sigma0)
            y1 = (x - l_1) / (sigma0)
            model0 = A0 * np.exp(-0.5 * y0**2)
            model1 = A1 * np.exp(-0.5 * y1**2)
            d_A0 = np.exp(-0.5 * y0**2)
            d_A1 = np.exp(-0.5 * y1**2)
            d_sigma0 = (A0 * d_A0 * (x - l_0)**2 / sigma0**3
                        + A1 * d_A1 * (x - l_1)**2 / sigma0**3)
            d_z00 = (elines[0] * A0 * d_A0 * (x - l_0) / sigma0**2
                     + elines[1] * A1 * d_A1 * (x - l_1) / sigma0**2)
            return [d_A0, d_A1, d_sigma0, d_z00]
        # initialize fitters
        from astropy.modeling import models, fitting
        fit = fitting.LevMarLSQFitter()
        or_fit = fitting.FittingWithOutlierRemoval(fit, sigma_clip,
                                                   niter=3, sigma=3.0)
        GaussModel = custom_model(G_model, fit_deriv=G_deriv)
        model = GaussModel()
        or_fitted_model, mask = or_fit(model, wave_range, flux_range)
        A0_best = or_fitted_model.A0.value
        A1_best = or_fitted_model.A1.value
        sigma_best = or_fitted_model.sigma0.value
        z_best = or_fitted_model.z00.value

        # the linked amplitude is applied after the fit, once the free
        # amplitudes are known
        if 0 in link:
            # amplitude that will not be fitted
            link_index_0 = np.where(link == 0)[0][0]
            # the amplitudes that will be fitted
            link_index_1 = np.where(link == 1)[0][0]
            # which line it is linked to
            eline_to_link = eline_link[link_index_0]
            if link_index_0 == 0 and eline_to_link == 1:
                A0_best = A1_best / 3.
            if link_index_0 == 1 and eline_to_link == 0:
                A1_best = A0_best / 3.

        if plot == 1:
            plt.plot(wave_range, flux_range, 'k+')
            plt.plot(wave_range,
                     G_model(wave_range, A0_best, A1_best, sigma_best, z_best),
                     "k-")
        return [sigma_best, [A0_best, A1_best], z_best, or_fitted_model]  # best_vals

    if n_elines == 3:
        ################################################################################
        def G_model(x, A0=1, A1=1, A2=1, sigma0=2, z00=z):
            l_0 = elines[0] * (1 + z00)
            l_1 = elines[1] * (1 + z00)
            l_2 = elines[2] * (1 + z00)
            model0 = A0 * np.exp(-0.5 * (x - l_0)**2 / (sigma0**2))
            model1 = A1 * np.exp(-0.5 * (x - l_1)**2 / (sigma0**2))
            model2 = A2 * np.exp(-0.5 * (x - l_2)**2 / (sigma0**2))
            return model0 + model1 + model2

        def G_deriv(x, A0=1, A1=1, A2=1, sigma0=2, z00=z):
            # Jacobian of G_model
            l_0 = elines[0] * (1 + z00)
            l_1 = elines[1] * (1 + z00)
            l_2 = elines[2] * (1 + z00)
            y0 = (x - l_0) / (sigma0)
            y1 = (x - l_1) / (sigma0)
            y2 = (x - l_2) / (sigma0)
            model0 = A0 * np.exp(-0.5 * y0**2)
            model1 = A1 * np.exp(-0.5 * y1**2)
            model2 = A2 * np.exp(-0.5 * y2**2)
            d_A0 = np.exp(-0.5 * y0**2)
            d_A1 = np.exp(-0.5 * y1**2)
            d_A2 = np.exp(-0.5 * y2**2)
            d_sigma0 = (A0 * d_A0 * (x - l_0)**2 / sigma0**3
                        + A1 * d_A1 * (x - l_1)**2 / sigma0**3
                        + A2 * d_A2 * (x - l_2)**2 / sigma0**3)
            d_z00 = (elines[0] * A0 * d_A0 * (x - l_0) / sigma0**2
                     + elines[1] * A1 * d_A1 * (x - l_1) / sigma0**2
                     + elines[2] * A2 * d_A2 * (x - l_2) / sigma0**2)
            return [d_A0, d_A1, d_A2, d_sigma0, d_z00]

        # initialize fitters
        from astropy.modeling import models, fitting
        fit = fitting.LevMarLSQFitter()
        # or_fit = fitting.FittingWithOutlierRemoval(fit, sigma_clip, niter=1, sigma=3)
        GaussModel = custom_model(G_model, fit_deriv=G_deriv)
        model = GaussModel(bounds={
            "sigma0": (1., 10.),
            "z00": (z - 400. / 3e5, z + 400. / 3e5),
            "A1": (0.2, 1.5)
        })
        # or_fitted_model, mask = or_fit(model, wave_range, flux_range)
        fitted_model = fit(model, wave_range, flux_range)
        A0_best = fitted_model.A0.value
        A1_best = fitted_model.A1.value
        A2_best = fitted_model.A2.value
        sigma_best = fitted_model.sigma0.value
        z_best = fitted_model.z00.value

        if 0 in link:
            # amplitude that will not be fitted
            link_index_0 = np.where(link == 0)[0][0]
            # the amplitudes that will be fitted
            link_index_1 = np.where(link == 1)[0][0]
            # which line it is linked to
            eline_to_link = eline_link[link_index_0]
            if link_index_0 == 0 and eline_to_link == 2:
                A0_best = A2_best / 3.
            if link_index_0 == 0 and eline_to_link == 1:
                A0_best = A1_best / 3.
            if link_index_0 == 1 and eline_to_link == 0:
                A1_best = A0_best / 3.
            if link_index_0 == 1 and eline_to_link == 2:
                A1_best = A2_best / 3.
            if link_index_0 == 2 and eline_to_link == 0:
                A2_best = A0_best / 3.
            if link_index_0 == 2 and eline_to_link == 1:
                A2_best = A1_best / 3.
        ################################################################################
        if plot == 1:
            plt.plot(wave_range, flux_range, 'k+')
            plt.plot(wave_range,
                     G_model(wave_range, A0_best, A1_best, A2_best,
                             sigma_best, z_best), "k-")
        return [sigma_best, [A0_best, A1_best, A2_best], z_best]  # best_vals
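# A minimal, self-contained sketch of the pattern used in fitting() above: an
# astropy custom_model with an analytic Jacobian (fit_deriv), minimized with
# LevMarLSQFitter. Parameter names and data here are hypothetical.
import numpy as np
from astropy.modeling import custom_model, fitting

def _gauss(x, amplitude=1.0, center=0.0, sigma=1.0):
    return amplitude * np.exp(-0.5 * (x - center)**2 / sigma**2)

def _gauss_deriv(x, amplitude=1.0, center=0.0, sigma=1.0):
    # partial derivatives w.r.t. each parameter, in parameter order
    core = np.exp(-0.5 * (x - center)**2 / sigma**2)
    d_amplitude = core
    d_center = amplitude * core * (x - center) / sigma**2
    d_sigma = amplitude * core * (x - center)**2 / sigma**3
    return [d_amplitude, d_center, d_sigma]

GaussModel = custom_model(_gauss, fit_deriv=_gauss_deriv)

x = np.linspace(-5, 5, 200)
y = _gauss(x, amplitude=2.0, center=0.5, sigma=0.8)
fit = fitting.LevMarLSQFitter()
best = fit(GaussModel(), x, y)
print(best.amplitude.value, best.center.value, best.sigma.value)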
def poly_fit(xp, yp, grado_pol):
    # Note: 'polynomial' here is astropy.modeling.polynomial;
    # grado_pol is the polynomial degree
    t_init = polynomial.Polynomial1D(degree=int(grado_pol))
    fit_t = fitting.LevMarLSQFitter()
    t = fit_t(t_init, xp, yp)
    return t
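# Hypothetical usage of poly_fit above (assumes numpy and the module's
# astropy imports are available): fit a quadratic to noisy samples.
xp = np.linspace(0, 10, 50)
yp = (2.0 + 0.5 * xp - 0.1 * xp**2
      + np.random.default_rng(1).normal(scale=0.05, size=xp.size))
t = poly_fit(xp, yp, grado_pol=2)
print(t)        # fitted Polynomial1D with coefficients c0, c1, c2
print(t(5.0))   # evaluate the fitted polynomial at x = 5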
def fit_2dgaussian(array, crop=False, cent=None, cropsize=15, fwhmx=4,
                   fwhmy=4, theta=0, threshold=False, sigfactor=6,
                   full_output=False, debug=False):
    """ Fitting a 2D Gaussian to the 2D distribution of the data.

    Parameters
    ----------
    array : array_like
        Input frame with a single PSF.
    crop : bool, optional
        If True a square sub image will be cropped.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the
        subimage. If None the center of the frame is used for cropping the
        subframe (the PSF is assumed to be ~ at the center of the frame).
    cropsize : int, optional
        Size of the subimage.
    fwhmx, fwhmy : float, optional
        Initial values for the standard deviation of the fitted Gaussian,
        in px.
    theta : float, optional
        Angle of inclination of the 2d Gaussian counting from the positive
        X axis.
    threshold : bool, optional
        If True the background pixels (estimated using sigma clipped
        statistics) will be replaced by small random Gaussian noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d
        Gaussian to the data using sigma clipped statistics. All values
        smaller than (MEDIAN + sigfactor*STDDEV) will be replaced by small
        random Gaussian noise.
    full_output : bool, optional
        If False it returns just the centroid, if True it also returns the
        FWHM in X and Y (in pixels), the amplitude and the rotation angle.
    debug : bool, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.

    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting.
    mean_x : float
        Source centroid x position on input array from fitting.

    If ``full_output`` is True it returns a Pandas dataframe containing the
    following columns:
    'amplitude' : Float value. Amplitude of the Gaussian.
    'centroid_x' : Float value. X coordinate of the centroid.
    'centroid_y' : Float value. Y coordinate of the centroid.
    'fwhm_x' : Float value. FWHM in X [px].
    'fwhm_y' : Float value. FWHM in Y [px].
    'theta' : Float value. Rotation angle.
""" if array.ndim != 2: raise TypeError('Input array is not a frame or 2d array') if crop: if cent is None: ceny, cenx = frame_center(array) else: cenx, ceny = cent imside = array.shape[0] psf_subimage, suby, subx = get_square(array, min(cropsize, imside), ceny, cenx, position=True) else: psf_subimage = array.copy() if threshold: _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2) indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd) subimnoise = np.random.randn(psf_subimage.shape[0], psf_subimage.shape[1]) * clipstd psf_subimage[indi] = subimnoise[indi] # Creating the 2D Gaussian model init_amplitude = np.ptp(psf_subimage) xcom, ycom = photutils.centroid_com(psf_subimage) gauss = models.Gaussian2D(amplitude=init_amplitude, theta=theta, x_mean=xcom, y_mean=ycom, x_stddev=fwhmx * gaussian_fwhm_to_sigma, y_stddev=fwhmy * gaussian_fwhm_to_sigma) # Levenberg-Marquardt algorithm fitter = fitting.LevMarLSQFitter() y, x = np.indices(psf_subimage.shape) fit = fitter(gauss, x, y, psf_subimage) if crop: mean_y = fit.y_mean.value + suby mean_x = fit.x_mean.value + subx else: mean_y = fit.y_mean.value mean_x = fit.x_mean.value fwhm_y = fit.y_stddev.value*gaussian_sigma_to_fwhm fwhm_x = fit.x_stddev.value*gaussian_sigma_to_fwhm amplitude = fit.amplitude.value theta = np.rad2deg(fit.theta.value) if debug: if threshold: msg = ['Subimage thresholded', 'Model', 'Residuals'] else: msg = ['Subimage', 'Model', 'Residuals'] pp_subplots(psf_subimage, fit(x, y), psf_subimage-fit(x, y), grid=True, gridspacing=1, label=msg) print('FWHM_y =', fwhm_y) print('FWHM_x =', fwhm_x, '\n') print('centroid y =', mean_y) print('centroid x =', mean_x) print('centroid y subim =', fit.y_mean.value) print('centroid x subim =', fit.x_mean.value, '\n') print('amplitude =', amplitude) print('theta =', theta) if full_output: return pd.DataFrame({'centroid_y': mean_y, 'centroid_x': mean_x, 'fwhm_y': fwhm_y, 'fwhm_x': fwhm_x, 'amplitude': amplitude, 'theta': theta}, index=[0]) else: return mean_y, mean_x
def run(self) -> None: """ Run method of the module. Locates the position of the calibration spots in the center frame. From the four spots, the position of the star behind the coronagraph is fitted, and the images are shifted and cropped. Returns ------- NoneType None """ @typechecked def _get_center( image_number: int, center: Optional[Tuple[int, int]] ) -> Tuple[np.ndarray, Tuple[int, int]]: if center_shape[-3] > 1: warnings.warn( 'Multiple center images found. Using the first image of the stack.' ) if ndim == 3: center_frame = self.m_center_in_port[0, ] elif ndim == 4: center_frame = self.m_center_in_port[image_number, 0, ] if center is None: center = center_pixel(center_frame) else: center = (int(np.floor(center[0])), int(np.floor(center[1]))) return center_frame, center center_shape = self.m_center_in_port.get_shape() im_shape = self.m_image_in_port.get_shape() ndim = self.m_image_in_port.get_ndim() center_frame, self.m_center = _get_center(0, self.m_center) # Read in wavelength information or set it to default values if ndim == 4: wavelength = self.m_image_in_port.get_attribute('WAVELENGTH') if wavelength is None: raise ValueError( 'The wavelength information is required to centre IFS data. ' 'Please add it via the WavelengthReadingModule before using ' 'the WaffleCenteringModule.') if im_shape[0] != center_shape[0]: raise ValueError( f'Number of science wavelength channels: {im_shape[0]}. ' f'Number of center wavelength channels: {center_shape[0]}. ' 'Exactly one center image per wavelength is required.') wavelength_min = np.min(wavelength) elif ndim == 3: # for none ifs data, use default value wavelength = [1.] wavelength_min = 1. # check if science and center images have the same shape if im_shape[-2:] != center_shape[-2:]: raise ValueError( 'Science and center images should have the same shape.') # Setting angle via pattern (used for backwards compability) if self.m_pattern is not None: if self.m_pattern == 'x': self.m_angle = 45. elif self.m_pattern == '+': self.m_angle = 0. else: raise ValueError( f'The pattern {self.m_pattern} is not valid. Please select ' f'either \'x\' or \'+\'.') warnings.warn( f'The \'pattern\' parameter will be deprecated in a future release. ' f'Please Use the \'angle\' parameter instead and set it to ' f'{self.m_angle} degrees.', DeprecationWarning) pixscale = self.m_image_in_port.get_attribute('PIXSCALE') self.m_sigma /= pixscale if self.m_size is not None: self.m_size = int(math.ceil(self.m_size / pixscale)) if self.m_dither: dither_x = self.m_image_in_port.get_attribute('DITHER_X') dither_y = self.m_image_in_port.get_attribute('DITHER_Y') nframes = self.m_image_in_port.get_attribute('NFRAMES') nframes = np.cumsum(nframes) nframes = np.insert(nframes, 0, 0) # size of center image, only works with odd value ref_image_size = 21 # Arrays for the positions x_pos = np.zeros(4) y_pos = np.zeros(4) # Arrays for the center position for each wavelength x_center = np.zeros((len(wavelength))) y_center = np.zeros((len(wavelength))) # Loop for 4 waffle spots for w, wave_nr in enumerate(wavelength): # Prapre centering frame center_frame, _ = _get_center(w, self.m_center) center_frame_unsharp = center_frame - gaussian_filter( input=center_frame, sigma=self.m_sigma) for i in range(4): # Approximate positions of waffle spots radius = self.m_radius * wave_nr / wavelength_min x_0 = np.floor(self.m_center[0] + radius * np.cos(self.m_angle * np.pi / 180 + np.pi / 4. * (2 * i))) y_0 = np.floor(self.m_center[1] + radius * np.sin(self.m_angle * np.pi / 180 + np.pi / 4. 
* (2 * i))) tmp_center_frame = crop_image(image=center_frame_unsharp, center=(int(y_0), int(x_0)), size=ref_image_size) # find maximum in tmp image coords = np.unravel_index(indices=np.argmax(tmp_center_frame), shape=tmp_center_frame.shape) y_max, x_max = coords[0], coords[1] pixmax = tmp_center_frame[y_max, x_max] max_pos = np.array([x_max, y_max]).reshape(1, 2) # Check whether it is the correct maximum: second brightest pixel should be nearby tmp_center_frame[y_max, x_max] = 0. # introduce distance parameter dist = np.inf while dist > 2: coords = np.unravel_index( indices=np.argmax(tmp_center_frame), shape=tmp_center_frame.shape) y_max_new, x_max_new = coords[0], coords[1] pixmax_new = tmp_center_frame[y_max_new, x_max_new] # Caculate minimal distance to previous points tmp_center_frame[y_max_new, x_max_new] = 0. dist = np.amin( np.linalg.norm(np.vstack((max_pos[:, 0] - x_max_new, max_pos[:, 1] - y_max_new)), axis=0)) if dist <= 2 and pixmax_new < pixmax: break max_pos = np.vstack((max_pos, [x_max_new, y_max_new])) x_max = x_max_new y_max = y_max_new pixmax = pixmax_new x_0 = x_0 - (ref_image_size - 1) / 2 + x_max y_0 = y_0 - (ref_image_size - 1) / 2 + y_max # create reference image around determined maximum ref_center_frame = crop_image(image=center_frame_unsharp, center=(int(y_0), int(x_0)), size=ref_image_size) # Fit the data using astropy.modeling gauss_init = models.Gaussian2D( amplitude=np.amax(ref_center_frame), x_mean=x_0, y_mean=y_0, x_stddev=1., y_stddev=1., theta=0.) fit_gauss = fitting.LevMarLSQFitter() y_grid, x_grid = np.mgrid[y_0 - (ref_image_size - 1) / 2:y_0 + (ref_image_size - 1) / 2 + 1, x_0 - (ref_image_size - 1) / 2:x_0 + (ref_image_size - 1) / 2 + 1] gauss = fit_gauss(gauss_init, x_grid, y_grid, ref_center_frame) x_pos[i] = gauss.x_mean.value y_pos[i] = gauss.y_mean.value # Find star position as intersection of two lines x_center[w] = ((y_pos[0]-x_pos[0]*(y_pos[2]-y_pos[0])/(x_pos[2]-float(x_pos[0]))) - (y_pos[1]-x_pos[1]*(y_pos[1]-y_pos[3])/(x_pos[1]-float(x_pos[3])))) / \ ((y_pos[1]-y_pos[3])/(x_pos[1]-float(x_pos[3])) - (y_pos[2]-y_pos[0])/(x_pos[2]-float(x_pos[0]))) y_center[w] = x_center[w]*(y_pos[1]-y_pos[3])/(x_pos[1]-float(x_pos[3])) + \ (y_pos[1]-x_pos[1]*(y_pos[1]-y_pos[3])/(x_pos[1]-float(x_pos[3]))) # Adjust science images nimages = self.m_image_in_port.get_shape()[-3] npix = self.m_image_in_port.get_shape()[-2] nwavelengths = len(wavelength) start_time = time.time() for i in range(nimages): im_storage = [] for j in range(nwavelengths): im_index = i * nwavelengths + j progress(im_index, nimages * nwavelengths, 'Centering the images...', start_time) if ndim == 3: image = self.m_image_in_port[i, ] elif ndim == 4: image = self.m_image_in_port[j, i, ] shift_yx = np.array([ (float(im_shape[-2]) - 1.) / 2. - y_center[j], (float(im_shape[-1]) - 1.) / 2. 
- x_center[j] ]) if self.m_dither: index = np.digitize(i, nframes, right=False) - 1 shift_yx[0] -= dither_y[index] shift_yx[1] -= dither_x[index] if npix % 2 == 0 and self.m_size is not None: im_tmp = np.zeros((image.shape[0] + 1, image.shape[1] + 1)) im_tmp[:-1, :-1] = image image = im_tmp shift_yx[0] += 0.5 shift_yx[1] += 0.5 im_shift = shift_image(image, shift_yx, 'spline') if self.m_size is not None: im_crop = crop_image(im_shift, None, self.m_size) im_storage.append(im_crop) else: im_storage.append(im_shift) if ndim == 3: self.m_image_out_port.append(im_storage[0], data_dim=3) elif ndim == 4: self.m_image_out_port.append(np.asarray(im_storage), data_dim=4) print(f'Center [x, y] = [{x_center}, {y_center}]') history = f'[x, y] = [{round(x_center[j], 2)}, {round(y_center[j], 2)}]' self.m_image_out_port.copy_attributes(self.m_image_in_port) self.m_image_out_port.add_history('WaffleCenteringModule', history) self.m_image_out_port.close_port()
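# The star position above is the intersection of the two lines through
# opposite waffle spots (0-2 and 1-3). A minimal standalone sketch of that
# geometry, with hypothetical spot positions:
def _line_intersection(p0, p2, p1, p3):
    # each point is (x, y); lines are p0-p2 and p1-p3
    m_a = (p2[1] - p0[1]) / (p2[0] - p0[0])
    m_b = (p3[1] - p1[1]) / (p3[0] - p1[0])
    c_a = p0[1] - m_a * p0[0]
    c_b = p1[1] - m_b * p1[0]
    x = (c_b - c_a) / (m_a - m_b)
    return x, m_a * x + c_a

# four spots roughly on a square around (100, 100)
print(_line_intersection((80., 80.), (120., 120.),
                         (80., 120.), (120., 80.)))  # ~ (100.0, 100.0)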
def fit_2dairydisk(array, crop=False, cent=None, cropsize=15, fwhm=4,
                   threshold=False, sigfactor=6, full_output=False,
                   debug=False):
    """ Fitting a 2D Airy disk to the 2D distribution of the data.

    Parameters
    ----------
    array : array_like
        Input frame with a single PSF.
    crop : bool, optional
        If True a square sub image will be cropped.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the
        subimage. If None the center of the frame is used for cropping the
        subframe (the PSF is assumed to be ~ at the center of the frame).
    cropsize : int, optional
        Size of the subimage.
    fwhm : float, optional
        Initial value for the FWHM of the fitted 2d Airy disk, in px.
    threshold : bool, optional
        If True the background pixels (estimated using sigma clipped
        statistics) will be replaced by small random Gaussian noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d
        Airy disk to the data using sigma clipped statistics. All values
        smaller than (MEDIAN + sigfactor*STDDEV) will be replaced by small
        random Gaussian noise.
    full_output : bool, optional
        If False it returns just the centroid, if True it also returns the
        FWHM (in pixels), the amplitude and the radius.
    debug : bool, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.

    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting.
    mean_x : float
        Source centroid x position on input array from fitting.

    If ``full_output`` is True it returns a Pandas dataframe containing the
    following columns:
    'amplitude' : Float value. Amplitude of the Airy disk.
    'centroid_x' : Float value. X coordinate of the centroid.
    'centroid_y' : Float value. Y coordinate of the centroid.
    'fwhm' : Float value. FWHM [px].
    'radius' : Float value. Radius of the first zero [px].
    """
    if array.ndim != 2:
        raise TypeError('Input array is not a frame or 2d array')

    if crop:
        if cent is None:
            ceny, cenx = frame_center(array)
        else:
            cenx, ceny = cent

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array, min(cropsize, imside),
                                              ceny, cenx, position=True)
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd
        psf_subimage[indi] = subimnoise[indi]

    # Creating the 2d Airy disk model
    init_amplitude = np.ptp(psf_subimage)
    xcom, ycom = photutils.centroid_com(psf_subimage)
    diam_1st_zero = (fwhm * 2.44) / 1.028
    airy = models.AiryDisk2D(amplitude=init_amplitude, x_0=xcom, y_0=ycom,
                             radius=diam_1st_zero / 2.)
    # Levenberg-Marquardt algorithm
    fitter = fitting.LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(airy, x, y, psf_subimage)

    if crop:
        mean_y = fit.y_0.value + suby
        mean_x = fit.x_0.value + subx
    else:
        mean_y = fit.y_0.value
        mean_x = fit.x_0.value

    amplitude = fit.amplitude.value
    radius = fit.radius.value
    fwhm = ((radius * 1.028) / 2.44) * 2

    if debug:
        if threshold:
            msg = ['Subimage thresholded', 'Model', 'Residuals']
        else:
            msg = ['Subimage', 'Model', 'Residuals']
        pp_subplots(psf_subimage, fit(x, y), psf_subimage - fit(x, y),
                    grid=True, gridspacing=1, label=msg)
        print('FWHM =', fwhm)
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_0.value)
        print('centroid x subim =', fit.x_0.value, '\n')
        print('amplitude =', amplitude)
        print('radius =', radius)

    if full_output:
        return pd.DataFrame({'centroid_y': mean_y, 'centroid_x': mean_x,
                             'fwhm': fwhm, 'radius': radius,
                             'amplitude': amplitude}, index=[0])
    else:
        return mean_y, mean_x
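# Hypothetical usage of fit_2dairydisk above on a noiseless synthetic Airy
# pattern; crop=False avoids the module's cropping helpers.
import numpy as np
from astropy.modeling import models

yy, xx = np.mgrid[:31, :31]
frame = models.AiryDisk2D(amplitude=50., x_0=15.7, y_0=14.2,
                          radius=4.7)(xx, yy)
cy, cx = fit_2dairydisk(frame, crop=False, fwhm=4)
print('centroid (y, x) =', cy, cx)  # ~ (14.2, 15.7)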
def find_stars_single(img_file, fwhm, threshold, N_passes, plot_psf_compare, mask_file, sharp_lim, peak_max): pid = mp.current_process().pid print(f' p{pid} - Working on image: {img_file}') img, hdr = fits.getdata(img_file, header=True, ignore_missing_end=True) mask = fits.getdata(mask_file).astype('bool') img = np.ma.masked_array(img, mask=mask) fwhm_curr = fwhm # Calculate the bacgkround and noise (iteratively) print(f' p{pid} - Calculating background') bkg_threshold_above = 1 bkg_threshold_below = 3 good_pix = np.where(np.isfinite(img)) for nn in range(5): bkg_mean = img[good_pix].mean() bkg_std = img[good_pix].std() bad_hi = bkg_mean + (bkg_threshold_above * bkg_std) bad_lo = bkg_mean - (bkg_threshold_below * bkg_std) good_pix = np.where((img > bad_lo) & (img < bad_hi)) bkg_mean = img[good_pix].mean() bkg_std = img[good_pix].std() img_threshold = threshold * bkg_std print(f' p{pid} - Bkg = {bkg_mean:.2f} +/- {bkg_std:.2f}') print(f' p{pid} - Bkg Threshold = {img_threshold:.2f}') # Detect stars print(f' p{pid} - Detecting Stars') # Each pass will have an updated fwhm for the PSF. for nn in range(N_passes): print(f' p{pid} - Pass {nn:d} assuming FWHM = {fwhm_curr:.1f}') daofind = DAOStarFinder(fwhm=fwhm_curr, threshold=img_threshold, exclude_border=True, sharplo=-sharp_lim, sharphi=sharp_lim, peakmax=peak_max) sources = daofind(img - bkg_mean, mask=mask) print( f' p{pid} - {len(sources)} sources found, now fitting for FWHM.' ) # Calculate FWHM for each detected star. x_fwhm = np.zeros(len(sources), dtype=float) y_fwhm = np.zeros(len(sources), dtype=float) theta = np.zeros(len(sources), dtype=float) # Calculate measure of fits fvu = np.zeros(len(sources), dtype=float) lss = np.zeros(len(sources), dtype=float) mfr = np.zeros(len(sources), dtype=float) # We will actually be resampling the images for the Gaussian fits. resamp = 1 #2 #BUG - changed this value for testing bin1 open loop cutout_half_size = int(round(fwhm_curr * 3.5)) cutout_size = 2 * cutout_half_size + 1 # Define variables to hold final averages PSFs final_psf_obs = np.zeros((cutout_size * resamp, cutout_size * resamp), dtype=float) final_psf_mod = np.zeros((cutout_size * resamp, cutout_size * resamp), dtype=float) final_psf_count = 0 # Setup our gaussian fitter with some good initial guesses. sigma_init_guess = fwhm_curr * gaussian_fwhm_to_sigma g2d_model = models.Gaussian2D(1.0, cutout_half_size * resamp, cutout_half_size * resamp, sigma_init_guess * resamp, sigma_init_guess * resamp, theta=0, bounds={ 'x_stddev': [0, fwhm * 2 * resamp], 'y_stddev': [0, fwhm * 2 * resamp], 'amplitude': [0, 2] }) c2d_model = models.Const2D(0.0) the_model = g2d_model + c2d_model the_fitter = fitting.LevMarLSQFitter() cut_y, cut_x = np.mgrid[:cutout_size, :cutout_size] for ss in range(len(sources)): x_lo = int(round(sources[ss]['xcentroid'] - cutout_half_size)) x_hi = x_lo + cutout_size y_lo = int(round(sources[ss]['ycentroid'] - cutout_half_size)) y_hi = y_lo + cutout_size cutout_tmp = img[y_lo:y_hi, x_lo:x_hi].astype(float) if ((cutout_tmp.shape[0] != cutout_size) | (cutout_tmp.shape[1] != cutout_size)): # Edge source... fitting is no good continue # Oversample the image cutout_resamp = scipy.ndimage.zoom(cutout_tmp, resamp, order=1) #cutout_resamp /= cutout_resamp.sum() #normed sum to 1 cutout_resamp /= cutout_resamp.max( ) #normed peak to 1 # BUG: what if bright outlier? cut_y_resamp, cut_x_resamp = np.mgrid[:cutout_size * resamp, :cutout_size * resamp] # Fit a 2D gaussian + constant with warnings.catch_warnings(): # Suppress warnings... 
too many. warnings.simplefilter("ignore", category=UserWarning) warnings.simplefilter("ignore", category=AstropyWarning) g2d_params = the_fitter( the_model, cut_x_resamp, cut_y_resamp, cutout_resamp, epsilon=1e-12, acc=1e-12, maxiter=300, weights=None) #added values for better fit g2d_image = g2d_params(cut_x_resamp, cut_y_resamp) # Catch bad fits and ignore. if (np.isnan(g2d_params.x_mean_0.value) or (np.abs(g2d_params.x_mean_0.value) > (cutout_size * resamp)) or (np.abs(g2d_params.y_mean_0.value) > (cutout_size * resamp))): print(f' p{pid} - Bad fit for {ss}') continue # Add to our average observed/model PSFs if sources['flux'][ss] > 1.9: final_psf_count += 1 final_psf_obs += cutout_resamp final_psf_mod += g2d_image # Save the FWHM and angle. x_fwhm[ ss] = g2d_params.x_stddev_0.value / gaussian_fwhm_to_sigma / resamp y_fwhm[ ss] = g2d_params.y_stddev_0.value / gaussian_fwhm_to_sigma / resamp theta[ss] = g2d_params.theta_0.value # calc residuals - based on FWHM # relevant part of cutout mid_ss = cutout_resamp.shape[0] / 2 x_hi_ss = int(round(mid_ss + x_fwhm[ss])) x_lo_ss = int(round(mid_ss - x_fwhm[ss])) y_hi_ss = int(round(mid_ss + y_fwhm[ss])) y_lo_ss = int(round(mid_ss - y_fwhm[ss])) cutout_resamp_cut = cutout_resamp[y_lo_ss:y_hi_ss, x_lo_ss:x_hi_ss] # fit metrics diff_img_ss = cutout_resamp_cut - g2d_image[y_lo_ss:y_hi_ss, x_lo_ss:x_hi_ss] PSF_mean_ss = np.mean(cutout_resamp_cut) residual_ss = np.sum(diff_img_ss**2) # Least Squares Sum (LSS) med_fr_ss = np.median( np.abs(diff_img_ss / cutout_resamp_cut)) # median fractional residual (MFR) fvu_ss = residual_ss / np.sum( (cutout_resamp_cut - PSF_mean_ss)** 2) # fraction of variance unexplained (FVU) # Save the fit lss[ss] = residual_ss fvu[ss] = fvu_ss mfr[ss] = med_fr_ss if (plot_psf_compare == True) and (x_lo > 200) and (y_lo > 200): #plt.figure(4, figsize=(6, 4)) vmin = cutout_resamp.min() vmax = cutout_resamp.max() plt.figure(4, figsize=(12, 3)) plt.clf() # 1. Cut out Source plt.subplot(1, 4, 1) plt.imshow(cutout_resamp, origin='lower', vmin=vmin, vmax=vmax) plt.gca().add_patch( Rectangle((x_lo_ss, y_lo_ss), x_hi_ss - x_lo_ss, y_hi_ss - y_lo_ss, edgecolor='red', facecolor='none', lw=2)) plt.colorbar(fraction=0.046, pad=0.05) plt.title(f'Image (resamp={resamp:d})') # 2. Model of source plt.subplot(1, 4, 2) plt.imshow(g2d_image, origin='lower', vmin=vmin, vmax=vmax) plt.gca().add_patch( Rectangle((x_lo_ss, y_lo_ss), x_hi_ss - x_lo_ss, y_hi_ss - y_lo_ss, edgecolor='red', facecolor='none', lw=2)) plt.colorbar(fraction=0.046, pad=0.05) plt.title(f'Model (resamp={resamp:d})') # 3. Residual - Subtraction plt.subplot(1, 4, 3) plt.imshow(cutout_resamp - g2d_image, origin='lower', vmin=-vmax / 6, vmax=vmax / 6) plt.gca().add_patch( Rectangle((x_lo, y_lo), x_hi - x_lo, y_hi - y_lo, edgecolor='red', facecolor='none', lw=1)) plt.colorbar(fraction=0.046, pad=0.04) plt.title(f"Data-Model (resamp={resamp:d})") # 4. Residual - Fraction plt.subplot(1, 4, 4) plt.subplots_adjust(left=0.08) plt.imshow((cutout_resamp - g2d_image) / cutout_resamp, vmin=-1, vmax=1) # take out outliers? 
plt.colorbar(fraction=0.046, pad=0.05) plt.title('Residual fraction') plt.suptitle( f"Source {ss} fit, FWHM x: {x_fwhm[ss]:.2f} y: {y_fwhm[ss]:.2f} | LSS {residual_ss:.2e} | FVU {fvu_ss:.2e} | MFR {med_fr_ss:.2e}" ) plt.tight_layout() plt.pause(0.05) pdb.set_trace() # Some occasional display if (plot_psf_compare == True) and (ss % 250 == 0): plt.figure(2, figsize=(8, 3)) plt.clf() plt.subplot(1, 2, 1) plt.subplots_adjust(left=0.08) plt.imshow(final_psf_obs) plt.colorbar(fraction=0.25) plt.title(f'Obs PSF (resamp = {resamp:d})') plt.subplot(1, 2, 2) plt.subplots_adjust(left=0.08) plt.imshow(final_psf_mod) plt.colorbar(fraction=0.25) #plt.axis('equal') plt.title(f'Mod PSF (resamp = {resamp:d})') plt.suptitle(f"Observed vs. Model PSF average fit") plt.pause(0.05) print( f' p{pid} - ss={ss} fwhm_x={x_fwhm[ss]:.1f} fwhm_y={y_fwhm[ss]:.1f}' ) sources['x_fwhm'] = x_fwhm sources['y_fwhm'] = y_fwhm sources['theta'] = theta sources['LSS'] = lss sources['FVU'] = fvu sources['MFR'] = mfr # Save the average PSF (flux-weighted). Note we are making a slight mistake # here since each PSF has a different sub-pixel position... still same for both # obs and model final_psf_obs /= final_psf_count final_psf_mod /= final_psf_count final_psf_obs /= final_psf_obs.sum() final_psf_mod /= final_psf_mod.sum() # saving psf img_dir_name, img_file_name = os.path.split(img_file) psf_dir = img_dir_name + '/psf/' util.mkdir(psf_dir) fits.writeto(psf_dir + img_file_name.replace('.fits', '_psf_obs.fits'), final_psf_obs, hdr, overwrite=True) fits.writeto(psf_dir + img_file_name.replace('.fits', '_psf_mod.fits'), final_psf_mod, hdr, overwrite=True) #TODO: make starlist specific # Drop sources with flux (signifiance) that isn't good enough. # Empirically this is <1.2 # Also drop sources that couldn't be fit. good = np.where((sources['flux'] > 1.9) & (sources['x_fwhm'] > 0) & (sources['y_fwhm'] > 0))[0] sources = sources[good] # Only use the brightest sources for calculating the mean. This is just for printing. idx = np.where(sources['flux'] > 5)[0] x_fwhm_med = np.median(sources['x_fwhm'][idx]) y_fwhm_med = np.median(sources['y_fwhm'][idx]) print(f' p{pid} - Number of sources = {len(sources)}') print( f' p{pid} - Median x_fwhm = {x_fwhm_med:.1f} +/- {sources["x_fwhm"].std():.1f}' ) print( f' p{pid} - Median y_fwhm = {y_fwhm_med:.1f} +/- {sources["y_fwhm"].std():.1f}' ) fwhm_curr = np.mean([x_fwhm_med, y_fwhm_med]) formats = { 'xcentroid': '%8.3f', 'ycentroid': '%8.3f', 'sharpness': '%.2f', 'roundness1': '%.2f', 'roundness2': '%.2f', 'peak': '%10.1f', 'flux': '%10.6f', 'mag': '%6.2f', 'x_fwhm': '%5.2f', 'y_fwhm': '%5.2f', 'theta': '%6.3f', 'LSS': '%5.2f', 'FVU': '%5.2f', 'MFR': '%5.2f', } sources.write(img_file.replace('.fits', '_stars.txt'), format='ascii.fixed_width', delimiter=None, bookend=False, formats=formats, overwrite=True) return
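# Minimal sketch of the compound model used in find_stars_single above
# (hypothetical data): a Gaussian2D source plus a Const2D background fitted
# jointly; compound model parameters get _0/_1 suffixes.
import numpy as np
from astropy.modeling import models, fitting

cy, cx = np.mgrid[:21, :21]
data = (models.Gaussian2D(1.0, 10, 10, 2.0, 2.0) + models.Const2D(0.1))(cx, cy)

model = models.Gaussian2D(1.0, 10, 10, 2.5, 2.5) + models.Const2D(0.0)
fitter = fitting.LevMarLSQFitter()
best = fitter(model, cx, cy, data)
print(best.x_stddev_0.value, best.amplitude_1.value)  # ~2.0, ~0.1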
def run(self) -> None: """ Run method of the module. Locates the position of the calibration spots in the center frame. From the four spots, the position of the star behind the coronagraph is fitted, and the images are shifted and cropped. Returns ------- NoneType None """ def _get_center(center): center_frame = self.m_center_in_port[0, ] if center_shape[0] > 1: warnings.warn( 'Multiple center images found. Using the first image of the stack.' ) if center is None: center = center_pixel(center_frame) else: center = (np.floor(center[0]), np.floor(center[1])) return center_frame, center self.m_image_out_port.del_all_data() self.m_image_out_port.del_all_attributes() center_shape = self.m_center_in_port.get_shape() im_shape = self.m_image_in_port.get_shape() center_frame, self.m_center = _get_center(self.m_center) if im_shape[-2:] != center_shape[-2:]: raise ValueError( 'Science and center images should have the same shape.') pixscale = self.m_image_in_port.get_attribute('PIXSCALE') self.m_sigma /= pixscale if self.m_size is not None: self.m_size = int(math.ceil(self.m_size / pixscale)) if self.m_dither: dither_x = self.m_image_in_port.get_attribute('DITHER_X') dither_y = self.m_image_in_port.get_attribute('DITHER_Y') nframes = self.m_image_in_port.get_attribute('NFRAMES') nframes = np.cumsum(nframes) nframes = np.insert(nframes, 0, 0) center_frame_unsharp = center_frame - gaussian_filter( input=center_frame, sigma=self.m_sigma) # size of center image, only works with odd value ref_image_size = 21 # Arrays for the positions x_pos = np.zeros(4) y_pos = np.zeros(4) # Loop for 4 waffle spots for i in range(4): # Approximate positions of waffle spots if self.m_pattern == 'x': x_0 = np.floor(self.m_center[0] + self.m_radius * np.cos(np.pi / 4. * (2 * i + 1))) y_0 = np.floor(self.m_center[1] + self.m_radius * np.sin(np.pi / 4. * (2 * i + 1))) elif self.m_pattern == '+': x_0 = np.floor(self.m_center[0] + self.m_radius * np.cos(np.pi / 4. * (2 * i))) y_0 = np.floor(self.m_center[1] + self.m_radius * np.sin(np.pi / 4. * (2 * i))) tmp_center_frame = crop_image(image=center_frame_unsharp, center=(int(y_0), int(x_0)), size=ref_image_size) # find maximum in tmp image coords = np.unravel_index(indices=np.argmax(tmp_center_frame), shape=tmp_center_frame.shape) y_max, x_max = coords[0], coords[1] pixmax = tmp_center_frame[y_max, x_max] max_pos = np.array([x_max, y_max]).reshape(1, 2) # Check whether it is the correct maximum: second brightest pixel should be nearby tmp_center_frame[y_max, x_max] = 0. # introduce distance parameter dist = np.inf while dist > 2: coords = np.unravel_index(indices=np.argmax(tmp_center_frame), shape=tmp_center_frame.shape) y_max_new, x_max_new = coords[0], coords[1] pixmax_new = tmp_center_frame[y_max_new, x_max_new] # Caculate minimal distance to previous points tmp_center_frame[y_max_new, x_max_new] = 0. dist = np.amin( np.linalg.norm(np.vstack((max_pos[:, 0] - x_max_new, max_pos[:, 1] - y_max_new)), axis=0)) if dist <= 2 and pixmax_new < pixmax: break max_pos = np.vstack((max_pos, [x_max_new, y_max_new])) x_max = x_max_new y_max = y_max_new pixmax = pixmax_new x_0 = x_0 - (ref_image_size - 1) / 2 + x_max y_0 = y_0 - (ref_image_size - 1) / 2 + y_max # create reference image around determined maximum ref_center_frame = crop_image(image=center_frame_unsharp, center=(int(y_0), int(x_0)), size=ref_image_size) # Fit the data using astropy.modeling gauss_init = models.Gaussian2D(amplitude=np.amax(ref_center_frame), x_mean=x_0, y_mean=y_0, x_stddev=1., y_stddev=1., theta=0.) 
fit_gauss = fitting.LevMarLSQFitter() y_grid, x_grid = np.mgrid[y_0 - (ref_image_size - 1) / 2:y_0 + (ref_image_size - 1) / 2 + 1, x_0 - (ref_image_size - 1) / 2:x_0 + (ref_image_size - 1) / 2 + 1] gauss = fit_gauss(gauss_init, x_grid, y_grid, ref_center_frame) x_pos[i] = gauss.x_mean.value y_pos[i] = gauss.y_mean.value # Find star position as intersection of two lines x_center = ((y_pos[0]-x_pos[0]*(y_pos[2]-y_pos[0])/(x_pos[2]-float(x_pos[0]))) - (y_pos[1]-x_pos[1]*(y_pos[1]-y_pos[3])/(x_pos[1]-float(x_pos[3])))) / \ ((y_pos[1]-y_pos[3])/(x_pos[1]-float(x_pos[3])) - (y_pos[2]-y_pos[0])/(x_pos[2]-float(x_pos[0]))) y_center = x_center*(y_pos[1]-y_pos[3])/(x_pos[1]-float(x_pos[3])) + \ (y_pos[1]-x_pos[1]*(y_pos[1]-y_pos[3])/(x_pos[1]-float(x_pos[3]))) nimages = self.m_image_in_port.get_shape()[0] npix = self.m_image_in_port.get_shape()[1] start_time = time.time() for i in range(nimages): progress(i, nimages, 'Running WaffleCenteringModule...', start_time) image = self.m_image_in_port[i, ] shift_yx = np.array([(float(im_shape[-2]) - 1.) / 2. - y_center, (float(im_shape[-1]) - 1.) / 2. - x_center]) if self.m_dither: index = np.digitize(i, nframes, right=False) - 1 shift_yx[0] -= dither_y[index] shift_yx[1] -= dither_x[index] if npix % 2 == 0 and self.m_size is not None: im_tmp = np.zeros((image.shape[0] + 1, image.shape[1] + 1)) im_tmp[:-1, :-1] = image image = im_tmp shift_yx[0] += 0.5 shift_yx[1] += 0.5 im_shift = shift_image(image, shift_yx, 'spline') if self.m_size is not None: im_crop = crop_image(im_shift, None, self.m_size) self.m_image_out_port.append(im_crop, data_dim=3) else: self.m_image_out_port.append(im_shift, data_dim=3) sys.stdout.write('Running WaffleCenteringModule... [DONE]\n') sys.stdout.write('Center [x, y] = [' + str(x_center) + ', ' + str(y_center) + ']\n') sys.stdout.flush() history = f'[x, y] = [{round(x_center, 2)}, {round(y_center, 2)}]' self.m_image_out_port.copy_attributes(self.m_image_in_port) self.m_image_out_port.add_history('WaffleCenteringModule', history) self.m_image_out_port.close_port()
def getFWHM(psf, pixelScale, rebin=1, method='contour', nargout=2,
            center=None, std_guess=2):
    # Gaussian and Moffat fitting are not really efficient on
    # anisoplanatic PSFs. Prefer the contour method in such a case.
    # The cutting method does not work for PSFs that are not aligned
    # with the x- or y-axis.

    # Interpolation
    Ny, Nx = psf.shape
    if rebin > 1:
        im_hr = interpolateSupport(psf, rebin * np.array([Nx, Ny]))
    else:
        im_hr = psf

    if method == 'cutting':
        # Brutal approach when the PSF is centered and aligned
        # x-axis FWHM
        imy = im_hr[:, int(Ny * rebin / 2)]
        w = np.where(imy >= imy.max() / 2)[0]
        FWHMy = pixelScale * (w.max() - w.min()) / rebin
        # y-axis FWHM
        imx = im_hr[int(Nx * rebin / 2), :]
        w = np.where(imx >= imx.max() / 2)[0]
        FWHMx = (w.max() - w.min()) / rebin * pixelScale
        theta = 0
    elif method == 'contour':
        # Contour approach: something wrong about the ellipse orientation
        mpl.interactive(False)
        fig = plt.figure()
        C = plt.contour(im_hr, levels=[im_hr.max() / 2])
        plt.close(fig)
        C = C.collections[0].get_paths()[0]
        C = C.vertices
        xC = C[:, 0]
        yC = C[:, 1]
        # centering the ellipse
        mx = np.array([xC.max(), yC.max()])
        mn = np.array([xC.min(), yC.min()])
        cent = (mx + mn) / 2
        wx = xC - cent[0]
        wy = yC - cent[1]
        # Get the modulus
        wr = np.hypot(wx, wy) / rebin * pixelScale
        # Getting the FWHM
        FWHMx = 2 * wr.max()
        FWHMy = 2 * wr.min()
        # Getting the ellipse orientation
        xm = wx[wr.argmax()]
        ym = wy[wr.argmax()]
        theta = np.mean(180 * np.arctan2(ym, xm) / np.pi)
        mpl.interactive(True)
    elif method == 'gaussian':
        # Prepare array r with radius in arcseconds
        y, x = np.indices(psf.shape, dtype=float)
        if center is None:
            # Normalize
            psf = psf / psf.max()
            # get exact center of image
            center = tuple((a - 1) / 2.0 for a in psf.shape[::-1])
        x -= center[0]
        y -= center[1]

        Y, X = np.mgrid[:Ny, :Nx] * pixelScale
        std_guess = std_guess * pixelScale

        # Define the model
        g_init = models.Gaussian2D(amplitude=1., x_mean=0, y_mean=0,
                                   x_stddev=std_guess, y_stddev=std_guess)
        g_init.x_mean.fixed = True
        g_init.y_mean.fixed = True
        fit_g = fitting.LevMarLSQFitter()
        # fit on the centered grid
        g = fit_g(g_init, X - center[0], Y - center[1], psf)
        FWHMx = 2 * np.sqrt(2 * np.log(2)) * np.abs(g.x_stddev)
        FWHMy = 2 * np.sqrt(2 * np.log(2)) * np.abs(g.y_stddev)
        # this method does not estimate an orientation
        theta = 0

    # Get Ellipticity
    aRatio = np.max([FWHMx / FWHMy, FWHMy / FWHMx])

    if nargout == 1:
        return 0.5 * (FWHMx + FWHMy)
    elif nargout == 2:
        return FWHMx, FWHMy
    elif nargout == 3:
        return FWHMx, FWHMy, aRatio
    elif nargout == 4:
        return FWHMx, FWHMy, aRatio, theta
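# Hypothetical usage of getFWHM above with the 'gaussian' method; with
# pixelScale=1.0 the returned FWHMs are in pixels (module-level np, models
# and fitting imports assumed).
yy, xx = np.mgrid[:64, :64]
psf = models.Gaussian2D(amplitude=1., x_mean=32, y_mean=32,
                        x_stddev=2., y_stddev=3.)(xx, yy)
FWHMx, FWHMy = getFWHM(psf, pixelScale=1.0, method='gaussian')
print(FWHMx, FWHMy)  # ~ 4.7, ~ 7.1 (2.355 * stddev)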
def find_best_fit(yeldax, yelday, plot_look=False, pos_txt='sig_trimapril_pos.txt', errtype=1, fitter=None): ''' go throigh order of legendre polynomial look at residuals / errors Fit with gaussian with sigma of 1 measure chi squared print those number on a plot, call it a day ''' if fitter == None: fitter = high_order.LegTransform orders = range(3, 10) tab = Table.read(pos_txt, format='ascii.fixed_width') if errtype == 1: tot_err = np.sqrt(tab['xerr']**2 + tab['yerr']**2 + (tab['xrerr'] * 5)**2 + (tab['yrerr'] * 5)**2) elif errtype == 2: tot_err = np.sqrt(tab['xerr']**2 + tab['yerr']**2) for i in orders: tapr, dx, dy, gbool, sbool = fit_dist(pos_txt=pos_txt, order=i, n_iter_fit=1, lookup=False) dxn = dx[sbool] / tot_err[gbool][sbool] dyn = dy[sbool] / tot_err[gbool][sbool] xN, xbin_edge = np.histogram(dxn, bins=100, range=(-5, 5)) yN, ybin_edge = np.histogram(dyn, bins=100, range=(-5, 5)) #import pdb; pdb.set_trace() bcenx = np.zeros(len(xbin_edge) - 1) bceny = np.zeros(len(xbin_edge) - 1) for dd in range(len(xbin_edge) - 1): bcenx[dd] = np.mean(xbin_edge[dd] + xbin_edge[dd + 1]) / 2.0 bceny[dd] = np.mean(ybin_edge[dd] + ybin_edge[dd + 1]) / 2.0 #import pdb; pdb.set_trace() fit_p = fitting.LevMarLSQFitter() gy = models.Gaussian1D(mean=0, stddev=1.0) gy.mean.fixed = True #gy.stddev.fixed = True gx = models.Gaussian1D(mean=0, stddev=1.0) gx.mean.fixed = True #gx.stddev.fixed = True mx = fit_p(gx, bcenx, xN) my = fit_p(gy, bceny, yN) #import pdb; pdb.set_trace() chix = np.sum((mx(bcenx) - xN)**2 / mx(bcenx)) chiy = np.sum((my(bceny) - yN)**2 / my(bceny)) plt.figure(1) plt.clf() plt.scatter(bcenx, xN) plt.plot(bcenx, mx(bcenx)) plt.text( np.min(bcenx) + 1, np.max(xN) / 2.0, r'$\chi^{2}$: ' + str(chix)[:5]) plt.text( np.min(bcenx) + 1, np.max(xN) / 2.0 - 20, r'$\sigma$:' + str(mx.stddev.value)[:6]) #plt.text(np.min(bcenx)+2, np.max(xN)/2.0-30,'smooth factor: '+str(i)) plt.title('X residual Leg order' + str(i)) plt.xlabel('residual / error') plt.ylabel('N') plt.savefig('Leg_x_resid_ord' + str(i) + '.png') plt.figure(2) plt.clf() #plt.hist(dyn, bins=100) plt.scatter(bceny, yN) plt.plot(bceny, my(bceny)) plt.text( np.min(bceny) + 1, np.max(yN) / 2.0, r'$\chi^{2}$: ' + str(chiy)[:5]) plt.text( np.min(bceny) + 1, np.max(yN) / 2.0 - 20, r'$\sigma$:' + str(my.stddev.value)[:6]) #plt.text(np.min(bceny)+2, np.max(yN)/2.0-30,'smooth factor: '+str(i)) plt.title('Y residual Leg order' + str(i)) plt.xlabel('residual / error') plt.ylabel('N') plt.savefig('Leg_y_resid_ord' + str(i) + '.png') plt.figure(3) plt.clf() lx, ly = leg2lookup(tapr) plot_lookup_diff(lx, ly, yeldax, yelday) plt.title('Difference Legendre and Yelda') plt.savefig('Leg' + str(i) + '_resid_yelda.png')
def test_deriv_1D(self, model_class, test_parameters): """ Test the derivative of a model by comparing results with an estimated derivative. """ x_lim = test_parameters['x_lim'] if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase): return if "log_fit" in test_parameters: if test_parameters['log_fit']: x = np.logspace(x_lim[0], x_lim[1], self.N) else: x = np.linspace(x_lim[0], x_lim[1], self.N) parameters = test_parameters['parameters'] model_with_deriv = create_model(model_class, test_parameters, use_constraints=False) model_no_deriv = create_model(model_class, test_parameters, use_constraints=False) # NOTE: PR 10644 replaced deprecated usage of RandomState but could not # find a new seed that did not cause test failure, resorted to hardcoding. # add 10% noise to the amplitude rsn_rand_1234567890 = np.array([ 0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748, 0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161, 0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388, 0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526, 0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314, 0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748, 0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099, 0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673, 0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369, 0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824, 0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966, 0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713, 0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052, 0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480, 0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519, 0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486, 0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576, 0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468, 0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252, 0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890 ]) n = 0.1 * parameters[0] * (rsn_rand_1234567890 - 0.5) data = model_with_deriv(x) + n fitter_with_deriv = fitting.LevMarLSQFitter() new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data) fitter_no_deriv = fitting.LevMarLSQFitter() new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data, estimate_jacobian=True) assert_allclose(new_model_with_deriv.parameters, new_model_no_deriv.parameters, atol=0.15)
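# Minimal illustration (hypothetical data) of the two code paths exercised by
# the test above: using a model's analytic fit_deriv versus forcing a
# finite-difference Jacobian with estimate_jacobian=True.
import numpy as np
from astropy.modeling import models, fitting

x = np.linspace(-5, 5, 100)
y = models.Gaussian1D(amplitude=1., mean=0., stddev=1.)(x)

fitter = fitting.LevMarLSQFitter()
with_deriv = fitter(models.Gaussian1D(), x, y)  # uses Gaussian1D.fit_deriv
no_deriv = fitter(models.Gaussian1D(), x, y, estimate_jacobian=True)
print(with_deriv.parameters, no_deriv.parameters)  # should agree closely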
def comp_yelda(yeldax, yelday, yelda_pos='yelda_pos.txt'): ''' goes through successive orders of legendre polynomial and compare the results for fitting the yelda data to the final Yelada distortion map ''' for i in range(3, 8): t, outx, outy, dx, dy, sbooln = fit_dist(pos_txt=yelda_pos, order=i, n_iter_fit=1, wtype=2) plot_dist(yeldax, outx, title2='Leg:' + str(i), title1='Yelda', title3='Difference', title4='Difference', vmind=-.5, vmaxd=.5, outfile='yelda_plots/Dist_sol_X_' + str(i) + '.png') plot_dist(yelday, outy, title2='Leg:' + str(i), title1='Yelda', title3='Difference', title4='Difference', vmind=-.5, vmaxd=.5, outfile='yelda_plots/Dist_sol_Y_' + str(i) + '.png') plt.figure(1) plt.clf() plt.hist((yeldax - outx).flatten(), bins=100, alpha=.5, label='x') plt.hist((yelday - outy).flatten(), bins=100, alpha=.5, label='y') plt.xlabel('difference (pixels)') plt.ylabel('N') plt.title('Difference L' + str(i) + ' and Yelda') plt.savefig('yelda_plots/residual_' + str(i) + '.png') ref = Table.read(yelda_pos, format='ascii.fixed_width') tot_err = np.sqrt(ref['xerr']**2 + ref['yerr']**2) dxn = dx / tot_err[sbooln] dyn = dy / tot_err[sbooln] xN, xbin_edge = np.histogram(dxn, bins=100, range=(-10, 10)) yN, ybin_edge = np.histogram(dyn, bins=100, range=(-10, 10)) #import pdb; pdb.set_trace() bcenx = np.zeros(len(xbin_edge) - 1) bceny = np.zeros(len(xbin_edge) - 1) for dd in range(len(xbin_edge) - 1): bcenx[dd] = np.mean(xbin_edge[dd] + xbin_edge[dd + 1]) / 2.0 bceny[dd] = np.mean(ybin_edge[dd] + ybin_edge[dd + 1]) / 2.0 #import pdb; pdb.set_trace() fit_p = fitting.LevMarLSQFitter() gy = models.Gaussian1D(mean=0, stddev=1.0) gy.mean.fixed = True #gy.stddev.fixed = True gx = models.Gaussian1D(mean=0, stddev=1.0) gx.mean.fixed = True #gx.stddev.fixed = True mx = fit_p(gx, bcenx, xN) my = fit_p(gy, bceny, yN) #import pdb; pdb.set_trace() chix = np.sum((mx(bcenx) - xN)**2 / mx(bcenx)) chiy = np.sum((my(bceny) - yN)**2 / my(bceny)) plt.figure(1) plt.clf() plt.scatter(bcenx, xN) plt.plot(bcenx, mx(bcenx)) plt.text( np.min(bcenx) + 1, np.max(xN) / 2.0, r'$\chi^{2}$: ' + str(chix)[:5]) plt.text( np.min(bcenx) + 1, np.max(xN) / 2.0 + 10, r'$\sigma$:' + str(mx.stddev.value)[:6]) #plt.text(np.min(bcenx)+2, np.max(xN)/2.0-30,'smooth factor: '+str(i)) plt.title('X residual Leg order' + str(i)) plt.xlabel('residual / error') plt.ylabel('N') plt.savefig('yelda_plots/Leg_x_resid_ord' + str(i) + '.png') plt.figure(2) plt.clf() #plt.hist(dyn, bins=100) plt.scatter(bceny, yN) plt.plot(bceny, my(bceny)) plt.text( np.min(bceny) + 1, np.max(yN) / 2.0, r'$\chi^{2}$: ' + str(chiy)[:5]) plt.text( np.min(bceny) + 1, np.max(yN) / 2.0 - +10, r'$\sigma$:' + str(my.stddev.value)[:6]) #plt.text(np.min(bceny)+2, np.max(yN)/2.0-30,'smooth factor: '+str(i)) plt.title('Y residual Leg order' + str(i)) plt.xlabel('residual / error') plt.ylabel('N') plt.savefig('yelda_plots/Leg_y_resid_ord' + str(i) + '.png')
def fit_lines(spectrum, model, fitter=fitting.LevMarLSQFitter(),
              exclude_regions=None, weights=None, window=None,
              **kwargs):
    """
    Fit the input models to the spectrum. The parameter values of the
    input models will be used as the initial conditions for the fit.

    Parameters
    ----------
    spectrum : Spectrum1D
        The spectrum object over which the equivalent width will be calculated.
    model: `~astropy.modeling.Model` or list of `~astropy.modeling.Model`
        The model or list of models that contain the initial guess.
    fitter : `~astropy.modeling.fitting.Fitter`, optional
        Fitter instance to be used when fitting model to spectrum.
    exclude_regions : list of `~specutils.SpectralRegion`
        List of regions to exclude in the fitting.
    weights : list or 'unc', optional
        If 'unc', the uncertainties from the spectrum object are used to
        calculate the weights. If list/ndarray, represents the weights to
        use in the fitting.
    window : `~specutils.SpectralRegion` or list of `~specutils.SpectralRegion`
        Regions of the spectrum to use in the fitting. If None, then the
        whole spectrum will be used in the fitting.

    Additional keyword arguments are passed directly into the call to the
    ``fitter``.

    Returns
    -------
    models : Compound model of `~astropy.modeling.Model`
        A compound model of models with fitted parameters.

    Notes
    -----
    * Could add functionality to set the bounds in ``model`` if they are
      not set.
    * The models in the list of ``model`` are added together and passed as a
      compound model to the `~astropy.modeling.fitting.Fitter` class instance.
    """
    #
    # If we are to exclude certain regions, then remove them.
    #
    if exclude_regions is not None:
        spectrum = excise_regions(spectrum, exclude_regions)

    #
    # Make the model a list if not already
    #
    single_model_in = not isinstance(model, list)
    if single_model_in:
        model = [model]

    #
    # If a single model is passed in then just do that.
    #
    fitted_models = []

    for modeli, model_guess in enumerate(model):
        #
        # Determine the window if there is one. There are several options:
        #   window = 4 * u.Angstrom -> Quantity
        #   window = (4*u.Angstrom, 6*u.Angstrom) -> tuple
        #   window = (4, 6)*u.Angstrom -> Quantity
        #
        if window is not None and isinstance(window, list):
            model_window = window[modeli]
        elif window is not None:
            model_window = window
        else:
            model_window = None

        #
        # Check to see if the model has units. If it does not
        # have units then we are going to ignore them.
        #
        ignore_units = getattr(model_guess, model_guess.param_names[0]).unit is None

        fit_model = _fit_lines(spectrum, model_guess, fitter,
                               exclude_regions, weights, model_window,
                               ignore_units, **kwargs)

        fitted_models.append(fit_model)

    if single_model_in:
        fitted_models = fitted_models[0]

    return fitted_models
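# Hypothetical usage of fit_lines above on a synthetic spectrum (assumes
# specutils' Spectrum1D and astropy units; the line parameters are made up).
import numpy as np
import astropy.units as u
from astropy.modeling import models
from specutils import Spectrum1D

wave = np.linspace(6500, 6600, 200) * u.AA
flux = models.Gaussian1D(amplitude=3., mean=6563., stddev=2.)(wave.value) * u.Jy
spec = Spectrum1D(spectral_axis=wave, flux=flux)

g_init = models.Gaussian1D(amplitude=2. * u.Jy, mean=6560. * u.AA,
                           stddev=3. * u.AA)
g_fit = fit_lines(spec, g_init, window=(6550. * u.AA, 6575. * u.AA))
print(g_fit.mean)  # ~ 6563 AA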
Image_Data_All = fits1.data[:, :]
Image_Data_All2 = Image_Data_All[300:400]

# Define a running median calculator
def RunMedian(x, N):
    idx = np.arange(N) + np.arange(len(x) - N + 1)[:, None]
    b = [row[row > 0] for row in x[idx]]
    return np.array([np.median(row) for row in b])

# Create the Gaussian and Moffat fits and fill Fit_Data and Fit_Data_2,
# which contain the best-fit parameters for the two models (the Moffat fit
# still does not work correctly)
x = np.linspace(-50, 50, 100)
Gauss_Model = models.Gaussian1D(amplitude=1000., mean=0, stddev=1.)
Moffat_Model = models.Moffat1D(amplitude=1000, x_0=0, gamma=1, alpha=2)
Fitting_Model = fitting.LevMarLSQFitter()

Fit_Data = []
Fit_Data_2 = []

for i in range(0, Image_Data_All2.shape[1]):
    Fit_Data.append(Fitting_Model(Gauss_Model, x, Image_Data_All2[:, i]))
    # seed the next column's fit with the previous best-fit parameters
    if Fit_Data:  # true if not an empty list
        Gauss_Model = models.Gaussian1D(amplitude=Fit_Data[-1].amplitude,
                                        mean=Fit_Data[-1].mean,
                                        stddev=Fit_Data[-1].stddev)

# for i in range(0, Image_Data_All2.shape[1]):
#     Fit_Data_2.append(Fitting_Model(Moffat_Model, x, Image_Data_All2[:, i]))
fwhm = 2.355

import numpy as np, pyfits, cPickle
from matplotlib.patches import Ellipse
from astropy.modeling import models, fitting
fit_p = fitting.LevMarLSQFitter()
from scipy.optimize import curve_fit
from scipy.special import erf

def beam_factor(xpix, ypix, freqGHz, ratios, noise_list):
    nSamples = 10000
    factor = freqGHz / 60 * 0.5
    pctrs = [(750, 750),
             (917.42261089116289, 452.48369657494271),
             (806.96861768041856, 452.42720948105159),
             (917.27641014690346, 643.54361577204429),
             (806.91944976081186, 643.48717830145449),
             (972.55227473819843, 548.05291404475804),
             (862.1467847912545, 547.96840887824237),
             (751.74125821542486, 547.93998944819577),
             (1027.8765912332021, 452.59629406062078),
             (1027.6333576756713, 643.65611434189077),
             (696.56242900649295, 643.48680190600885),
             (1082.9577056577434, 548.19350494100092),
             (972.35753377858964, 739.11284658633997),
             (862.04908851816867, 739.02841569859129),
             (751.7406066611278, 739.00002124865318),
             (696.5145640484626, 452.42683275461894),
             (1027.3900902000746, 834.6959976649191),
             (917.13018901551754, 834.58359802467032),
             (806.87027498486566, 834.52721018446596),
             (696.61030063911267, 834.52683411992791),
def _fit_lines(spectrum, model, fitter=fitting.LevMarLSQFitter(calc_uncertainties=True), exclude_regions=None, weights=None, window=None, get_fit_info=False, ignore_units=False, **kwargs): """ Fit the input model (initial conditions) to the spectrum. Output will be the same model with the parameters set based on the fitting. spectrum, model -> model """ # # If we are to exclude certain regions, then remove them. # if exclude_regions is not None: spectrum = excise_regions(spectrum, exclude_regions) if isinstance(weights, str): if weights == 'unc': uncerts = spectrum.uncertainty # Astropy fitters expect weights in 1/sigma if uncerts is not None: weights = uncerts.array**-1 else: warnings.warn("Uncertainty values are not defined, but are " "trying to be used in model fitting.") else: raise ValueError("Unrecognized value `%s` in keyword argument.", weights) elif weights is not None: # Assume that the weights argument is list-like weights = np.array(weights) mask = spectrum.mask dispersion = spectrum.spectral_axis flux = spectrum.flux flux_unit = spectrum.flux.unit # # Determine the window if it is not None. There # are several options here: # window = 4 * u.Angstrom -> Quantity # window = (4*u.Angstrom, 6*u.Angstrom) -> tuple # window = (4, 6)*u.Angstrom -> Quantity # # # Determine the window if there is one # # In this case the window defines the area around the center of each model window_indices = None if window is not None and isinstance(window, (float, int)): center = model.mean window_indices = np.nonzero((dispersion >= center - window) & (dispersion < center + window)) # In this case the window is the start and end points of where we # should fit elif window is not None and isinstance(window, tuple): window_indices = np.nonzero((dispersion >= window[0]) & (dispersion <= window[1])) # in this case the window is spectral regions that determine where # to fit. elif window is not None and isinstance(window, SpectralRegion): idx1, idx2 = window.bounds if idx1 == idx2: raise IndexError("Tried to fit a region containing no pixels.") # HACK WARNING! This uses the extract machinery to create a set of # indices by making an "index spectrum" # note that any unit will do but Jy is at least flux-y # TODO: really the spectral region machinery should have the power # to create a mask, and we'd just use that... idxarr = np.arange(spectrum.flux.size).reshape(spectrum.flux.shape) index_spectrum = Spectrum1D(spectral_axis=dispersion, flux=u.Quantity(idxarr, u.Jy, dtype=int)) extracted_regions = extract_region(index_spectrum, window) if isinstance(extracted_regions, list): if len(extracted_regions) == 0: raise ValueError('The whole spectrum is windowed out!') window_indices = np.concatenate( [s.flux.value.astype(int) for s in extracted_regions]) else: if len(extracted_regions.flux) == 0: raise ValueError('The whole spectrum is windowed out!') window_indices = extracted_regions.flux.value.astype(int) if window_indices is not None: dispersion = dispersion[window_indices] flux = flux[window_indices] if mask is not None: mask = mask[window_indices] if weights is not None: weights = weights[window_indices] if flux is None or len(flux) == 0: raise Exception("Spectrum flux is empty or None.") input_spectrum = spectrum spectrum = Spectrum1D( flux=flux.value * flux_unit, spectral_axis=dispersion, wcs=input_spectrum.wcs, velocity_convention=input_spectrum.velocity_convention, rest_value=input_spectrum.rest_value) if not model._supports_unit_fitting: # Not all astropy models support units. 
        # For those that don't, we will strip the units and then re-add them
        # before returning the model.
        model, dispersion, flux = _strip_units_from_model(
            model, spectrum, convert=not ignore_units)

    #
    # Do the fitting of spectrum to the model.
    #
    if mask is not None:
        nmask = ~mask
        dispersion = dispersion[nmask]
        flux = flux[nmask]
        if weights is not None:
            weights = weights[nmask]

    fit_model = fitter(model, dispersion, flux, weights=weights, **kwargs)

    if hasattr(fitter, 'fit_info') and get_fit_info:
        fit_model.meta['fit_info'] = fitter.fit_info

    if not model._supports_unit_fitting:
        fit_model = QuantityModel(fit_model,
                                  spectrum.spectral_axis.unit,
                                  spectrum.flux.unit)

    return fit_model
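# Hypothetical example of the weights='unc' path handled above: a spectrum
# carrying StdDevUncertainty is fitted with weights 1/sigma (via the public
# fit_lines wrapper).
import numpy as np
import astropy.units as u
from astropy.nddata import StdDevUncertainty
from astropy.modeling import models
from specutils import Spectrum1D

wave = np.linspace(1.0, 2.0, 100) * u.um
flux = models.Gaussian1D(amplitude=5., mean=1.5, stddev=0.05)(wave.value) * u.Jy
unc = StdDevUncertainty(0.1 * np.ones(wave.size) * u.Jy)
spec = Spectrum1D(spectral_axis=wave, flux=flux, uncertainty=unc)

g_init = models.Gaussian1D(amplitude=4. * u.Jy, mean=1.4 * u.um,
                           stddev=0.1 * u.um)
g_fit = fit_lines(spec, g_init, weights='unc')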