def my_chisquare_charanderr(flux, fluxerr, char_var):
    """Per-source chi-squared of each light curve against its own mean.

    The noise term in the denominator is the squared normalised error plus
    an additional characteristic variance ``char_var``.

    Returns a 1-D array with one chi-squared value per light curve (row).
    """
    norm_flux, norm_err = vari_funcs.normalise_flux_and_errors(flux, fluxerr)
    curve_mean = np.nanmean(norm_flux, axis=1)
    residual_sq = np.square(norm_flux - curve_mean[:, None])
    noise = np.square(norm_err) + char_var
    return np.nansum(residual_sq / noise, axis=1)
def get_luminosity_and_flux(tbdata, xmm=False):
    """Extract 2-arcsec (aper=4) light curves, normalise them, and return the
    K-band absolute magnitude (M_K_z_p) for each source.

    Uses the module-level ``sigtb`` sigma table plus ``cosmo`` and ``u``
    (astropy). Rows whose M_K_z_p equals the null value 99 are dropped from
    every returned array. ``xmm`` is accepted but unused in this variant.

    Returns (tbdata, L, fluxnorm, fluxerrnorm), all masked to valid-L rows.
    """
    ### Extract magnitude table and error table ###
    flux = vari_funcs.flux4_stacks(tbdata)
    flux, tbdata = vari_funcs.noneg(flux, tbdata)
    # tbdata = tbdata[np.nanmean(flux,axis=1)>1e4]
    flux, fluxerr, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata, aper=4)
    ### Normalise ###
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr)
    ### Find luminosity distance ###
    # NOTE(review): the photometric-z substitution writes through to the table
    # column in place -- confirm callers expect that side effect.
    z = tbdata['z_spec'] #[mask]
    z[z == -1] = tbdata['z_p'][z == -1]
    # DL is computed but not used below in this variant (left over from the
    # AB-magnitude version of this function, see the commented lines).
    DL = cosmo.luminosity_distance(z)
    DL = DL.to(u.m)
    ### Get AB magnitude ###
    # abmag = tbdata['KMAG_20']
    ### Convert to luminosity using formula worked out in lab book ###
    # L = 10 ** (-abmag/2.5) * 3631 * 1e-26 * (3e8/0.34e-6) * 4 * np.pi * (DL.value**2) # gives luminosity in W
    L = tbdata['M_K_z_p']
    ### remove any that have val of 99 ###
    L[L == 99] = np.nan # remove those with null values
    # L[L > -5] = np.nan # remove faintest as seem spurious
    mask = ~np.isnan(L) # [mask]
    return tbdata[mask], L[mask], fluxnorm[mask], fluxerrnorm[mask]
def get_luminosity_and_flux(tbdata, xmm=False):
    """Build normalised light curves plus soft-band X-ray luminosities.

    Uses the module-level ``sigtb`` sigma table, ``cosmo`` and ``u``.
    With ``xmm=True`` the XMM count rate column 'CR(S)' is converted to a
    flux; otherwise the Chandra 'Soft_flux' column is used directly.

    Returns (tbdata, xrayL, fluxnorm, fluxerrnorm).
    """
    curves = vari_funcs.flux5_stacks(tbdata)
    curves, tbdata = vari_funcs.noneg(curves, tbdata)
    curves, curve_errs, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata)
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(curves, curve_errs)

    # Prefer spectroscopic redshift; fall back to photometric where z == -1.
    z = tbdata['z_spec']
    z[z == -1] = tbdata['z_p'][z == -1]
    lum_dist = cosmo.luminosity_distance(z).to(u.cm)

    if xmm == True:
        # conversion into ergs/s/cm2
        xrayF = tbdata['CR(S)'] * 0.171 * (10**(-14))
    else:
        # no conversion required in chandra
        xrayF = tbdata['Soft_flux']
    xrayL = xrayF * 4 * np.pi * (lum_dist.value**2)

    return tbdata, xrayL, fluxnorm, fluxerrnorm
def make_ensemble(tbdata, xrayL, binedge):
    """Group normalised light curves into ensembles binned by X-ray luminosity.

    Parameters: ``tbdata`` source table (module-level ``sigtb`` supplies the
    error model); ``xrayL`` per-source luminosity aligned row-for-row with
    tbdata; ``binedge`` ascending lower bin edges (last bin is open-ended).

    Returns (enflux, enfluxerr, enXrayL): dicts keyed by bin index.

    NOTE(review): noneg/create_quad_error_array filter tbdata rows inside
    this function, while ``xrayL`` comes from the caller; the bin masks built
    from xrayL are applied to the filtered fluxnorm, so the caller must have
    applied identical filtering already -- confirm.
    """
    ### Extract magnitude table and error table ###
    flux = vari_funcs.flux5_stacks(tbdata)
    flux, tbdata = vari_funcs.noneg(flux, tbdata)
    flux, fluxerr, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata)
    ### Normalise ###
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr)
    ### Create dicts to save data into ###
    enflux = {}
    enfluxerr = {}
    enXrayL = {}
    ### loop over bins ###
    for m, enmin in enumerate(binedge):
        ### Isolate data needed ###
        mask1 = xrayL >= enmin
        if m != len(binedge) - 1:
            mask2 = xrayL < binedge[m + 1]
        else:
            # top bin is open-ended: accept everything above the last edge
            mask2 = np.ones(len(mask1))
        enmask = mask1 * mask2.astype(bool)
        enflux[m] = fluxnorm[enmask]
        enfluxerr[m] = fluxerrnorm[enmask]
        enXrayL[m] = xrayL[enmask]
    return enflux, enfluxerr, enXrayL
def get_luminosity_and_flux(tbdata, xmm=False):
    """Normalised light curves plus a K-band luminosity (in W) per source.

    The luminosity is derived from the AB magnitude 'KMAG_20' via the
    conversion worked out in the lab book. Uses module-level ``sigtb``,
    ``cosmo`` and ``u``. ``xmm`` is accepted but unused here.

    Returns (tbdata, L, fluxnorm, fluxerrnorm).
    """
    ### Extract magnitude table and error table ###
    curves = vari_funcs.flux5_stacks(tbdata)
    curves, tbdata = vari_funcs.noneg(curves, tbdata)
    # tbdata = tbdata[np.nanmean(flux,axis=1)>1e4]
    curves, curve_errs, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata)
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(curves, curve_errs)

    # Spectroscopic z with photometric fallback, then luminosity distance in m.
    z = tbdata['z_spec']
    z[z == -1] = tbdata['z_p'][z == -1]
    lum_dist = cosmo.luminosity_distance(z).to(u.m)

    # AB mag -> flux density (Jy -> W/m^2/Hz) -> luminosity in W
    abmag = tbdata['KMAG_20']
    L = 10**(-abmag / 2.5) * 3631 * 1e-26 * (3e8 / 0.34e-6) * 4 * np.pi * (
        lum_dist.value**2)

    return tbdata, L, fluxnorm, fluxerrnorm
def my_chisquare_epoch(flux, sigsq):
    """Chi-squared of each normalised light curve against its mean, using one
    shared per-epoch noise value broadcast to every source.

    ``sigsq`` is a 1-D array with one value per epoch; it is tiled to the
    shape of ``flux`` before normalisation.

    NOTE(review): despite the name suggesting these are already variances,
    the denominator squares them again (``sigsqn**2``) -- confirm whether
    ``sigsq`` holds sigma or sigma^2.
    """
    sigsqn = np.tile(sigsq, [len(flux), 1])
    fluxn, sigsqn = vari_funcs.normalise_flux_and_errors(flux, sigsqn)
    # fluxn = vari_funcs.normalise_flux(flux)
    meanflux = np.nanmean(fluxn, axis=1)
    top = np.square(fluxn - meanflux[:, None])
    chi = np.nansum(top / (sigsqn**2), axis=1)
    return chi
def get_and_normalise_flux(tbdata, sigtb, aper=5):
    """Extract, clean and mean-normalise the light curves of *tbdata*.

    Negative-flux rows are removed and per-quadrant errors are built from
    the ``sigtb`` sigma table for aperture index ``aper``.

    Returns (fluxnorm, fluxerrnorm, tbdata) with tbdata filtered to match.
    """
    raw_flux = vari_funcs.flux_stacks(tbdata, aper)
    raw_flux, tbdata = vari_funcs.noneg(raw_flux, tbdata)
    raw_flux, raw_err, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata, aper)
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(raw_flux, raw_err)
    return fluxnorm, fluxerrnorm, tbdata
def get_ensemble_sig(tbdata, sigtb, binedge, posvar, aper=5):
    """Bin sources by full-band X-ray luminosity and measure the
    maximum-likelihood intrinsic variability (sigma) of each ensemble.

    Parameters
    ----------
    tbdata : source table with flux stacks, z_spec/z_p and 'Full_flux'.
    sigtb : sigma table used to build the per-quadrant error arrays.
    binedge : ascending lower edges of the luminosity bins; the top bin
        is open-ended.
    posvar : grid of candidate sigma values for the likelihood search.
    aper : aperture index for the flux/error extraction.

    Returns
    -------
    (fluxnorm, fluxerrnorm, sig, sigerr, xrayL, meanxrayL)
    """
    # Extract magnitude table and error table
    flux = vari_funcs.flux_stacks(tbdata, aper)
    flux, tbdata = vari_funcs.noneg(flux, tbdata)
    flux, fluxerr, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata, aper)
    ### Normalise ###
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr)
    ### Find luminosity distance ###
    z = tbdata['z_spec']  # photometric fallback where no spectroscopic z
    z[z == -1] = tbdata['z_p'][z == -1]
    DL = cosmo.luminosity_distance(z)
    DL = DL.to(u.cm)
    ### Calculate the luminosity ###
    xrayF = tbdata['Full_flux']
    xrayL = xrayF * 4 * np.pi * (DL.value**2)
    ### Create dicts to save data into ###
    # Fix: ``size`` was previously an undeclared module-level name; derive it
    # from the bin edges so the function is self-contained.
    size = len(binedge)
    enflux = {}
    enfluxerr = {}
    enxrayL = {}
    sig = np.empty(size)
    sigerr = np.empty(size)
    meanxrayL = np.empty(size)
    for m, enmin in enumerate(binedge):
        ### Isolate data needed ###
        mask1 = xrayL >= enmin
        if m != size - 1:
            mask2 = xrayL < binedge[m + 1]
        else:
            mask2 = np.ones(len(mask1))  # top bin is open-ended
        enmask = mask1 * mask2.astype(bool)
        enflux[m] = fluxnorm[enmask]
        enfluxerr[m] = fluxerrnorm[enmask]
        enxrayL[m] = xrayL[enmask]
        ### Combine into one flux curve per bin ###
        enfluxcurve = np.ravel(enflux[m])
        enfluxcurveerr = np.ravel(enfluxerr[m])
        ### Find max likelihood sig of curve ###
        [sig[m], sigerr[m]] = vari_funcs.maximum_likelihood(
            enfluxcurve, enfluxcurveerr, 1, posvar)
        ### find mean luminosity of the bin ###
        meanxrayL[m] = np.nanmean(enxrayL[m])
    return fluxnorm, fluxerrnorm, sig, sigerr, xrayL, meanxrayL
def get_luminosity_and_flux(tbdata, xmm=False):
    """Normalised light curves plus M_K_z_p magnitudes for bright sources.

    Keeps only sources whose mean raw flux exceeds 1e4, then builds the
    per-quadrant error arrays from the module-level ``sigtb`` table.
    ``xmm`` is accepted but unused here.

    Returns (tbdata, L, fluxnorm, fluxerrnorm).
    """
    curves = vari_funcs.flux5_stacks(tbdata)
    curves, tbdata = vari_funcs.noneg(curves, tbdata)
    # bright-source cut on the mean raw flux
    bright = np.nanmean(curves, axis=1) > 1e4
    tbdata = tbdata[bright]
    curves, curve_errs, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata)
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(curves, curve_errs)
    ### Get absolute magnitude ###
    L = tbdata['M_K_z_p']
    return tbdata, L, fluxnorm, fluxerrnorm
def get_ensemble_sig(tbdata, sigtb, binedge, posvar, aper=5):
    """Bin sources by redshift and measure the maximum-likelihood intrinsic
    variability (sigma) of each redshift ensemble.

    Parameters
    ----------
    tbdata : source table with flux stacks and z_spec/z_p columns.
    sigtb : sigma table used to build the per-quadrant error arrays.
    binedge : ascending lower edges of the z bins; the top bin is capped
        at z < 4.5.
    posvar : grid of candidate sigma values for the likelihood search.
    aper : aperture index for the flux/error extraction.

    Returns
    -------
    (fluxnorm, fluxerrnorm, sig, sigerr, z, meanz)
    """
    # Extract magnitude table and error table
    flux = vari_funcs.flux_stacks(tbdata, aper)
    flux, tbdata = vari_funcs.noneg(flux, tbdata)
    flux, fluxerr, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata, aper)
    ### Normalise ###
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr)
    ### Find z ###
    z = tbdata['z_spec']  # photometric fallback where no spectroscopic z
    z[z == -1] = tbdata['z_p'][z == -1]
    ### Create dicts to save data into ###
    # Fix: ``size`` was previously an undeclared module-level name; derive it
    # from the bin edges so the function is self-contained.
    size = len(binedge)
    enflux = {}
    enfluxerr = {}
    enz = {}
    sig = np.empty(size)
    sigerr = np.empty(size)
    meanz = np.empty(size)
    for m, enmin in enumerate(binedge):
        ### Isolate data needed ###
        mask1 = z >= enmin
        if m != size - 1:
            mask2 = z < binedge[m + 1]
        else:
            mask2 = z < 4.5  # np.ones(len(mask1)) -- top bin capped at z=4.5
        enmask = mask1 * mask2.astype(bool)
        enflux[m] = fluxnorm[enmask]
        enfluxerr[m] = fluxerrnorm[enmask]
        enz[m] = z[enmask]
        ### Combine into one flux curve per bin ###
        enfluxcurve = np.ravel(enflux[m])
        enfluxcurveerr = np.ravel(enfluxerr[m])
        ### Find max likelihood sig of curve ###
        [sig[m], sigerr[m]] = vari_funcs.maximum_likelihood(
            enfluxcurve, enfluxcurveerr, 1, posvar)
        ### find mean z ###
        meanz[m] = np.nanmean(enz[m])
    return fluxnorm, fluxerrnorm, sig, sigerr, z, meanz
def get_luminosity_and_flux(tbdata, xmm=False):
    """Normalised 2-arcsec light curves plus the K-band flux 'KFLUX_20'.

    Negative KFLUX_20 values are treated as null and those rows are removed
    from every returned array. Uses the module-level ``sigtb`` table.
    ``xmm`` is accepted but unused here.

    Returns (tbdata, L, fluxnorm, fluxerrnorm), masked to valid-L rows.
    """
    curves = vari_funcs.flux4_stacks(tbdata)
    curves, tbdata = vari_funcs.noneg(curves, tbdata)
    # tbdata = tbdata[np.nanmean(flux,axis=1)>1e4]
    curves, curve_errs, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata, aper=4)
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(curves, curve_errs)

    L = tbdata['KFLUX_20']
    L[L < 0] = np.nan  # negative fluxes mark null values
    # L[L > -5] = np.nan # remove faintest as seem spurious
    valid = ~np.isnan(L)
    return tbdata[valid], L[valid], fluxnorm[valid], fluxerrnorm[valid]
def get_luminosity_and_flux(tbdata, xmm=False):
    """Normalised 2-arcsec light curves plus M_K_z_p, nulls removed.

    Rows whose M_K_z_p equals the null value 99 are dropped from every
    returned array. Uses the module-level ``sigtb`` table. ``xmm`` is
    accepted but unused here.

    Returns (tbdata, L, fluxnorm, fluxerrnorm).
    """
    curves = vari_funcs.flux4_stacks(tbdata)
    curves, tbdata = vari_funcs.noneg(curves, tbdata)
    # tbdata = tbdata[np.nanmean(flux,axis=1)>1e4]
    curves, curve_errs, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata, aper=4)
    fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(curves, curve_errs)

    ### Get Luminosity ###
    L = tbdata['M_K_z_p']
    L[L == 99] = np.nan  # 99 marks a null magnitude
    keep = ~np.isnan(L)
    return tbdata[keep], L[keep], fluxnorm[keep], fluxerrnorm[keep]
def run_max_likely(tbdata): posvar = np.linspace(0,2,5000) ### Remove edges ### tbdata = vari_funcs.remove_edges(tbdata) sigtb = Table.read('sigma_tables/quad_epoch_sigma_table_extra_clean_no06_2arcsec.fits') ### Extract magnitude table and error table ### flux = vari_funcs.flux4_stacks(tbdata) flux, tbdata = vari_funcs.noneg(flux, tbdata) # tbdata = tbdata[np.nanmean(flux,axis=1)>1e4] flux, fluxerr, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata, aper=4) ### Normalise ### fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr) ### Get sig values ### numobs = np.shape(fluxnorm)[0] meanflux = np.nanmean(fluxnorm, axis=1) out = np.array([vari_funcs.maximum_likelihood(fluxnorm[n,:], fluxerrnorm[n,:], meanflux[n], posvar, n=n, printn=100) for n in range(numobs)]) return out, tbdata
# -- Script fragment: per-quadrant chi-squared in flux bins ------------------
# Recode the 'X-ray' column from raw numeric flags to booleans.
# NOTE(review): 70/84 are the ASCII codes of 'F'/'T' -- presumably the flag
# column was read back as bytes; confirm against the table writer.
qdata['X-ray'][qdata['X-ray'] == 70] = False
qdata['X-ray'][qdata['X-ray'] == 84] = True
# Per-quadrant light curves for galaxies, Chandra sources and stars.
qflux = quadflux[key]
qerr = quaderr[key]
qchanflux = quadchanflux[key]
qchanerr = quadchanerr[key]
qsflux = quadsflux[key]
qserr = quadserr[key]
for n, binedge in enumerate(binarr[0:-1]):
    # get chi for gal
    qbflux, qberr = vari_funcs.fluxbinerr(binedge, binarr[n + 1], qflux, qerr)
    meanqb = np.nanmean(qbflux, axis=1)
    qbflux, qberr = vari_funcs.normalise_flux_and_errors(qbflux, qberr)
    mediancurve = np.nanmedian(qbflux, axis=0)
    print(mediancurve)
    chisq = my_chisquare_quad_median_err(qbflux, qberr, mediancurve)
    chisqold = vari_funcs.my_chisquare_err(qbflux, qberr)
    # plt.plot(meanqb, chisqold, 'mo', mfc='None', markersize=10)
    # get chi for chan
    qbchanflux, qbchanerr = vari_funcs.fluxbinerr(binedge, binarr[n + 1],
                                                  qchanflux, qchanerr)
    meanqbchan = np.nanmean(qbchanflux, axis=1)
    qbchanflux, qbchanerr = vari_funcs.normalise_flux_and_errors(
        qbchanflux, qbchanerr)
    # NOTE: fragment is truncated mid-statement in this chunk.
    chisqchan = my_chisquare_quad_median_err(qbchanflux, qbchanerr,
# -- Script fragment: derive per-bin error corrections from chi-squared ------
# Accumulators for the corrected magnitudes/errors of each sample.
newallmag = np.array([])
newallmagerr = np.array([])
newallchanmag = np.array([])
newallchanmagerr = np.array([])
newallsmag = np.array([])
newallsmagerr = np.array([])
for n, binedge in enumerate(bins):
    print(binedge)
    if n == np.size(bins) - 1:
        break  # last edge has no upper partner
    mag, bindata = vari_funcs.fluxbin(binedge, bins[n + 1], allmag, tbdata)  # bindata
    magerr = vari_funcs.fluxerr5_stacks(bindata)  # make error array
    meanflux = np.mean(mag, axis=1)
    mag, magerr = vari_funcs.normalise_flux_and_errors(mag, magerr)
    errchange, newmagerr = times_chi_err(mag, magerr)  # find correction
    allerrchange = np.append(allerrchange, errchange)  # create array of corrections
    ### Apply correction to stars and X-ray ###
    chanmag, chanbin = vari_funcs.fluxbin(binedge, bins[n + 1], allchanmag, chandata)
    chanmagerr = vari_funcs.fluxerr5_stacks(chanbin)
    chanmeanmag = np.mean(chanmag, axis=1)
    chanmag, chanmagerr = vari_funcs.normalise_flux_and_errors(chanmag, chanmagerr)
    # change error
    newchanmagerr = chanmagerr * errchange  # np.sqrt(np.square(chanmagerr) + np.square(errchange))
    smag, sbin = vari_funcs.fluxbin(binedge, bins[n + 1], allsmag, sdata)
    smagerr = vari_funcs.fluxerr5_stacks(sbin)
    # NOTE: loop body continues beyond this chunk.
### Bin data ### allmedexcess = np.array([]) allmedexcesscorr = np.array([]) for n, binedge in enumerate(bins): # print(binedge) if n == np.size(bins) - 1: break mag, bindata = vari_funcs.fluxbin(binedge, bins[n + 1], flux, tbdata2) #bindata magcorr, bincorr = vari_funcs.fluxbin(binedge, bins[n + 1], fluxn, tbdata) #bindata # magerrcorr = vari_funcs.fluxerr5_stacks_corr(bincorr) #make error array magerrcorr = vari_funcs.fluxerr5_stacks(bincorr) #make error array # magerr = vari_funcs.fluxerr5_stacks_corr(bindata) #make error array magerr = vari_funcs.fluxerr5_stacks(bindata) #make error array nmag, nmagerr = vari_funcs.normalise_flux_and_errors(mag, magerr) nmagcorr, nmagerrcorr = vari_funcs.normalise_flux_and_errors( magcorr, magerrcorr) binexess = vari_funcs.normsigmasq(nmag, nmagerr) binexesscorr = vari_funcs.normsigmasq(nmagcorr, nmagerrcorr) medexcess = np.nanmedian(binexess) allmedexcess = np.append(allmedexcess, medexcess) medexcesscorr = np.nanmedian(binexesscorr) allmedexcesscorr = np.append(allmedexcesscorr, medexcesscorr) plt.plot(bins[0:26], allmedexcess, 'k--') plt.plot(bins[0:26], allmedexcesscorr, 'k') #%% Want to see what chi-square plot looks like with new errors #def min_chi_sq(mag, magerr): # avgmag = np.nanmedian(mag, axis=1) #use median mags as a start for the expected model
# Extract magnitude table and error table flux = vari_funcs.k_mag_flux.flux4_stacks(tbdata) flux, tbdata = vari_funcs.noneg(flux, tbdata) flux, fluxerr, tbdata = vari_funcs.k_mag_flux.create_quad_error_array(sigtb, tbdata, aper=4) #chanflux = vari_funcs.flux5_stacks(chandata) #chanflux, chandata = vari_funcs.noneg(chanflux, chandata) #chanflux, chanerr, chandata = vari_funcs.create_quad_error_array(sigtb, chandata, aper=5) #fullflux = vari_funcs.k_mag_flux.flux4_stacks(fullxray) #fullflux, fulldata = vari_funcs.noneg(fullflux, fullxray) #fullflux, fullerr, fullxray = vari_funcs.k_mag_flux.create_quad_error_array(sigtb, fullxray, aper=5) ### Normalise ### fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr) #chanfluxnorm, chanerrnorm = vari_funcs.normalise_flux_and_errors(chanflux, chanerr) #fullfluxnorm, fullerrnorm = vari_funcs.normalise_flux_and_errors(fullflux, fullerr) #%% All points posvar = np.linspace(0, 2, 5000) #start = time.time() numobs = np.shape(fluxnorm)[0] meanflux = np.nanmean(fluxnorm, axis=1) out = np.array([ vari_funcs.maximum_likelihood(fluxnorm[n, :], fluxerrnorm[n, :], meanflux[n], posvar, n=n, printn=1000) for n in range(numobs)
# -- Script fragment: clean + normalise all samples, then luminosity distances
flux, tbdata = vari_funcs.noneg(flux, tbdata)
flux, fluxerr, tbdata = vari_funcs.create_quad_error_array(sigtb, tbdata)
chanflux = vari_funcs.flux5_stacks(chandata)
chanflux, chandata = vari_funcs.noneg(chanflux, chandata)
chanflux, chanerr, chandata = vari_funcs.create_quad_error_array(sigtb, chandata)
fullflux = vari_funcs.flux5_stacks(fullxray)
fullflux, fulldata = vari_funcs.noneg(fullflux, fullxray)
# NOTE(review): the filtered table is bound to ``fulldata`` above, but the
# unfiltered ``fullxray`` is passed below -- confirm the noneg row filtering
# is meant to be discarded for this sample.
fullflux, fullerr, fullxray = vari_funcs.create_quad_error_array(sigtb, fullxray)
xmmflux = vari_funcs.flux5_stacks(xmmdata)
xmmflux, xmmdata = vari_funcs.noneg(xmmflux, xmmdata)
xmmflux, xmmerr, xmmdata = vari_funcs.create_quad_error_array(sigtb, xmmdata)
### Normalise ###
fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(flux, fluxerr)
chanfluxnorm, chanerrnorm = vari_funcs.normalise_flux_and_errors(chanflux, chanerr)
fullfluxnorm, fullerrnorm = vari_funcs.normalise_flux_and_errors(fullflux, fullerr)
xmmfluxnorm, xmmerrnorm = vari_funcs.normalise_flux_and_errors(xmmflux, xmmerr)
#%% Find luminosity distance both for all and just for chandra sources ###
# Spectroscopic z with photometric fallback, written back in place.
z = tbdata['z_spec']  # [mask]
z[z == -1] = tbdata['z_p'][z == -1]
DL = cosmo.luminosity_distance(z)
DL = DL.to(u.cm)
chanz = chandata['z_spec']  # [chanmask]
chanz[chanz == -1] = chandata['z_p'][chanz == -1]
chanDL = cosmo.luminosity_distance(chanz)
chanDL = chanDL.to(u.cm)
#allflux, alldata = vari_funcs.semfluxlim(allflux, alldata) #allfluxconv, alldataconv = vari_funcs.semfluxlim(allfluxconv, alldataconv) allflux, alldata = vari_funcs.noneg(allflux, alldata) allfluxconv, alldataconv = vari_funcs.noneg(allfluxconv, alldataconv) allfluxerr = vari_funcs.fluxerr1_stacks(alldata) allfluxconverr = vari_funcs.fluxerr1_stacks(alldataconv) #depths = np.load('fluxdepths.npy') #allfluxerr = np.zeros(np.shape(allflux)) + depths[None,:] #depthsconv = np.load('fluxdepthsconv_PSF.npy') #allfluxconverr = np.zeros(np.shape(allfluxconv)) + depthsconv[None,:] # Normalise allflux, allfluxerr = vari_funcs.normalise_flux_and_errors(allflux, allfluxerr) allfluxconv, allfluxconverr = vari_funcs.normalise_flux_and_errors( allfluxconv, allfluxconverr) ## Find FWHM values #avgfwhm = np.array([np.median(alldata['FWHM_WORLD_05B']), # np.median(alldata['FWHM_WORLD_06B']), # np.median(alldata['FWHM_WORLD_07B']), # np.median(alldata['FWHM_WORLD_08B']), # np.median(alldata['FWHM_WORLD_09B']), # np.median(alldata['FWHM_WORLD_10B']), # np.median(alldata['FWHM_WORLD_11B']), # np.median(alldata['FWHM_WORLD_12B'])]) *3600 # #avgfwhmconv = np.array([np.median(alldataconv['FWHM_WORLD_05B']), # np.median(alldataconv['FWHM_WORLD_06B']),
### plot new ### plt.figure(4, figsize=[8,8]) plt.plot(meanflux, newchisq, 'b+',zorder=2) plt.plot(meanchan, newchisqchan, 'ro', zorder=3, mfc='None', markersize=10) plt.plot(meansflux, newschisq, 'm*', zorder=1, mfc='None', markersize=10) plt.yscale('log') plt.xscale('log') plt.ylabel('Chi Squared') plt.xlabel('Mean Flux') plt.title('2nd iteration') plt.text(5e2, 1e3, r'$\chi^{2} = \sum{\frac{( \,{x_{i} - \bar{x}})^{2} \,}{\sigma_{noise}^{2}}}$') ### plot new variance ### plt.figure(5, figsize=[8,8]) #get errors binfluxn, binfluxerrn = vari_funcs.normalise_flux_and_errors(flux, fluxerr) binfluxchann, binfluxchanerrn = vari_funcs.normalise_flux_and_errors(fluxchan, fluxchanerr) binsfluxn, binsfluxerrn = vari_funcs.normalise_flux_and_errors(sflux, sfluxerr) #get variance sig = np.var(binfluxn, axis=1, ddof=1) sigchan = np.var(binfluxchann, axis=1, ddof=1) ssig = np.var(binsfluxn, axis=1, ddof=1) sigreal = sig - newmedvar[n] sigrealchan = sigchan - newmedvar[n] ssigreal = ssig - newmedvar[n] #plot plt.plot(meanflux, sigreal, 'b+', zorder=2) plt.plot(meanchan, sigrealchan, 'ro', zorder=3, mfc='None', markersize=10) plt.plot(meansflux, ssigreal, 'm*', zorder=1, mfc='None', markersize=10) plt.yscale('symlog', linthreshy=0.0001) plt.xscale('log')
# -- Script fragment: single-source constant-model chi-squared check ---------
sfluxerrn = vari_funcs.fluxerr5_stacks_corr(sdata)
fluxerr = vari_funcs.fluxerr5_stacks(tbdata)
fluxerrchan = vari_funcs.fluxerr5_stacks(chandata)
sfluxerr = vari_funcs.fluxerr5_stacks(sdata)

def single_min_chi_sq(mag, magerr):
    """Minimum chi-squared of a constant model fitted to one light curve.

    Tests 101 constant levels within +/-0.1 of the median magnitude, plots
    the curve with the best-fitting constant for a visual check, and
    returns the minimum chi-squared value.
    """
    avgmag = np.nanmedian(mag)  # use median mags as a start for the expected model
    testchange = np.linspace(-0.1,0.1,101)
    testexpect = testchange + avgmag
    # one chi-squared contribution per epoch, per candidate constant
    chisq = np.array([(np.square(mag-testexpect[n]))/np.square(magerr) for n in range(101)])
    sumchisq = np.nansum(chisq, axis=1)
    minchisq = np.min(sumchisq)
    expect = testexpect[sumchisq == minchisq]
    # plot the light curve and best fit line to check chisq visually
    plt.figure()
    t = np.arange(1,9)  # 8 observation epochs
    plt.errorbar(t, mag, magerr, fmt='x')
    plt.hlines(expect, 1, 8)
    return minchisq

meansmagnew = np.mean(sfluxn, axis=1)
meanmagnew = np.mean(fluxn, axis=1)
meanchanmagnew = np.mean(fluxchann, axis=1)
sfluxnorm, sfluxerrnorm = vari_funcs.normalise_flux_and_errors(sfluxn, sfluxerrn)
fluxnorm, fluxerrnorm = vari_funcs.normalise_flux_and_errors(fluxn, fluxerrn)
fluxchannorm, fluxerrchannorm = vari_funcs.normalise_flux_and_errors(fluxchann, fluxerrchann)
# chi-squared of one example source (row 53) as a sanity check
schisq = single_min_chi_sq(sfluxnorm[53], sfluxerrnorm[53])