def test_angle():
    """Verify that burstutils.angle recovers the pi/2 separation of the x and y axes."""
    x_axis = [1, 0, 0]
    y_axis = [0, 1, 0]
    separation = burstutils.angle(x_axis, y_axis)
    assert np.abs(separation - np.pi / 2) < 1e-7
def response2oneGRB(self, sourcetheta, sourcephi, sourcestrength):
    """Localize a single simulated GRB and report the reconstruction offset.

    If you wish, will allow you to examine the localization uncertainty of
    one sampled GRB of some given strength at some point in the sky. For a
    full/complete simulation use "response2GRB" instead.

    Parameters
    ----------
    sourcetheta : float
        The displacement in degrees in the zenithal direction.
    sourcephi : float
        The displacement in degrees in the azimuthal direction.
    sourcestrength : float
        The strength in counts of the simulated GRB.

    Returns
    -------
    recvec : array
        Cartesian unit vector of the reconstructed GRB position.
    """
    # Work in radians internally; degrees are only for display.
    sourcetheta = deg2rad(sourcetheta)
    sourcephi = deg2rad(sourcephi)
    sourcexyz = ang2vec(sourcetheta, sourcephi)  # cartesian position of the burst
    print("Testing a burst @ " + str(rad2deg([sourcetheta, sourcephi])))

    # Grid over which the chi-squared fit is evaluated:
    # theta/phi in degrees, A in counts above background.
    bottheta, toptheta, ntheta = 0, 180, 20
    botphi, topphi, nphi = 0, 360, 37
    botA, topA, nA = 0, 1000, 100

    def simulate(norm, look_up):
        """One detector's noisy counts for the burst at ``sourcexyz``.

        Separation + MEGAlib-based lookup give the theoretical response;
        background is added and the total fluctuated with Gaussian
        (sqrt-N) noise, floored at the background level.
        """
        sep = bf.angle(sourcexyz, norm)
        x = look_up(norm, sourcexyz)
        # BUGFIX: original read GRB.Ao, a name not in this method's scope,
        # and ignored the documented ``sourcestrength`` parameter.
        dtheory = sourcestrength * bf.response(sep, x)
        counts = dtheory + self.bg
        detactual = gauss(counts, sqrt(counts))
        if detactual - self.bg < 0:
            detactual = self.bg
        return detactual

    detcountsA = simulate(self.normA, bf.look_up_A)
    detcountsB = simulate(self.normB, bf.look_up_B)
    detcountsC = simulate(self.normC, bf.look_up_C)
    detcountsD = simulate(self.normD, bf.look_up_D)

    # Per-detector chi-squared cubes over the (theta, phi, A) grid,
    # summed for the joint fit.
    chiA = bf.quad_solver(detcountsA, self.normA, bottheta, toptheta, botphi,
                          topphi, botA, topA, ntheta, nphi, nA, self.bg,
                          A=True)
    chiB = bf.quad_solver(detcountsB, self.normB, bottheta, toptheta, botphi,
                          topphi, botA, topA, ntheta, nphi, nA, self.bg,
                          B=True)
    chiC = bf.quad_solver(detcountsC, self.normC, bottheta, toptheta, botphi,
                          topphi, botA, topA, ntheta, nphi, nA, self.bg,
                          C=True)
    chiD = bf.quad_solver(detcountsD, self.normD, bottheta, toptheta, botphi,
                          topphi, botA, topA, ntheta, nphi, nA, self.bg,
                          D=True)
    chisquared = add(add(chiA, chiB), add(chiC, chiD))  # total chi2

    thetaloc, philoc, Aguess = bf.indexer(chisquared, bottheta, toptheta,
                                          botphi, topphi, botA, topA, ntheta,
                                          nphi, nA)
    recvec = ang2vec(deg2rad(thetaloc), deg2rad(philoc))
    locoffset = rad2deg(bf.angle(sourcexyz, recvec))
    print("Loc offset = " + str(locoffset) + " deg")
    # BUGFIX: docstring promised the reconstructed position but nothing
    # was returned; hand it back so callers can use it.
    return recvec
def response2GRB(self, GRB, test=False, talk=False):
    """Localize every GRB in a sky grid against the ideal-response table.

    For each sky position in ``GRB.sourceangs`` the four detectors'
    noisy, background-subtracted responses are simulated ``nsamples``
    times (Gaussian fluctuations on top of ``self.bg``); each realization
    is matched row-by-row against the precomputed ideal response table
    ``self.ideal_data`` (built by ``initialize``) with a chi-squared fit,
    and the per-position offsets/errors are averaged. The pixel whose
    chi-squared is nearest min+1 gives a one-sigma error estimate.

    Parameters
    ----------
    GRB : object
        An instance of the separately defined "GRBs" class that contains
        a number of evenly spaced sky positions of a given strength.
    test : boolean
        If True, sample a single position once for quick sanity checks.
    talk : boolean
        If desired, prints position by position results.

    Returns
    -------
    skyvals : array
        Mean localization offset (deg) at each sky position.
    skyunc : array
        Mean chi-squared-plus-one error estimate (deg) at each position.
    """
    if talk:
        if self.tiltB != self.tiltA:
            print("Detector Class: " + str(rad2deg(self.tiltA)) + ' by ' +
                  str(rad2deg(self.tiltB)) + 'degrees')
        else:
            print("Detector Class: " + str(rad2deg(self.tiltA)) + ' degrees')
    stdev = True  # also estimate the chi2+1 error radius
    skyvals = []
    skyunc = []
    if test:
        nsamples = 1
        skypoints = 1
    else:
        skypoints = len(GRB.sourceangs)  # number of GRBs you're testing
        nsamples = 13
    for i in range(skypoints):  # for each grb
        sourceAng = GRB.sourceangs[i]
        if talk:
            print("For bursts @ " + str(rad2deg(sourceAng)))
        # cartesian position of the burst at this position
        sourcexyz = ang2vec(sourceAng[0], sourceAng[1])

        def sample_detector(norm, look_up):
            """One detector's noisy, background-subtracted counts."""
            sep = bf.angle(sourcexyz, norm)
            x = look_up(norm, sourcexyz)
            counts = GRB.Ao * bf.response(sep, x) + self.bg
            detactual = gauss(counts, sqrt(counts))
            # BUGFIX: the original zeroed ``detactual`` and then subtracted
            # the background again, producing -self.bg; clip the *signal*
            # to zero instead.
            return max(detactual - self.bg, 0)

        loc_offsets = []
        loc_errors = []
        # Each sky position is sampled nsamples times.
        # (Loop variable renamed from ``i``: it shadowed the sky index.)
        for _ in range(nsamples):
            arr = array([
                float(sample_detector(self.normA, bf.look_up_A)),
                float(sample_detector(self.normB, bf.look_up_B)),
                float(sample_detector(self.normC, bf.look_up_C)),
                float(sample_detector(self.normD, bf.look_up_D)),
            ]).reshape(1, -1)
            normalized_arr = normalize(arr, axis=1)  # relative responses

            # Broadcast the single observation across every sky pixel so it
            # can be compared row-by-row with the ideal response table.
            npix = len(self.ideal_data)
            observed_data = DataFrame([])
            observed_data['A'] = normalized_arr[0][0] * ones(npix)
            observed_data['B'] = normalized_arr[0][1] * ones(npix)
            observed_data['C'] = normalized_arr[0][2] * ones(npix)
            observed_data['D'] = normalized_arr[0][3] * ones(npix)

            # A chi term per pixel; the smallest total corresponds to the
            # reconstructed position (row index == HEALPix pixel number).
            chiterms = (self.ideal_data - observed_data)**2 / self.ideal_data
            observed_data['chisquared'] = chiterms.sum(axis=1)
            chimin = observed_data['chisquared'].idxmin()
            recpos = pix2ang(ipix=int(chimin), nside=self.nside)
            recvec = ang2vec(recpos[0], recpos[1])

            if stdev:
                def find_nearest_index(values, target):
                    """Index of the entry in ``values`` closest to ``target``."""
                    values = asarray(values)
                    return (abs(values - target)).argmin()

                # BUGFIX: the original returned the chi-squared *value*
                # nearest min+1 and fed it to pix2ang as a pixel number;
                # the row position (== HEALPix pixel) is what is needed.
                error_pix = find_nearest_index(
                    observed_data['chisquared'].values,
                    min(observed_data['chisquared']) + 1)
                error_ang = pix2ang(ipix=int(error_pix), nside=self.nside)
                error_vec = ang2vec(error_ang[0], error_ang[1])
                loc_error = rad2deg(bf.angle(error_vec, recvec))
                if isnan(loc_error):
                    # NaN means it was over 90 deg away; cap at 90.
                    loc_error = 90

            loc_offset = rad2deg(bf.angle(sourcexyz, recvec))
            loc_offsets.append(loc_offset)
            loc_errors.append(loc_error)

        loc_offsets = array(loc_offsets)
        loc_errors = array(loc_errors)
        if talk:
            print("Avg offset: " + str(mean(loc_offsets)))
            print("Std. Error: " + str(mean(loc_errors)))
            print(" ")
        skyvals.append(mean(loc_offsets))
        # BUGFIX: the original averaged the scalar ``loc_error`` (last
        # sample only); average the full per-sample array instead.
        skyunc.append(mean(loc_errors))

    skyvals = array(skyvals)
    skyunc = array(skyunc)
    return skyvals, skyunc
def response2GRB(self, GRB, samples, test=True, talk=False):
    """Localize each GRB in a sky grid with a chi-squared grid search.

    For every sky position in ``GRB.sourceangs`` the four detectors'
    noisy responses are simulated ``samples`` times (Gaussian
    fluctuations of either Gaussian or Poissonian nature assumed here as
    Gaussian sqrt-N noise on top of ``self.bg``); each realization is fit
    over a (theta, phi, amplitude) grid via ``bf.quad_solver`` and
    ``bf.indexer``, and the localization offsets are averaged.

    Parameters
    ----------
    GRB : object
        An instance of the separately defined "GRBs" class that contains
        a number of evenly spaced sky positions of a given strength.
    samples : int
        Number of noisy realizations simulated per sky position.
    test : boolean
        For sanity purposes, if the simulation seems to give unrealistic
        results, switching to test mode allows for much quicker sampling,
        making it easier to spot potential errors (single position,
        different amplitude grid).
    talk : boolean
        If desired, prints position by position results.

    Returns
    -------
    localizationerrors : list
        The average localization offset (deg) at each sampled sky
        position; also stored on ``self.localizationerrors``.
    """
    if test:
        sample = 1
        # Fit-grid ranges: theta/phi in degrees, A in counts above background.
        bottheta = 0
        toptheta = 180
        botphi = 0
        topphi = 360
        botA = 10
        topA = 1000
        ntheta = 20  # over sky chi points
        nphi = 37
        nA = 100
    else:
        # range of values used in the fitting.
        sample = len(GRB.sourceangs)  # number of GRBs you're testing
        bottheta = 0  # zenith
        toptheta = 180  # (elevation) range of theta values, down to horizon
        ntheta = 31  # over sky chi points / binning
        botphi = 0  # azimuthal angles
        topphi = 360
        botA = 200  # range of amplitudes/strength of source it tries to match
        topA = 1000  # counts above background
        nphi = 120
        nA = 12
    self.localizationerrors = []
    for i in range(sample):
        sourceAng = GRB.sourceangs[i]
        if talk:
            print("Testing " + str(rad2deg(sourceAng)))
        sourcexyz = ang2vec(sourceAng[0], sourceAng[1])  # cartesian position of the burst
        loop = 0
        # Each sky position is sampled ``samples`` times; offsets collect here.
        locunc = []
        while loop < samples:
            # Detector A: separation -> MEGAlib-based lookup -> theoretical
            # counts; background added, total fluctuated with sqrt-N noise.
            sepA = bf.angle(sourcexyz, self.normA)
            xA = bf.look_up_A(self.normA, sourcexyz)
            dtheoryA = GRB.Ao * bf.response(sepA, xA)
            countsA = dtheoryA + self.bg
            unccountsA = sqrt(countsA)
            detactualA = gauss(countsA, unccountsA)
            if detactualA - self.bg < 0:
                # Below-background draws are floored at the background level.
                # NOTE(review): may be worth investigating a specific level
                # below that threshold.
                detactualA = self.bg
            detcountsA = detactualA
            # Detector B (same recipe as A).
            sepB = bf.angle(sourcexyz, self.normB)
            xB = bf.look_up_B(self.normB, sourcexyz)
            dtheoryB = GRB.Ao * bf.response(sepB, xB)
            countsB = dtheoryB + self.bg
            unccountsB = sqrt(countsB)
            detactualB = gauss(countsB, unccountsB)
            if detactualB - self.bg < 0:
                detactualB = self.bg
            detcountsB = detactualB
            # Detector C.
            sepC = bf.angle(sourcexyz, self.normC)
            xC = bf.look_up_C(self.normC, sourcexyz)
            dtheoryC = GRB.Ao * bf.response(sepC, xC)
            countsC = dtheoryC + self.bg
            unccountsC = sqrt(countsC)
            detactualC = gauss(countsC, unccountsC)
            if detactualC - self.bg < 0:
                detactualC = self.bg
            detcountsC = detactualC
            # Detector D.
            sepD = bf.angle(sourcexyz, self.normD)
            xD = bf.look_up_D(self.normD, sourcexyz)
            dtheoryD = GRB.Ao * bf.response(sepD, xD)
            countsD = dtheoryD + self.bg
            unccountsD = sqrt(countsD)
            detactualD = gauss(countsD, unccountsD)
            if detactualD - self.bg < 0:
                detactualD = self.bg
            detcountsD = detactualD
            # Coarse-to-fine optimization: per-detector chi-squared cubes
            # over the (theta, phi, A) grid, summed for the joint fit.
            chiA = bf.quad_solver(detcountsA, self.normA, bottheta, toptheta,
                                  botphi, topphi, botA, topA, ntheta, nphi,
                                  nA, self.bg, A=True)
            chiB = bf.quad_solver(detcountsB, self.normB, bottheta, toptheta,
                                  botphi, topphi, botA, topA, ntheta, nphi,
                                  nA, self.bg, B=True)
            chiC = bf.quad_solver(detcountsC, self.normC, bottheta, toptheta,
                                  botphi, topphi, botA, topA, ntheta, nphi,
                                  nA, self.bg, C=True)
            chiD = bf.quad_solver(detcountsD, self.normD, bottheta, toptheta,
                                  botphi, topphi, botA, topA, ntheta, nphi,
                                  nA, self.bg, D=True)
            chisquared = add(add(chiA, chiB), add(chiC, chiD))  # adds it all up for total chi2
            # Grid cell with the minimum chi-squared -> reconstructed angles.
            thetaloc, philoc, Aguess = bf.indexer(chisquared, bottheta,
                                                  toptheta, botphi, topphi,
                                                  botA, topA, ntheta, nphi,
                                                  nA)
            recvec = ang2vec(deg2rad(thetaloc), deg2rad(philoc))
            locoffset = rad2deg(bf.angle(sourcexyz, recvec))
            locunc.append(locoffset)
            loop += 1
        if talk:
            print("Avg loc offset = " + str(average(locunc)) + " deg.")
        self.localizationerrors.append(np.mean(locunc))
    return self.localizationerrors
def initialize(self):
    """Precompute the noise-free detector-response table for the whole sky.

    Must be run before the BurstCube instance can run the full
    simulation. For every HEALPix sky position it evaluates what the
    relative response of each detector would be with no Gaussian noise
    (an 'ideal response', assuming the detectors can even see the
    burst); this ideal dataset is later compared against simulated
    observations to find the best-fitting sky position.

    Returns
    -------
    self.ideal_data : dataframe
        A pandas dataframe containing the relative # of counts in each
        detector ideally for every sky position; each row index is the
        nside pixel #, convertible to a sky position. The only limit now
        may be NSIDE.
    """
    GRB = Sky(self.nside, 1)  # inherits GRB
    detector_table = ((self.normA, bf.look_up_A),
                      (self.normB, bf.look_up_B),
                      (self.normC, bf.look_up_C),
                      (self.normD, bf.look_up_D))

    ideal_responses = []
    for source_ang in GRB.sourceangs:  # one entry per sky pixel
        burst_vec = ang2vec(source_ang[0], source_ang[1])  # cartesian burst position
        row = []
        for norm, look_up in detector_table:
            sep = bf.angle(burst_vec, norm)
            row.append(GRB.Ao * bf.response(sep, look_up(norm, burst_vec)))
        ideal_responses.append(row)

    ideal_responses = normalize(ideal_responses, axis=1)

    # Quick fix for the normalization below the horizon: identical
    # normalized responses in all four detectors mark such a pixel, so
    # overwrite the whole row with a sentinel value.
    for row in ideal_responses:
        if row[0] == row[1] == row[2] == row[3]:
            row[:] = 100

    self.ideal_data = DataFrame([])
    for label, column in zip('ABCD', ideal_responses.T):
        self.ideal_data[label] = column
    return self.ideal_data