def distance_limit(self, distrange=None, Xmin=0.01, gridsize=10, numtrains=100, skiplo=True):
    """
    Generate burst trains and test how many of them are consistent with the GTIs.

    For each (distance, X) pair on a gridsize x gridsize grid, draw a set of
    anisotropy factors, run a single-burst model to estimate the initial
    recurrence interval, then simulate ``numtrains`` full trains with the
    reference burst time distributed over that interval, recording the
    fraction of trains that are consistent with the GTIs.

    :param distrange: 2-element [min, max] distance range to grid over
        (default [1, 10]; a None sentinel is used to avoid a mutable default)
    :param Xmin: minimum hydrogen fraction X for the grid (max is fixed at 0.7)
    :param gridsize: number of grid points in each of distance and X
    :param numtrains: number of burst trains to simulate per grid point
    :param skiplo: if True, skip trials whose initial interval greatly exceeds
        the observation span (they are counted as valid)
    """
    # Avoid the mutable-default-argument pitfall; [1, 10] is the default range
    if distrange is None:
        distrange = [1, 10]
    assert len(distrange) == 2

    X_0, Z, Q_b, f_a, f_E, r1_0, r2, r3, mass, radius = self.theta

    # Loop over the distance values
    distance = np.linspace(distrange[0], distrange[1], gridsize)
    X = np.linspace(Xmin, 0.7, gridsize)
    idist = cd.iso_dist(nsamp=gridsize)
    xi_b, xi_p = cd.anisotropy(idist)

    # initialise the array to keep to grid results
    result = np.zeros((gridsize, gridsize))

    for i, _distance in enumerate(distance):

        # For each distance, loop over the h-fraction range
        for j, _X in enumerate(X):

            # initialise result array
            valid_array = []

            print('Drawing one of {} values for xi_p:'.format(
                len(xi_p.distribution)))

            # For each distance and h-fraction, draw a bunch of inclination values and xi_p's:
            for _xi_p in xi_p.distribution:

                r1 = _xi_p.value * (_distance / 10)**2
                theta_1 = (_X, Z, Q_b, f_a, f_E, r1, r2, r3, mass, radius)

                # Run for just one burst to get the initial interval
                # Set ref_ind to be zero, will subsequently distribute the start burst times
                # between up to the simulated interval
                test, valid = runmodel(theta_1, self.y, 0.0, self.bstart,
                                       self.pflux, self.pfluxe, self.tobs, 1,
                                       1, 0.0, 0, self.train, debug=False)
                print("result: ", test, valid)
                # self.plot_model(test)

                # find the initial range of start times to distribute the bursts over
                # BUGFIX: use short-circuit `and` rather than bitwise `&`; with
                # `&` both operands are evaluated, so len(test['time']) raised a
                # TypeError whenever runmodel returned test=None
                if (test is not None) and (len(test['time']) > 1):

                    # iref = np.argmin(abs(np.array(test['time'])-self.tref))
                    iref = np.argmin(abs(np.array(test['time'])))
                    intvl = test['time'][iref + 1]
                else:
                    # If we can't get an initial burst train, distribute the start times
                    # over the entire outburst
                    intvl = max(self.tobs)

                # This piece of code is probably redundant now that next_burst checks for
                # predicted burst times beyond tobs
                if (intvl > 2. * max(self.tobs)) and skiplo:
                    print(
                        'Skipping trials with d={:.4f} and X={:.4f} due to excessive tdel {} >> {}'
                        .format(_distance, _X, intvl, max(self.tobs)))
                    valid_array.append(True)
                else:
                    # intvl = min([test['time'][iref+1], max(self.tobs)])
                    trials = np.linspace(0., intvl, numtrains)
                    print(
                        'Simulating {} trains with reference burst distributed within (0, {:.3f})'
                        .format(numtrains, intvl))

                    for trial in trials:

                        print('trial {:.4f}:'.format(trial))
                        # Set nburstssim to 100 below, just need to make sure it's sufficient to cover
                        # the whole outburst. Replace ref_ind with trial, as the starting burst time
                        # (ref_ind is meaningless if there's no bursts)
                        test, valid = runmodel(theta_1, self.y, 0.0,
                                               self.bstart, self.pflux,
                                               self.pfluxe, self.tobs, 100,
                                               100, trial, 1, self.train,
                                               gti_start=self.st,
                                               gti_end=self.et, debug=False)

                        # for debugging
                        # self.plot_model(test)
                        # breakpoint()

                        valid_array.append(valid)
                        print(' valid={}'.format(valid))

            # Fraction of simulated trains consistent with the GTIs
            result[i, j] = len(np.where(valid_array)[0]) / len(valid_array)
            print('End of loop for d={}, X={}, % valid is {}'.format(
                _distance, _X, 100 * result[i, j]))
            # BUGFIX: removed a stray breakpoint() left over from debugging,
            # which dropped into the debugger after every grid point
def __init__(self,
             ndim=10,
             nwalkers=200,
             nsteps=100,
             run_id="1808/test1",
             obsname='1808_obs.txt',
             burstname='1808_bursts.txt',
             gtiname='1808_gti.txt',
             theta=(0.44, 0.01, 0.18, 2.1, 3.5, 0.108, 0.90, 0.5, 1.4,
                    11.2),
             numburstssim=3,
             numburstsobs=4,
             bc=2.21,
             ref_ind=1,
             gti_checking=0,
             threads=4,
             restart=False,
             train=None):
    """
    Set up the initial conditions, load the observation/burst/GTI data via
    ``init``, and run a single test model to check everything works.

    :param ndim: number of model dimensions
    :param nwalkers: number of emcee walkers
    :param nsteps: number of emcee steps
    :param run_id: where you want output to be saved and under what name
    :param obsname: location of your observation data file
    :param burstname: location of your burst data file
    :param gtiname: location of your GTI data file
    :param theta: starting value for each parameter; ordering is
        X, Z, Q_b, f_a, f_E, r1, r2, r3, mass, radius
    :param numburstssim: integer value of half the number of bursts you want
        to simulate, i.e. this many from the reference burst in either
        direction. Don't forget to account for missed bursts!
    :param numburstsobs: number of observed bursts in your dataset
    :param bc: bolometric correction to apply to your persistent flux
        (1.0 if they are already bolometric fluxes)
    :param ref_ind: index of the reference burst (should be middle of the
        predicted burst train); this burst is not simulated but is used as a
        reference to predict the other bursts
    :param gti_checking: 1 to turn on GTI time checking, 0 for off
    :param threads: number of threads for emcee to use (e.g. number of cores)
    :param restart: set True to restart a crashed run from a previous run
        with the same run_id
    :param train: 1 to generate a burst train, 0 to work on non-contiguous
        bursts; defaults to 1 when not supplied
    """
    from initialise import init
    from run_model import runmodel

    # Set up initial conditions:
    self.ndim = ndim
    self.nwalkers = nwalkers  # nwalkers and nsteps are the number of walkers and number of steps for emcee to do
    self.nsteps = nsteps
    self.run_id = run_id  # Where you want output to be saved and under what name
    self.theta = theta  # Set starting value for each theta parameter; recall ordering: X, Z, Q_b, f_a, f_E, r1, r2, r3
    self.threads = threads  # Number of threads for emcee to use
    self.numburstssim = numburstssim
    self.numburstsobs = numburstsobs
    self.ref_ind = ref_ind
    self.gti_checking = gti_checking  # Option to turn on GTI time checking (1 for on, 0 for off)
    self.obsname = obsname  # location of the observation data file
    self.burstname = burstname  # location of the burst data file
    self.gtiname = gtiname  # location of the GTI data file
    self.bc = bc  # bolometric correction to apply to the persistent flux
    self.restart = restart  # restart from a previous run with run_id above

    # BUGFIX: the previous logic set self.train = 0 whenever train was not
    # None, so an explicitly-passed train=1 was silently turned into 0.
    # Now the caller's value is respected, defaulting to 1 (burst train mode).
    self.train = 1 if train is None else train

    self.x, self.y, self.yerr, self.tref, self.bstart, self.pflux, \
        self.pfluxe, self.tobs, self.fluen, self.st, self.et = init(
            ndim, nwalkers, theta, run_id, threads, numburstssim,
            numburstsobs, ref_ind, gti_checking, obsname, burstname,
            gtiname, bc, restart, self.train)
    print(self.st, self.et)

    # # -------------------------------------------------------------------------#
    # # TEST THE MODEL WORKS
    # # -------------------------------------------------------------------------#
    print(
        "# -------------------------------------------------------------------------#"
    )
    print("Doing Initialisation..")

    print("Testing the model works..")

    test, valid = runmodel(self.theta, self.y, self.tref, self.bstart,
                           self.pflux, self.pfluxe, self.tobs,
                           self.numburstssim, self.numburstsobs,
                           self.ref_ind, self.gti_checking, self.train,
                           self.st, self.et,
                           debug=False)  # set debug to True for testing
    print("result: ", test, valid)

    self.plot_model(test)
def lnlike(self, theta_in, x, y, yerr):
    """
    Log-likelihood of the model given the burst data.

    Runs the burst model via ``runmodel``, scales the predicted fluences and
    alphas by r3 and r2 respectively, and evaluates a Gaussian likelihood
    against ``self.y`` with per-parameter inverse variances. Returns
    ``(-inf, model)`` when the model is invalid.

    :param theta_in: model parameter tuple
        (X, Z, Q_b, f_a, f_E, r1, r2, r3, mass, radius)
    :param x: independent-variable data (unused directly; NOTE the name is
        shadowed by the loop variables below)
    :param y: data vector passed through to runmodel
    :param yerr: measurement errors, ordered as times, fluences, alphas
    :return: tuple (log-likelihood, model2) where model2 is an ASCII-encoded
        string representation of the predicted burst train/ensemble

    NOTE(review): this method writes t%s/Eb%s/a%s (and st%s/sEb%s/sa%s)
    entries into globals() as a side effect — presumably consumed elsewhere
    in the module; verify before refactoring.
    """
    # define y = "data" parameters
    # Burst times from self.y (skipping the reference burst index) -> globals t<i>
    for x, i in zip(
        [x for x in range(0, len(self.bstart) - 1) if x != self.ref_ind],
        [i for i in range(0, len(self.bstart) - 1) if i != self.ref_ind]):
        globals()['t%s' % i] = self.y[x]

    # Fluences -> globals Eb<i>
    for x, i in zip(
            range(
                len(self.bstart) - 1,
                len(self.fluen) + len(self.bstart) - 1),
            range(0, len(self.bstart))):
        globals()['Eb%s' % i] = self.y[x]

    # Alphas -> globals a<i>
    # NOTE(review): `len(self.bstart - 1)` (not `len(self.bstart) - 1`) only
    # works if bstart is a numpy array (elementwise subtraction preserves
    # length); zip truncation masks the off-by-one — TODO confirm intent
    for x, i in zip(
            range(len(self.fluen) + len(self.bstart) - 1, len(self.y)),
            range(0, len(self.bstart - 1))):
        globals()['a%s' % i] = self.y[x]

    # define yerr as variance terms (errors) for our data parameters (listed in same order as for y)
    # *note that we have to enter three time errors for the code to work however in reality the error should be the same for all of them (st0, st2 and st3 are really dummy parameters)
    for x, i in zip(
        [x for x in range(0, len(self.bstart) - 1) if x != self.ref_ind],
        [i for i in range(0, len(self.bstart) - 1) if i != self.ref_ind]):
        globals()['st%s' % i] = self.yerr[x]

    for x, i in zip(
            range(
                len(self.bstart) - 1,
                len(self.fluen) + len(self.bstart) - 1),
            range(0, len(self.bstart))):
        globals()['sEb%s' % i] = self.yerr[x]

    # NOTE(review): same `len(self.bstart - 1)` pattern as above
    for x, i in zip(
            range(len(self.fluen) + len(self.bstart) - 1, len(self.y)),
            range(0, len(self.bstart - 1))):
        globals()['sa%s' % i] = self.yerr[x]

    # define theta = model parameters, which we define priors for
    X, Z, Q_b, f_a, f_E, r1, r2, r3, mass, radius = theta_in

    # Instead of treating s_t as a parameter, we just hardwire it here
    s_t = 10.0 / 1440.0  # 10 minutes, in days

    # call model from IDL code defined as modeldata(base, z, x, r1, r2 ,r3)
    model, valid = runmodel(theta_in, y, self.tref, self.bstart, self.pflux,
                            self.pfluxe, self.tobs, self.numburstssim,
                            self.numburstsobs, self.ref_ind,
                            self.gti_checking, self.train, self.st, self.et)
    if not valid:
        return -np.inf, model

    # multiplying by scaling factors to match with the data:
    # fluence block scaled by r3, alpha block scaled by r2
    model[len(self.bstart) - 1:len(self.fluen) + len(self.bstart) - 1] *= r3
    model[len(self.fluen) + len(self.bstart) - 1:len(self.y)] *= r2

    # To simplify final likelihood expression we define inv_sigma2 for each data parameter that describe the error.
    # The variance (eg sEb0) is underestimated by some fractional amount, f, for each set of parameters.
    sEb = yerr[len(self.bstart) - 1:len(self.fluen) + len(self.bstart) - 1]
    sa = yerr[len(self.fluen) + len(self.bstart) - 1:len(self.yerr)]

    # Inverse variances: hardwired s_t for the times, then fluence errors
    # inflated by f_E, then alpha errors inflated by f_a
    inv_sigma2 = []
    for i in range(0, len(self.bstart) - 1):
        inv_sigma2.append(1.0 / (s_t**2))
    for i in range(0, len(self.bstart)):
        inv_sigma2.append(1.0 / ((sEb[i] * f_E)**2))
    for i in range(0, len(self.bstart) - 1):
        inv_sigma2.append(1.0 / ((sa[i] * f_a)**2))

    # Final likelihood expression
    cpts = (self.y - (model))**2 * inv_sigma2 - (np.log(inv_sigma2))

    # Test if the result string is defined here. It is, so we return the selected elements of result
    # instead of the downselection in model
    # NOTE(review): the assignments below (r1 = r1 etc.) are no-ops kept for
    # readability of the generate_burst_train call signature
    base = Q_b
    z = Z
    x = X
    r1 = r1
    r2 = r2
    r3 = r3
    mass = mass
    radius = radius

    if self.train == 1:
        model2 = generate_burst_train(base, z, x, r1, r2, r3, mass, radius,
                                      self.bstart, self.pflux, self.pfluxe,
                                      self.tobs, self.numburstssim,
                                      self.ref_ind)
    else:
        model2 = burstensemble(base, x, z, r1, r2, r3, mass, radius,
                               self.bstart, self.pflux, self.numburstsobs)
    #model2 = np.string_(model2, dtype='S1000')
    model2 = str(model2).encode('ASCII')

    # Now also return the model
    return -0.5 * np.sum(cpts), model2