Example no. 1
def PrintInfo():
    """ Function to write an info file, for use with plotting,
    and for saving Multinest options, likelihood functions and
    priors.
    """
    # Import variable inside function, to avoid circular module
    # imports.
    import MNOptions as MN
    import Priors
    import Likelihood

    infofile = MN.outputfiles_basename + '.info'
    info = open(infofile, 'w')

    info.write('''# SuperPy info file, with cube indices.
# This info file can be used for plot labels.\n''')

    for key, value in label.iteritems():
        # Add 1 to the keys, so that labelling begins at 1, rather than 0.
        # This is identical to the SuperBayeS info file convention.
        key = str(int(key) + 1)
        info.write('''lab%s=%s\n''' % (key, value))

    # Write the prior types and arguments.
    info.write('''# Priors.\n''')
    for key in Priors.CMSSMModelTracker().param:
        info.write('''%s %s %s\n''' %
                   (key, Priors.CMSSMModelTracker().param[key].type,
                    Priors.CMSSMModelTracker().param[key].arg))

    # Write the likelihood types and arguments.
    info.write('''# Likelihoods.\n''')
    for key in Likelihood.CMSSMConstraintTracker().constraint:
        info.write(
            '''%s %s %s\n''' %
            (key, Likelihood.CMSSMConstraintTracker().constraint[key].type,
             Likelihood.CMSSMConstraintTracker().constraint[key].arg))

    # Write the MN parameters.
    info.write('''# MultiNest options.\n''')
    for name in dir(MN):
        if not name.startswith("__"):
            info.write('''%s %s\n''' % (name, getattr(MN, name)))
    info.close()
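A minimal sketch of how PrintInfo is typically driven: it assumes a module-level label dictionary mapping cube indices to plot labels (the keys and label strings below are illustrative placeholders, not the actual SuperPy labels).

label = {0: 'm0 (GeV)', 1: 'm12 (GeV)', 2: 'tan(beta)'}  # illustrative labels only
PrintInfo()  # writes MN.outputfiles_basename + '.info'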
Example no. 2
def main():
    TRUE_WIMP = MockGen.TRUE_WIMP
    events = []
    with open('mock.txt') as f:
        events = f.read().splitlines()
    events = [float(num) for num in events]
    # Detector threshold energy.
    E_thr = 6 * const.keV
    # Allocate the MCMC chain and choose a starting point.
    theta = np.zeros((N, 2))
    theta[0] = [const.M_D, 1000 * const.sigma]
    # Set up the recoil-energy grid.
    Emin = 1 * const.keV
    Emax = fn.max_recoil_energy()
    del_Er = 1
    E_r = np.arange(Emin, Emax, del_Er)
    events = lik.find_indices(E_r, events)
    # Metropolis Hastings loop
    acceptance = []
    for i in tqdm(range(1, N)):
        proposed_theta = proposal(theta[i - 1])
        prev_lik = lik.events_likelihood(E_r, events, theta[i - 1], const.AXe,
                                         E_thr, del_Er)
        new_lik = lik.events_likelihood(E_r, events, proposed_theta, const.AXe,
                                        E_thr, del_Er)
        ratio = new_lik - prev_lik
        if ratio >= 0:
            theta[i] = proposed_theta
            acceptance.append(1)
        elif np.log(np.random.rand()) < ratio:
            theta[i] = proposed_theta
            acceptance.append(1)
        else:
            theta[i] = theta[i - 1]
    # Acceptance fraction; N - 1 proposals were made in the loop above.
    print(len(acceptance) / (N - 1))
    with open('data.txt', 'w') as f:
        for thetas in theta:
            f.write(str(thetas) + '\n')
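The loop above calls a proposal function that is not shown in this excerpt; below is a minimal Gaussian random-walk sketch (the relative step size is an illustrative assumption, not taken from the source).

import numpy as np


def proposal(theta, step_frac=0.05):
    # Symmetric random-walk proposal: perturb each parameter by a Gaussian
    # whose width is a fixed fraction of the current value (illustrative choice).
    return theta + np.random.normal(scale=step_frac * np.abs(theta))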
Example no. 3
def chisquared(a0, alphas, invalpha, m0, m12, mb, mt, signmu, tanb):
    """ Wrapper for likelihood.
    Arguments:
    a0, alphas, invalpha, m0, m12, mb, mt, signmu, tanb - nine elements of cube.
    """
    ndim = 9  # Model parameters - 9 for CMSSM.
    nparams = 100  # Approximate number of items in cube.
    cube = [0] * nparams  # Initialise cube as a list of zeros.
    # Copy arguments to cube.
    for i, arg in enumerate([a0, alphas, invalpha, m0, m12, mb, mt, signmu, tanb]):
        cube[i] = arg
    # ndim and nparams are irrelevant.
    chisquared = -2 * Likelihood.myloglike(cube, ndim, nparams)
    return chisquared
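An illustrative call of the wrapper (the parameter values are placeholders for a CMSSM point, not a fitted result):

chisq = chisquared(a0=-3000, alphas=1.184e-01, invalpha=127.944,
                   m0=400, m12=900, mb=4.18, mt=173.2, signmu=1, tanb=10)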
Example no. 5
def _main():

    # Open files and process reads dictionary
    f = open(sys.argv[1], 'r')
    reads_dict = init._process(f)

    # Get contigs from first consensus sequence
    contigs = cs.run_consensus(reads_dict)
    contig_file = open(sys.argv[2] + '/contig.txt', 'w+')
    ll_file = open(sys.argv[2] + '/likelihood.txt', 'w+')

    # Set initial parameters
    likelihood = 0
    likelihood_new = 0
    #likelihood_list = []

    for i in range(NUM_ITERS):
        # FILE WRITES
        # Contigs file write data
        contig_file.write('%s\tstart\t' % (str(i)))
        for c in contigs:
            contig_file.write('%s\t' % (str(c)))
        contig_file.write('\n')
        contig_file.flush()
        # Likelihood file write data
        ll_file.write('%s\t%s\t%s\n' %
                      (str(i), str(likelihood), str(len(contigs))))
        ll_file.flush()
        #likelihood_list.append(float(likelihood))
        # Reads file write data
        reads_file = open(sys.argv[2] + '/reads_trial_' + str(i) + '.txt', 'w')
        for r in reads_dict:
            for l in reads_dict[r]:
                reads_file.write(
                    str(l[3]) + ',' + str(l[0]) + ',' + str(l[1]) + ',' +
                    str(l[3]) + '\n')
        reads_file.close()
        # COMPUTATION OF ALGORITHM
        # Update likelihood
        likelihood = likelihood_new
        # Map reads
        reads_dict = rm.run(reads_dict, contigs)
        # Run Consensus Sequence
        contigs = cs.run_consensus(reads_dict)
        # Print data to file
        contig_file.write('%s\tmerge\t' % (str(i)))
        for c in contigs:
            contig_file.write('%s\t' % (str(c)))
        contig_file.write('\n')
        # Run merge
        contigs, reads_dict = mc.run_merge(
            contigs, reads_dict
        )  # how do we know if a merge has happened..do we need to know?
        # Get new likelihood
        likelihood_new = ll._likelihood(reads_dict, contigs)
    # FILE WRITES
    # Reads file write data
    reads_file = open(sys.argv[2] + '/reads_trial_' + str(i + 1) + '.txt', 'w')
    for r in reads_dict:
        for l in reads_dict[r]:
            reads_file.write(
                str(l[3]) + ',' + str(l[0]) + ',' + str(l[1]) + ',' +
                str(l[3]) + '\n')
    reads_file.close()
    # Print data to file
    for c in contigs:
        contig_file.write('1000\tend\t%s\n' % (str(c)))
    ll_file.write('%s\t%s\t%s\n' %
                  (str(NUM_ITERS), str(likelihood), str(len(contigs))))
    ll_file.flush()
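_main takes its inputs from sys.argv; an illustrative entry point follows (the script and path names are hypothetical).

if __name__ == '__main__':
    # Usage (hypothetical names): python assemble.py reads.txt output_dir
    _main()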
Example no. 6
# Tests an input point, run with
# python Tester.py a0 alphas invalpha lambda m0 m12 mb mt signmu tanb

# Superpy modules.
import Debug as DB  # Debug options.
import Likelihood  # Constraints and likelihood functions.

# External modules.
import sys

# Initialise the cube.
cols = 100
nparams = cols - 2  # Subtract 2 for chi-squared and posterior weight.
ndim = 10  # For CNMSSM.

# Read the input parameters into the cube.
cube = [0] * nparams  # Initialise cube as a list of zeros.
# Plus one because sys.argv[0] is the name of the script.
if len(sys.argv) != ndim + 1:
    sys.exit("You should supply a0, alphas, invalpha, lambda, m0, m12, mb, mt, signmu, tanb")

for i in range(len(sys.argv)):
    if i == 0:
        continue  # Ignore name of file.
    # Shift the index down by one to skip the script name (the zeroth
    # argument), and convert the string to a float.
    cube[i - 1] = float(sys.argv[i])

# Call likelihood function.
loglike = Likelihood.myloglike(cube, ndim, nparams)
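An illustrative invocation and check of the result (the numerical values are placeholders only):

# e.g. python Tester.py -3000 0.1184 127.944 0.1 400 900 4.18 173.2 1 10
print('log-likelihood = %s' % loglike)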
Example no. 7
import Likelihood  # Constraints and likelihood functions.
import PlotMod as PM  # To access this, export PYTHONPATH=$PWD/SuperPlot

# External modules.
import sys

# Check that line number was supplied.
if len(sys.argv) != 2:
    sys.exit('You should supply line number.')

# Open the chain with a GUI.
labels, data = PM.OpenData()
ndim = 10  # Number of model parameters - 10 for CNMSSM.
nparams = 100  # Approximate number of items in cube.

# Read line to re-calculate.
# sys.argv[0] is the name of the script, so the line number is the first
# argument, e.g. python LineProcess.py <line number>.
linenumber = int(sys.argv[1])  # We need to convert from string to integer.

# Initialise the cube to an empty list - it must be a list, not a dictionary.
cube = [0] * nparams

# Copy model parameters from the *.txt file to the cube.
for i in range(ndim):
    # Offset by 2 to skip the posterior weight and chi-squared columns.
    cube[i] = data[i + 2][linenumber]

# Re-calculate the cube etc with this likelihood function.
loglike = Likelihood.myloglike(cube, ndim, nparams)
Example no. 8
import os
import sys


def get_final_likelihood(sto_filename, tree_filename):
    sys.path.append(os.environ['GBPML'])
    import Likelihood
    return Likelihood.main(sto_filename, tree_filename)
Example no. 10
def main():
    E_thr = 6 * const.keV  # threshold energy for generating total number of events
    WIMP = TRUE_WIMP  # WIMP params

    target = const.AXe
    E_min = 1 * const.keV
    E_max = fn.max_recoil_energy()
    del_Er = (E_max - E_min) / N_STEPS
    E_r = np.arange(E_min, E_max, del_Er)
    weights = 1 / (1 + np.exp(-1.35 * E_r + 8.1))

    # Expected events per kg per day, as a function of E_thr.
    x, y = fn.integrate_rate(E_r, WIMP, target)
    y *= lik.BULK  # 100 kg target, 300 day runtime

    idx = lik.find_nearest_idx(E_r, E_thr)
    mean_events = y[idx]  # expected number of events
    num_events = stats.poisson.rvs(mean_events)  # num events to generate

    event_dist = fn.diff_rate(E_r, WIMP, target)  # event energies distribution
    idx2 = lik.find_nearest_idx(E_r, 60 * const.keV)
    event_dist *= weights
    event_dist = event_dist[:idx2]
    E_r = E_r[:idx2]

    norm_fact = np.sum(event_dist)
    event_dist /= norm_fact
    true_rates = event_dist * mean_events

    custm = stats.rv_discrete(name='custm', values=(E_r, event_dist))

    samples = custm.rvs(size=num_events)
    samples = samples[samples < 100]
    fig, ax = plt.subplots()
    ax.hist(samples, bins=N_STEPS)
    for WIMP in [TRUE_WIMP,
                 [500 * const.GeV, TRUE_WIMP[1]],
                 [const.M_D * 10, TRUE_WIMP[1]]]:
        E_min = 1 * const.keV
        E_max = fn.max_recoil_energy()
        del_Er = (E_max - E_min) / N_STEPS
        E_r = np.arange(E_min, E_max, del_Er)

        event_dist = fn.diff_rate(E_r, WIMP,
                                  target)  # event energies distribution
        event_dist *= weights
        event_dist = event_dist[:60]
        E_r = E_r[:60]

        norm_fact = np.sum(event_dist)
        event_dist /= norm_fact
        true_rates = event_dist * mean_events
        ax.plot(E_r,
                true_rates,
                label=str(WIMP[0] / 10**6) + " GeV, " +
                str(WIMP[1] / const.cm2) + "cm2")
    ax.set_ylabel("Counts")
    ax.set_xlabel("Recoil Energy (keV)")
    plt.legend()
    plt.show()
    with open('mock.txt', 'w') as f:
        for ele in samples:
            f.write(str(ele) + '\n')
Example no. 11
# parameters, rather than a list.


def chisquared(a0, alphas, invalpha, m0, m12, mb, mt, signmu, tanb, lambda_):
    """ Wrapper for likelihood.
    Arguments:
    a0, alphas, invalpha, m0, m12, mb, mt, signmu, tanb, lambda_ - ten elements of cube.
    (lambda_ carries a trailing underscore because lambda is a reserved word in Python.)
    """
    ndim = 10  # Model parameters - 10 for CNMSSM.
    nparams = 100  # Approximate number of items in cube.
    cube = [0] * nparams  # Initialise cube as a list of zeros.
    # Copy arguments to cube.
    for i, arg in enumerate(
            [a0, alphas, invalpha, m0, m12, mb, mt, signmu, tanb, lambda_]):
        cube[i] = arg
    # ndim and nparams are irrelevant.
    chisquared = -2 * Likelihood.myloglike(cube, ndim, nparams)
    return chisquared

# Setup initial values, errors etc.
kwdarg = dict(
    a0=-3000,
    error_a0=100,
    alphas=1.18400000e-01,
    fix_alphas=True,
    invalpha=1.27944000e+02,
    fix_invalpha=True,
    m0=400,
    error_m0=100,
    m12=900,
    error_m12=100,
    mb=4.18000000e+00,
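The kwdarg dictionary is cut off in this excerpt; below is a minimal sketch of how such a wrapper and dictionary are typically passed to a minimiser, assuming the legacy iminuit (v1.x) keyword interface. The remaining entries (e.g. for mt, signmu, tanb and lambda_) are not shown in the source, so they are omitted here.

from iminuit import Minuit

# errordef=1 corresponds to a chi-squared cost function.
minuit = Minuit(chisquared, errordef=1, **kwdarg)
minuit.migrad()  # run the minimisation
print(minuit.values)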
Example no. 12
def treeLogLikelihood(conf, tree, stree, gene2species, params, baserate=None):
    conf.setdefault("bestlogl", -util.INF)
    
    if pyspidir is None or conf.get("python_only", False):
        return Likelihood.treeLogLikelihood_python(conf, tree, stree, 
                                                   gene2species, params, 
                                                   baserate=baserate, 
                                                   integration="fastsampling")

    # debug info
    if isDebug(DEBUG_MED):
        util.tic("find logl")
    

    # derive relative branch lengths
    #tree.clearData("logl", "extra", "fracs", "params", "unfold")
    recon = phylo.reconcile(tree, stree, gene2species)
    events = phylo.label_events(tree, recon)
    
    # determine if top branch unfolds
    if recon[tree.root] ==  stree.root and \
       events[tree.root] == "dup":
        for child in tree.root.children:
            if recon[child] != stree.root:
                child.data["unfold"] = True

    # top branch is "free"
    params[stree.root.name] = [0,0]
    this = util.Bundle(logl=0.0)
    
    if conf.get("generate_int", False):
        baserate = -99.0 # indicates in integration over gene rates is requested
    elif baserate == None:
        baserate = Likelihood.getBaserate(tree, stree, params, recon=recon)
        
    
    phylo.midroot_recon(tree, stree, recon, events, params, baserate)
    
    # calc likelihood in C
    this.logl = treeLikelihood_C(conf, tree, recon, events, stree, params, 
                                 baserate, gene2species)
    
    # calc probability of rare events
    tree.data["eventlogl"] = Likelihood.rareEventsLikelihood(conf, tree, stree, recon, events)
    
    # calc penalty of error
    tree.data["errorlogl"] = tree.data.get("error", 0.0) * \
                             conf.get("errorcost", 0.0)
    this.logl += tree.data["errorlogl"]
    
    # add logl of sequence evolution
    this.logl += tree.data.get("distlogl", 0.0)
    
    if baserate == -99.0:  # integration over gene rates was requested
        baserate = Likelihood.getBaserate(tree, stree, params, recon=recon) 
    
    tree.data["baserate"] = baserate
    tree.data["logl"] = this.logl
    
    if isDebug(DEBUG_MED):
        util.toc()
        debug("\n\n")
        drawTreeLogl(tree, events=events)
        
    return this.logl
Example no. 13
parser.add_argument("--threads",
                    dest="threads",
                    help="Number of threads",
                    type=str,
                    default=8)
parser.add_argument("--logmslim",
                    dest="logMSlim",
                    help="Lower limit on stellar mass",
                    type=str,
                    default=None)
args = parser.parse_args()
ncore = int(args.threads)
logSMlim = float(args.logMSlim)

# Initiate my likelihood model
model = Likelihood.Model(logSMlim, generator=True)
print("Initiated the model")
sys.stdout.flush()

# How dense should the grid be
alphas = np.linspace(p.min_alpha, p.max_alpha, p.Nalphas)
scatters = np.linspace(p.min_scatter, p.max_scatter, p.Nscatters)

# Say x-dimension corresponds to alpha, y-dimension corresponds to scatter
XX, YY = np.meshgrid(alphas, scatters)
ndim1, ndim2 = XX.shape

# Calculate the stochastic covariance matrix at these values
Niter = 40
Ntot = XX.size
Example no. 14
def get_final_likelihood(sto_filename, tree_filename):
    import Likelihood
    return Likelihood.main(sto_filename, tree_filename)
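An illustrative call (the file names are hypothetical placeholders):

final_logl = get_final_likelihood('alignment.sto', 'tree.newick')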
Example no. 15
# File names - best to convert to absolute path here.
outputfiles_basename = os.path.abspath('./chains/CMSSM')

# These are the settings that are passed to MultiNest.
# Loglike callback function.
LogLikelihood = Likelihood.myloglike
# Prior callback function.
Prior = Priors.myprior
# Number of model parameters.
n_dims = len(Priors.CMSSMModelTracker().param)
# Total number of parameters in cube.
# This is model parameters + constraints + chi2 from constraints + 33
# sparticle/particle masses + mu + neutralino mixing.
n_params = n_dims + 2 * \
    len(Likelihood.CMSSMConstraintTracker().constraint) + 33 + 1 + 16
# Which parameters to mode cluster, first n.
n_clustering_params = 2
# Should be a vector of 1s and 0s - 1 = wrapped, 0 = unwrapped.
wrapped_params = None
# Whether to separate modes.
multimodal = True
const_efficiency_mode = False
n_live_points = 10
evidence_tolerance = 1.0
sampling_efficiency = 2.0
n_iter_before_update = 1
null_log_evidence = -1e+90
# For memory allocation only, if memory exceeded, MultiNest halts.
max_modes = 5
# Seed the random number generator, if -1 seeded from system clock.
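A minimal sketch of how an options module like this is typically consumed, assuming the pymultinest wrapper (the import layout is illustrative; the module is imported as MNOptions, matching Example no. 1, and the call simply forwards the settings defined above).

import pymultinest
import MNOptions as MN

pymultinest.run(MN.LogLikelihood, MN.Prior, MN.n_dims,
                n_params=MN.n_params,
                n_clustering_params=MN.n_clustering_params,
                wrapped_params=MN.wrapped_params,
                multimodal=MN.multimodal,
                const_efficiency_mode=MN.const_efficiency_mode,
                n_live_points=MN.n_live_points,
                evidence_tolerance=MN.evidence_tolerance,
                sampling_efficiency=MN.sampling_efficiency,
                n_iter_before_update=MN.n_iter_before_update,
                null_log_evidence=MN.null_log_evidence,
                max_modes=MN.max_modes,
                outputfiles_basename=MN.outputfiles_basename)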
Example no. 16
outputfiles_basename = os.path.abspath('./chains/CNMSSM')

# These are the settings that are passed to MultiNest.
# Loglike callback function.
LogLikelihood = Likelihood.myloglike
# Prior callback function.
Prior = Priors.myprior
# Number of model parameters.
n_dims = len(Priors.CNMSSMModelTracker().param)
# Total number of parameters in cube.
# This is model parameters + constraints + chi2 from constraints + 33
# sparticle/particle masses + mu + neutralino mixing.
# For NMSSM, neutralino mixing matrix is 5 by 5.
# For NMSSM, 2 extra masses for singlet and singlino.
n_params = n_dims + 2 * \
    len(Likelihood.CNMSSMConstraintTracker().constraint) + 33 + 2 + 1 + 25
# Which parameters to mode cluster, first n.
n_clustering_params = 2
# Should be a vector of 1s and 0s - 1 = wrapped, 0 = unwrapped.
wrapped_params = None
# Whether to separate modes.
multimodal = True
const_efficiency_mode = False
n_live_points = 10
evidence_tolerance = 1.0
sampling_efficiency = 2.0
n_iter_before_update = 1
null_log_evidence = -1e+90
# For memory allocation only, if memory exceeded, MultiNest halts.
max_modes = 5
# Seed the random number generator, if -1 seeded from system clock.
Example no. 17
parser.add_argument("--threads",
                    dest="threads",
                    help="Number of threads",
                    type=str,
                    default=8)
parser.add_argument("--perccat",
                    dest="perccat",
                    help="Sets how much of the catalog to exclude",
                    type=str,
                    default=None)
args = parser.parse_args()
ncores = int(args.threads)
perccat = float(args.perccat)

cuts_def = p.load_pickle("../../Data/BMmatching/logMBcuts_def.p")
logBMlim = cuts_def[perccat]

# Initiate my likelihood model
model = Likelihood.Model(logBMlim, perccat, generator=True)
print("Initiated the model!")
sys.stdout.flush()

alphas = np.linspace(p.min_alpha, p.max_alpha, p.Nalphas)
scatters = np.linspace(p.min_scatter, p.max_scatter, p.Nscatters)

# Say x-dimension corresponds to alpha, y-dimension corresponds to scatter
XX, YY = np.meshgrid(alphas, scatters)
ndim1, ndim2 = XX.shape
# Calculate the stochastic covariance matrix at these values
Niter = 1
covmats = np.zeros(shape=(ndim1, ndim2, p.nbins, p.nbins))
Ntot = XX.size
k = 1
extime = list()
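The excerpt stops before the grid loop itself; a hedged sketch of how the stochastic covariance matrices might be filled, given the arrays defined above (model.generate_stochastic is a hypothetical method name, since the actual generator call is not shown in this excerpt).

from time import time

for i in range(ndim1):
    for j in range(ndim2):
        start = time()
        # Draw Niter stochastic realisations of the binned statistic at this grid point.
        samples = np.array([model.generate_stochastic(XX[i, j], YY[i, j])
                            for _ in range(Niter)])
        covmats[i, j] = np.cov(samples, rowvar=False)
        extime.append(time() - start)
        print("Done point {}/{}".format(k, Ntot))
        k += 1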
Example no. 18
    def scan(self, regfilt, threshold=5.0):

        bins = numpy.arange(self.time.min(),
                            self.time.max() + 1e-4, self.timebin)

        xbins = numpy.arange(self.xmin, self.xmax, 25)
        ybins = numpy.arange(self.ymin, self.ymax, 25)

        region = Likelihood.Region(self.xmin, self.xmax, self.ymin, self.ymax,
                                   40, regfilt, self.expomap, self.eventfile)

        ls = Likelihood.Likelihood(self.X, self.Y, region)

        # Build the likelihood model

        # Bkg model (isotropic)
        iso = Likelihood.Isotropic("bkg", 1.0)

        m = Likelihood.GlobalModel("likeModel") + iso

        knownSources = []

        for i, src in enumerate(self.sources):

            thisSrc = Likelihood.PointSource("src%i" % (i + 1), src[0], src[1],
                                             self.eventfile, self.hwu)

            m += thisSrc

            if i >= 10:
                log.info("Fixing normalization of source %s" % (i + 1))

                thisSrc["src%i_norm" % (i + 1)].fix()

            knownSources.append(thisSrc)

        ls.setModel(m)

        # Minimize the mlogLike to get the background level
        like0 = ls.minimize()

        figIntegrated = ls.plot()

        excessesFound = []

        figs = [figIntegrated]

        for t1, t2 in zip(bins[:-1], bins[1:]):

            sys.stderr.write(".")

            idx = (self.time >= t1) & (self.time < t2)

            # db = DbScanner(self.X[idx],self.Y[idx])

            # db = SAODetect(self.X[idx],self.Y[idx])

            # db = cellDetect.CellDetect(self.X[idx],self.Y[idx],200,3)

            ls = Likelihood.Likelihood(self.X[idx], self.Y[idx], region)

            totCounts = ls.getTotalCounts()

            if totCounts < 3:
                # No need to perform any fit. Too few counts in this
                # interval

                log.debug("Skip interval %s - %s, less than 3 counts here" %
                          (t1, t2))

                continue

            # Build the likelihood model

            # Bkg model (isotropic)
            iso = Likelihood.Isotropic("bkg", 0.1)

            m = Likelihood.GlobalModel("likeModel") + iso

            for i, src in enumerate(knownSources):
                m += src

            ls.setModel(m)

            # Minimize the mlogLike to get the background level
            like0 = ls.minimize(verbose=0)

            # ls.plot()

            bkgLevel = iso['bkg_amp'].value / pow(region.binsize, 2.0)
            error = iso['bkg_amp'].error / pow(region.binsize, 2.0)

            # print("Bkg: %s +/- %s" %(bkgLevel, error))

            db = cellDetect.CellDetect(self.X[idx], self.xmin, self.xmax,
                                       self.Y[idx], self.ymin, self.ymax, 200,
                                       3)

            centroids = db.findExcesses(bkgLevel * pow(200, 2),
                                        error * pow(200, 2))

            newSources = []

            if len(centroids) > 0:
                # Verify if this source is truly significant

                TSs = []

                for (x, y) in centroids:

                    sys.stderr.write("-")

                    # Avoid computing the TS for a source too close to the ones already
                    # known

                    d = [math.sqrt(pow(src.xpos - x, 2) + pow(src.ypos - y, 2))
                         for src in knownSources]

                    dd = [dist for dist in d if dist < 5.0 / 0.05]

                    if len(dd) > 0:
                        # There is a source close to this centroid. No need
                        # to investigate further
                        continue

                    thisSrc = Likelihood.PointSource("testsrc", x, y,
                                                     self.eventfile, self.hwu,
                                                     knownSources[0].outfile)

                    ls.model += thisSrc

                    like1 = ls.minimize(verbose=0)

                    TSs.append(2 * (like0 - like1))
                    # print("(X,Y) = (%s,%s) -> TS = %s" %(x,y,TSs[-1]))

                    ls.model.removeSource("testsrc")

                    # Using the approximation significance = sqrt(TS)

                    if TSs[-1] >= pow(threshold, 2):
                        newSources.append([x, y, TSs[-1]])

            if len(newSources) > 0:
                db.centroids = numpy.array(newSources)
                fig = db.plot(xbins, ybins)

                figs.append(fig)

                for (x, y, ts) in newSources:
                    excessesFound.append([t1, t2, x, y, math.sqrt(ts)])

        sys.stderr.write("\n")

        return excessesFound, figs