Example #1
    def in_simplex(self, newp, tol=0, eq_tol=1e-8, verbose=0):
        bad = []

        a_min = np.inf
        for i,[c,e] in enumerate(self.eq_list):
            a0 = np.dot(newp, e[1:])
            a  = e[0] + a0

            #print newp.flags, e[1:].flags
            #assert 0
            if c == 'geq':
                a_min = min(a_min, a)
                if a < -tol: 
                    if verbose: Log( 'F> %i %e' % (i,a) )
                    bad.append([i,a])
            elif c == 'leq':
                a_min = min(a_min, a)
                if a > tol: 
                    if verbose: Log( 'F< %i %e' % (i,a) )
                    bad.append([i,a])
            elif c == 'eq':
                if np.abs(a) > eq_tol: 
                    if verbose: Log( 'F= %i %e %e' %(i,a, (1 - np.abs(e[0]/a0))) )
                    bad.append([i,a])

            #if verbose > 1: print "TT", c, a
              
        if verbose > 1:
            Log( 'Smallest a was %e' % (a_min,) )

        #print 'T '
        return not bad, bad
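
In words: each constraint row e stores its constant term in e[0] and its coefficients in e[1:], so the test evaluates a = e[0] + e[1:]·x and requires a >= -tol for 'geq' rows, a <= tol for 'leq' rows, and |a| <= eq_tol for 'eq' rows. A minimal standalone sketch of the same feasibility check (the names here are illustrative, not part of GLASS):

import numpy as np

def check_point(x, constraints, tol=0.0, eq_tol=1e-8):
    # constraints: list of (kind, row) with row = [constant, coeff_1, ..., coeff_n]
    bad = []
    for i, (kind, row) in enumerate(constraints):
        a = row[0] + np.dot(x, row[1:])
        if   kind == 'geq' and a < -tol:           bad.append((i, a))
        elif kind == 'leq' and a >  tol:           bad.append((i, a))
        elif kind == 'eq'  and np.abs(a) > eq_tol: bad.append((i, a))
    return not bad, bad

# x = 1 satisfies x >= 0 (row [0, 1], 'geq') and x <= 2 (row [-2, 1], 'leq')
print(check_point(np.array([1.0]),
                  [('geq', np.array([0.0, 1.0])),
                   ('leq', np.array([-2.0, 1.0]))]))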
Example #2
    def compute_eval_evec(self, store, eval, evec, n_stored):

        #s = max(0, n_stored - ceil(0.5*self.burnin_len))
        s = 0

        eval0,evec0 = eigh(np.cov(store[:,  s:n_stored]))
        avg = store[:,  s:n_stored].mean(axis=1)

        if self.avg0 is not None:
            Log( 'average store delta %s' % str(norm(avg-self.avg0)) )
        self.avg0 = avg.copy()

        nzero = 0
        for r in range(eval.shape[0]):
            if eval0[r] < 1e-12:
                eval[r] = 0
                nzero += 1
            else:
                direction = evec0[:,r]
                tmax1 = -self.distance_to_plane(avg, -direction)
                tmax2 = +self.distance_to_plane(avg, +direction)
                #print 'tmax', tmax1, tmax2
                eval[r] = (tmax2 - tmax1) / np.sqrt(12)

        evec[:] = evec0
        #print 'eval(inside)', eval
        if nzero != self.eq_count:
            Log( '!'*80 )
            Log( 'ERROR:', 'Expected number of zero length eigenvectors (%i) to equal number of equality constraints (%i)' % (nzero, self.eq_count) )
            Log( '!'*80 )
            sys.exit(0)
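
The factor sqrt(12) comes from treating the chord between the two boundary hits as the support of a uniform distribution: a uniform variable on an interval of width w has standard deviation w/sqrt(12), so (tmax2 - tmax1)/sqrt(12) is the spread the walk would show if samples filled that chord uniformly. A quick numerical check:

import numpy as np

w = 4.0                                    # chord width, i.e. tmax2 - tmax1
samples = np.random.uniform(0.0, w, 1000000)
print(samples.std())                       # ~1.1547
print(w / np.sqrt(12))                     # 1.1547...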
Example #3
def rwalk(id, nmodels, samplex, q, cmdq, vec, twiddle, eval, evec, seed):

    S = np.zeros(samplex.eqs.shape[0])
    S0 = np.zeros(samplex.eqs.shape[0])

    vec = vec.copy('A')
    eqs = samplex.eqs.copy('A')

    accepted = 0
    rejected = 0

    offs = ' ' * 39
    Log(offs + 'STARTING rwalk THREAD %i [this thread makes %i models]' %
        (id, nmodels),
        overwritable=True)

    eqs[:, 1:] = np.dot(samplex.eqs[:, 1:], evec)

    #csamplex.set_rwalk_seed(1 + id + samplex.random_seed)
    if seed is not None: csamplex.set_rwalk_seed(1 + seed)

    log_time = time.clock()

    offs = ' ' * 36
    state = ''
    for i in xrange(nmodels):

        accepted = 0
        rejected = 0

        done = False

        vec[:] = np.dot(evec.T, vec)
        accepted, rejected, t = csamplex.rwalk(samplex, eqs, vec, eval, S, S0,
                                               twiddle, accepted, rejected)
        vec[:] = np.dot(evec, vec)

        r = float(accepted) / (accepted + rejected)  # float() avoids integer division under Python 2

        if time.clock() - log_time > 3:
            Log(offs +
                '% 2s THREAD %3i  %i  %4.1f%% accepted  (%6i/%6i Acc/Rej)  twiddle %5.2f  time %5.3fs  %i left.'
                % (state, id, i, 100 * r, accepted, rejected, twiddle, t,
                   nmodels - i),
                overwritable=True)
            log_time = time.clock()

        #print ' '*36, '% 2s THREAD %3i  %i  %4.1f%% accepted  (%6i/%6i Acc/Rej)  twiddle %5.2f  time %5.3fs  %i left.' % (state, id, i, 100*r, accepted, rejected, twiddle, t, nmodels-i)
        assert np.all(vec >= 0), vec[vec < 0]
        #if numpy.any(vec < 0): sys.exit(0)

        samplex.project(vec)

        q.put([id, vec.copy('A')])
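
The two np.dot calls around csamplex.rwalk are just a change of basis: for an orthonormal eigenvector matrix evec, multiplying by evec.T maps the point into the eigenbasis (where the step sizes eval apply) and multiplying by evec maps it back, so the round trip is exact. A small check of that property, assuming only that evec is orthonormal:

import numpy as np

np.random.seed(0)
evec, _ = np.linalg.qr(np.random.randn(5, 5))  # any orthonormal matrix
vec = np.random.randn(5)

rotated  = np.dot(evec.T, vec)     # into the eigenbasis
restored = np.dot(evec, rotated)   # back to the original coordinates
print(np.allclose(restored, vec))  # True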
Example #4
def mass_plot(model,
              obj_index,
              with_contours=True,
              only_contours=False,
              clevels=30):
    Log("WARNING: use of mass_plot is deprecated. Use kappa_plot instead.")
    return kappa_plot(model, obj_index, with_contours, only_contours, clevels)
Example #5
def sigpf(objmodel, vdisp, tol, chisq_cut):
    """Return True if chi-squared value for the object's sigp is <= chisq_cut."""
    obj, data = objmodel
    chisq = (vdisp - data['sigp:sigp_sing'])**2 / tol**2
    Log('chi2 is %f' % chisq)
    data['sigp:chisq'] = chisq
    return chisq <= chisq_cut
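
The statistic is a one-parameter chi-squared, (vdisp - model)^2 / tol^2, with tol playing the role of the measurement uncertainty; a chisq_cut of 1 therefore keeps models whose predicted dispersion lies within one sigma of the observed value. A tiny numerical illustration (the numbers are made up):

vdisp, model_sigp, tol = 200.0, 190.0, 15.0    # km/s, hypothetical values
chisq = (vdisp - model_sigp) ** 2 / tol ** 2   # ~0.44
print(chisq <= 1.0)                            # True: within one sigma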
Example #6
        def adjust_threads(i, cont_cmd):
            pause_threads(threads)
            drainq(q)
            Log( 'Computing eigenvalues... [%i/%i]' % (i, burnin_len) )
            self.compute_eval_evec(store, eval, evec, n_stored)

            # new twiddle <-- average twiddle
            t = 0
            for _,cmdq,ackq in threads:
                cmdq.put(['REQ TWIDDLE'])
                t += ackq.get()
            t /= len(threads)

            Log( 'New twiddle %f' % t )
            for _,cmdq,_ in threads:
                cmdq.put(['NEW DATA', [eval.copy('A'), evec.copy('A'), t]])
                cmdq.put([cont_cmd])
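
adjust_threads talks to the workers through a simple request/acknowledge pattern: a ['REQ TWIDDLE'] command on each worker's command queue is answered on its ack queue, and the averaged value is then broadcast back with a 'NEW DATA' message followed by the continue command. A stripped-down sketch of that round trip with a single hypothetical worker (not the GLASS thread code itself):

import multiprocessing as mp

def worker(cmdq, ackq):
    twiddle = 2.4
    while True:
        cmd = cmdq.get()
        if cmd[0] == 'REQ TWIDDLE':
            ackq.put(twiddle)
        elif cmd[0] == 'NEW DATA':
            twiddle = cmd[1]
        elif cmd[0] == 'STOP':
            break

if __name__ == '__main__':
    cmdq, ackq = mp.Queue(), mp.Queue()
    p = mp.Process(target=worker, args=(cmdq, ackq))
    p.start()
    cmdq.put(['REQ TWIDDLE'])
    t = ackq.get()              # collect, average over all workers, ...
    cmdq.put(['NEW DATA', t])   # ... then broadcast the new value
    cmdq.put(['STOP'])
    p.join()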
Example #7
    def start(self):

        def eq_key(x):
            if x[0] == 'geq': return 2
            if x[0] == 'leq': return 1
            if x[0] == 'eq':  return 0
            assert False, 'Bad function %s' % str(x[0])

        self.eq_list.sort(key=eq_key)

        if 0:
            def print_array(out, fs, arr):
                out.write("%s %i " % (fs, len(arr)));
                for i in arr: 
                    if i in [0,1,-1]:
                        out.write("%10i " % i)
                    else:
                        out.write("%.4e " % i)
                out.write("\n")
            Log( 'Writing out equations...' )
            out = open('eqs-new', 'w')
            for f,a in self.eq_list:
                if f == self._eq:  fs = 'eq'
                if f == self._geq: fs = 'geq'
                if f == self._leq: fs = 'leq'
                print_array(out, fs, a)
            out.close()
            Log( 'done.' )


        self.eqn_count = self.eq_count + self.geq_count + self.leq_count

        if 0:
            import pylab as pl
            m = np.empty((len(self.eq_list), len(self.eq_list[0][1])))
            print m.shape
            for i,e in enumerate(self.eq_list):
                f,a = e
                m[i] = a
                if f == self._eq:  m[i][m[i] != 0] = 1
                if f == self._geq: m[i][m[i] != 0] = 2
                if f == self._leq: m[i][m[i] != 0] = 3
            #m[m != 0] = 1
            pl.matshow(m)
            pl.show()
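
eq_key orders the constraint list so that all 'eq' rows come first, then 'leq', then 'geq'; later code (for example slicing self.eq_list[:self.eq_count] to build the equality matrix) depends on that ordering. A quick illustration of the resulting order:

def eq_key(x):
    return {'eq': 0, 'leq': 1, 'geq': 2}[x[0]]

rows = [('geq', 'g1'), ('eq', 'e1'), ('leq', 'l1'), ('eq', 'e2')]
rows.sort(key=eq_key)
print([kind for kind, _ in rows])   # ['eq', 'eq', 'leq', 'geq']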
Example #8
def _filter_one(arg):
    model, i, nmodels = arg
    for obj, data in model['obj,data']:
        if obj.post_filter_funcs:
            Log('Post filtering ... Model %i/%i Object %s' %
                (i + 1, nmodels, obj.name))
            for f, args, kwargs in obj.post_filter_funcs:
                if not f([obj, data], *args, **kwargs): return False
    return True
Example #9
def model(env, nmodels=None, *args, **kwargs):

    Log('=' * 80)
    Log('GLASS version 0.1  %s' % time.asctime())
    Log('=' * 80)

    for o in env.objects:
        o.init()

    report(env)

    #init_model_generator(nmodels)

    if env.models is None:
        env.models = []
        env.solutions = []

    models = []
    solutions = []

    if nmodels is None:
        m = {
            'sol': None,
            'obj,data': [[o, {}] for o in env.objects],
            'tagged': False
        }
        models.append(m)
    else:
        for i, m in enumerate(
                generate_models(env.objects, nmodels, *args, **kwargs)):
            Log('Model %i/%i complete.' % (i + 1, nmodels), overwritable=True)
            models.append(m)
            solutions.append(m['sol'])
            #print 'glcmds.py:model ???', id(m['sol'])

        Log('Generated %i model(s).' % len(models))
        _post_process(models)

    env.models.extend(models)
    env.solutions.extend(solutions)
    env.accepted_models = _filter(env.models)
Example #10
    def inner_point(self, newp):

        lp = lpsolve('make_lp', 0, self.nVars+1) # +1 for variable used to find the first inner point
        lpsolve('set_epsb', lp, 1e-14)
        lpsolve('set_epsd', lp, 1e-14)
        lpsolve('set_epsint', lp, 1e-14)
        lpsolve('set_epsel', lp, 1e-8)
        lpsolve('set_verbose', lp, FULL)
        lpsolve('set_sense', lp, False)

        for eq,a in self.eq_list:
            l = (a[1:]).tolist()
            if eq ==  'eq': l.append(0); lpsolve('add_constraint', lp, l, EQ, -a[0])
            if eq == 'leq': l.append(1); lpsolve('add_constraint', lp, l, LE, -a[0])
            if eq == 'geq': l.append(1); lpsolve('add_constraint', lp, l, GE, -a[0])

        for i in range(self.nVars):
            q = np.zeros(self.nVars+1)
            q[[i,-1]] = -1, 1
            lpsolve('add_constraint', lp, q.tolist(), LE, 0)

        o = np.zeros(self.nVars+1)
        o[-1] = 1
        lpsolve('set_obj_fn', lp, o.tolist())
        while True:
            result = lpsolve('solve', lp)
            if   result in [OPTIMAL, TIMEOUT]:   break
            elif result == SUBOPTIMAL: continue
            elif result == INFEASIBLE: raise SamplexNoSolutionError()
            elif result == UNBOUNDED: raise SamplexUnboundedError()
            else:
                Log( result )
                raise SamplexUnexpectedError("unknown pivot result %i from linear solver." % result)

        objv  = np.array(lpsolve('get_objective', lp))
        v1    = np.array(lpsolve('get_variables', lp)[0])
        assert len(v1) == lpsolve('get_Norig_columns', lp)
        assert len(v1) == self.nVars+1
        del lp

        v1 = v1[:-1] # Remove the temporary variable that tracks the distance from the simplex boundary
        v1[np.abs(v1) < 1e-14] = 0
        assert np.all(v1 >= 0), v1[v1 < 0]

        ok,fail_count = self.in_simplex(v1, eq_tol=1e-12, tol=0, verbose=1)
        ok,fail_count = self.in_simplex(v1, eq_tol=1e-12, tol=-1e-13, verbose=1)
        assert ok, len(fail_count)
        newp[:] = v1
        self.project(newp)
        ok,fail_count = self.in_simplex(newp, eq_tol=1e-12, tol=0, verbose=1)
        ok,fail_count = self.in_simplex(newp, eq_tol=1e-12, tol=-1e-5, verbose=1)
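
inner_point finds a strictly interior starting point by adding one auxiliary variable that measures the margin from the simplex boundary and asking the LP solver to push that margin as far as possible. The same trick can be sketched with scipy.optimize.linprog on a toy 2-D simplex (this only illustrates the auxiliary-margin idea; it is not the exact sign convention or solver GLASS feeds to lpsolve):

import numpy as np
from scipy.optimize import linprog

# Toy simplex {x >= 0, x1 + x2 <= 1} written as A x <= b.
A = np.array([[1.0, 1.0]])
b = np.array([1.0])

# Add a margin variable s: require A x + s <= b and s <= x_i, then maximize s.
# Any solution with s > 0 lies strictly inside the simplex.
A_ub = np.vstack([np.hstack([A, np.ones((1, 1))]),
                  np.hstack([-np.eye(2), np.ones((2, 1))])])
b_ub = np.concatenate([b, np.zeros(2)])
c = np.array([0.0, 0.0, -1.0])            # minimize -s  ==  maximize s

res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=(0, None))
print(res.x[:2])    # interior point, ~[0.333, 0.333]
print(res.x[2])     # margin from the boundary, ~0.333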
Example #11
def _post_process(models):
    nmodels = len(models)
    nProcessed = 0
    for i, m in enumerate(models):
        has_ppfs = False
        for o, data in m['obj,data']:
            if o.post_process_funcs:
                has_ppfs = True
                #print 'Post processing ... Model %i/%i Object %s' % (i+1, nmodels, o.name)
                for f, args, kwargs in o.post_process_funcs:
                    f((o, data), *args, **kwargs)
        nProcessed += has_ppfs
    Log('Post processed %i model(s), %i had post processing functions applied.'
        % (nmodels, nProcessed))
Example #12
def reprocess(env, state_file):
    for o in env.objects:
        Log(o.name)
        o.init()

    e = loadstate(state_file, setenv=False)
    env.solutions = e.solutions

    #init_model_generator(len(env.solutions))

    env.models = [m for m in regenerate_models(env.objects)]
    _post_process(env.models)

    #env.models = parallel_map(_f, regenerate_models(env.objects), threads=10)
    env.accepted_models = _filter(env.models)
Example #13
def savestate(env, fname):

    Log('Saving state to %s' % fname)

    env.meta_info['glheader'] = '\n'.join(
        ['GLASS version 0.1',
         'CREATED ON: %s' % time.asctime()])

    #ppf = env.post_process_funcs
    #pff = env.post_filter_funcs
    #env.post_process_funcs = []
    #env.post_filter_funcs = []

    import numpy as np
    with open(fname, 'w') as f:
        savez(f, env)
Example #14
def XXXreprocess(state_file):
    for o in env.objects:
        Log(o.name)
        o.init()

    env.solutions = loadstate(state_file, setenv=False).solutions

    init_model_generator(len(env.solutions))

    env.models = []
    for i, m in enumerate(regenerate_models(env.objects)):
        for o, data in m['obj,data']:
            for f, args, kwargs in o.post_process_funcs:
                f((o, data), *args, **kwargs)

        env.models.append(m)

    env.accepted_models = env.models
Example #15
def generate_models(env, objs, n, *args, **kwargs):

    #if n <= 0: return

    mode = kwargs.get('mode', 'default')

    if mode == 'particles':
        assert n == 1, 'Can only generate a single model in particles mode.'
        assert len(objs) == 1, 'Can only model a single object from particles.'
        data = kwargs.get('data', None)
        assert data is not None, 'data keyword must be given with model parameters.'
        objs[0].basis.array_offset = 1
        ps = _particle_model(objs[0], *data)

        if opts.get('solver', None):
            init_model_generator(env, n)
            check_model(objs, ps)

        yield ps

    elif mode == 'grid':
        assert n == 1, 'Can only generate a single model in grid mode.'
        assert len(objs) == 1, 'Can only model a single object from a grid.'
        data = kwargs.get('data', None)
        assert data is not None, 'data keyword must be given with model parameters.'
        objs[0].basis.array_offset = 1
        ps = _grid_model(objs[0], *data)

        if opts.get('solver', None):
            init_model_generator(env, n)
            check_model(objs, ps)

        yield ps

    elif mode == 'isothermal':
        assert n == 1, 'Can only generate a single model in isothermal mode.'
        assert len(objs) == 1, 'Can only model a single object from isothermal.'
        data = kwargs.get('data', None)
        assert data is not None, 'data keyword must be given with model parameters.'
        objs[0].basis.array_offset = 1

        ps = objs[0].basis.solution_isothermal(*data)
        m = {
            'sol': None,
            'obj,data': [[objs[0], ps]],
            'obj,sol': None,
            'tagged': False
        }
        for od in m['obj,data']:
            default_post_process(od)
        yield m

    elif mode != 'default':
        assert False, 'Unsupported model mode "%s"' % mode
    else:

        if opts.get('solver', None):
            init_model_generator(env, n)
            mg = env.model_gen
            mg.start()
            try:
                for sol in mg.next(n):
                    ps = package_solution(sol, objs)
                    check_model(objs, ps)
                    yield ps
            except GlassSolverError as e:
                Log('!' * 80)
                Log('Unable to generate models:', str(e))
                Log('!' * 80)
Example #16
def init_model_generator(env, nmodels, regenerate=False):
    """Construct the linear constraint equations by applying all the
       enabled priors."""

    objs = env.objects

    # -------------

    nvars = reduce(lambda s, o: s + o.basis.nvar_symm, objs, 0)
    Log("Number of variables (nvars) = %i" % nvars)

    offs = 1  # Reserve an index for the constant in the constraint
    for o in objs:
        o.basis.array_offset = offs
        offs += o.basis.nvar_symm

    # -------------

    Log('=' * 80)
    Log('PIXEL BASIS MODEL GENERATOR')
    Log('=' * 80)
    #   if nmodels == 0:
    #       Log( "No models requested." )
    #       return

    #---------------------------------------------------------------------------
    # Decide which priors to use. The initial list is the list of default
    # priors. The user can then modify this list by selecting which priors
    # should be included from the entire list, or which ones should be excluded.
    #
    #---------------------------------------------------------------------------
    priors = def_priors

    if exc_priors:
        priors = filter(lambda x: x not in exc_priors, priors)

    if inc_priors:
        priors += filter(lambda x: x not in priors, inc_priors)

    Log('Priors:')
    for p in all_priors:
        Log('%10s %s' %
            ('[EXCLUDED]' if p not in priors else '', p.f.__name__))

    lp = filter(lambda x: x.where == 'object_prior', priors)
    gp = filter(lambda x: x.where == 'ensemble_prior', priors)

    #---------------------------------------------------------------------------
    # Initialize our model generator, the simplex.
    #---------------------------------------------------------------------------
    opts = env.model_gen_options
    opts['ncols'] = nvars
    if not opts.has_key('nthreads'):
        opts['nthreads'] = Environment.global_opts['ncpus']

    #mg = env.model_gen = env.model_gen_factory(env.model_gen_options)
    mg = env.model_gen = Samplex(**env.model_gen_options)

    #---------------------------------------------------------------------------
    # Apply the object priors
    #---------------------------------------------------------------------------
    Log('Applying Priors:')
    for o in objs:
        offs = o.basis.array_offset
        Log('array offset %i' % offs)
        if o.symm:
            symm = lambda x: symm_fold(o, x)
        else:
            symm = None
        for p in lp:
            leq = _expand_array(nvars, offs, mg.leq, symm)
            eq = _expand_array(nvars, offs, mg.eq, symm)
            geq = _expand_array(nvars, offs, mg.geq, symm)
            p.f(o, leq, eq, geq)

    #---------------------------------------------------------------------------
    # Apply the ensemble priors
    #---------------------------------------------------------------------------
    for p in gp:
        p.f(objs, nvars, mg.leq, mg.eq, mg.geq)

    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------

    global acc_objpriors
    global acc_enspriors
    del acc_objpriors[:]
    del acc_enspriors[:]

    acc_objpriors += lp
    acc_enspriors += gp
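
The prior selection above starts from the default list, drops anything in exc_priors, and then appends any inc_priors not already present, so exclusions are applied first and duplicates are avoided. A self-contained sketch of that selection logic, with plain strings standing in for the prior objects (the names are hypothetical):

def_priors = ['steepness', 'smoothness', 'shear']
inc_priors = ['external_mass']
exc_priors = ['shear']

priors = [p for p in def_priors if p not in exc_priors]
priors += [p for p in inc_priors if p not in priors]
print(priors)   # ['steepness', 'smoothness', 'external_mass']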
Example #17
def sigp(objmodel,
         lightC,
         lpars,
         aperture,
         beta,
         alphalim=3.5,
         interpnts=None,
         intpnts=None,
         rspan=None):

    obj, data = objmodel

    arcsec2kpc = convert('arcsec to kpc', 1, obj.dL, data['nu'])

    rho3d(objmodel,
          alphalim=alphalim,
          interpnts=interpnts,
          intpnts=intpnts,
          rspan=rspan)
    r = data['rho3d:r']
    rho = data['rho3d:rho']
    drho = data['rho3d:drho']
    mass3d = data['rho3d:mass']
    mass2d = data['M(<R)']
    R = data['R']['kpc']
    sigma = data['Sigma(R)']

    aperture_phys = aperture * arcsec2kpc
    lpars_phys = lpars[:]
    #lpars_phys[1] = lpars_phys[1] * arcsec2kpc

    #print aperture_phys, lpars_phys

    #-------------------------------------------------------------------------
    # Calculate the integral to obtain sigp(r)
    #-------------------------------------------------------------------------
    #units of M=Msun, L=kpc, V=km/s:
    Gsp = 6.67e-11 * 1.989e30 / 3.086e19
    #light.set_pars(lpars_phys)
    light = lightC(lpars_phys, intpnts)

    sigp = sigpsolve(r, rho, mass3d, R, sigma, mass2d, integrator, intpnts,
                     alphalim, Gsp, light, beta) / 1000
    sigpsing = sigpsingle(r, sigp, light, aperture_phys, integrator)

    #rhint   = rhoint(imagemin,imagemax,alphalim,r,rho,mass3d,r)

    #   sigpa     = sigpsolve(r,rhoa,mass3da,integrator,intpnts,alphalim,Gsp,light,lpars_phys,beta) / 1000
    #   sigpsinga = sigpsingle(r,sigpa,light,lpars_phys,aperture_phys,integrator)
    #   rhinta   = rhoint(imagemin,imagemax,alphalim,r,rhoa,mass3da,r)
    #   drhoa    = dlnrhodlnr(r,rhinta)

    #data['sigp:rhoint'   ] = rhint
    data['sigp:sigp'] = sigp
    data['sigp:sigp_sing'] = sigpsing
    data['sigp:scale-factor'] = lpars_phys[1]

    #   data['sigp:rhoa'      ] = rhoa
    #   data['sigp:rhointa'   ] = rhinta
    #   data['sigp:drhoa'     ] = drhoa
    #   data['sigp:mass3da'   ] = mass3da
    #   data['sigp:sigpa'     ] = sigpa
    #   data['sigp:sigp_singa'] = sigpsinga

    #print data['R_phys']
    #print data['sigma_phys']
    #print data['encmass_phys']

    Log('Final rms mean projected vel. dispersion: %f' % sigpsing)
Example #18
def rwalk_burnin(id, nmodels, burnin_len, samplex, q, cmdq, ackq, vec, twiddle, eval,evec, seed):

    lclq = []

    S   = np.zeros(samplex.eqs.shape[0])
    S0  = np.zeros(samplex.eqs.shape[0])

    vec  = vec.copy('A')
    eval = eval.copy('A')
    evec = evec.copy('A')
    eqs  = samplex.eqs.copy('A')

    accepted = 0
    rejected = 0

    offs = ' '*39
    Log( offs + 'STARTING rwalk_burnin THREAD %i' % id, overwritable=True)

    eqs[:,1:] = np.dot(samplex.eqs[:,1:], evec)
    #vec[:] = np.dot(evec.T, vec)
    I = np.eye(evec.shape[0]).copy('F')

    #csamplex.set_rwalk_seed(1 + id + samplex.random_seed)
    csamplex.set_rwalk_seed(1 + seed)

    log_time = time.clock()

    #t0=0
    #t1=time.clock()

    offs = ' '*36
    i = 0
    j=0
    while True:
        j+= 1

        #t0=time.clock()

        put_immediate = False
        done = False
        try:
            while not done:
                #if 1:
                cmd = cmdq.get()
                if cmd[0] == 'CONT':
                    break
#                   if lclq:
#                       q.put([id,lclq])
#                       lclq = []
#                   else:
#                       put_immediate = True
                elif cmd[0] == 'NEW DATA':
                    eval[:],evec[:],twiddle = cmd[1]
                    eqs[:,1:] = np.dot(samplex.eqs[:,1:], evec)
                    lclq = []
                    i=0
                elif cmd[0] == 'REQ TWIDDLE':
                    ackq.put(twiddle)
                elif cmd[0] == 'WAIT':
                    ackq.put('OK')
                elif cmd[0] == 'STOP':
                    done = True
                elif cmd[0] == 'RWALK':
                    done = True
                else:
                    print 'Unknown cmd:', cmd
        except QueueEmpty:
            pass

        if done:
            break

        while len(lclq) < 10:
            vec[:] = np.dot(evec.T, vec)

            #t1=time.clock()


            accepted = False
            while not accepted:
                Naccepted = 0
                Nrejected = 0

                Naccepted,Nrejected,t = csamplex.rwalk(samplex, eqs, vec,eval,S,S0, twiddle, Naccepted,Nrejected)

                r = float(Naccepted) / (Naccepted + Nrejected)  # float() avoids integer division under Python 2

                #-------------------------------------------------------------------
                # If the actual acceptance rate was OK then leave this loop,
                # otherwise change our step size twiddle factor to improve the rate.
                # Even if the acceptance rate was OK, we adjust the twiddle but only
                # with a certain probability. This drives the acceptance rate to 
                # the specified one even if we are within the tolerance but doesn't
                # throw away the results if we are not so close. This allows for
                # a larger tolerance.
                #-------------------------------------------------------------------
                accepted =  np.abs(r - samplex.accept_rate) < samplex.accept_rate_tol

                state = 'B'
                if not accepted:
                    twiddle *= 1 + ((r-samplex.accept_rate) / samplex.accept_rate / 2)
                    #twiddle *= (r/samplex.accept_rate)
                    twiddle = max(1e-14,twiddle)
                    state = 'R' + state

                if time.clock() - log_time > 3:
                    msg = 'THREAD %3i]  %i/%i  %4.1f%% accepted  (%6i/%6i Acc/Rej)  twiddle %5.2f  time %5.3fs backlog %i' % (id, i, burnin_len, 100*r, Naccepted, Nrejected, twiddle, t, len(lclq))
                    Log( offs + '% 2s %s' % (state, msg), overwritable=True )
                    log_time = time.clock()

                #print ' '*36, '% 2s %s' % (state, msg)

            #print 'thread %i, %f' % (id,t1-t0)

            vec[:] = np.dot(evec, vec)

            if random() < np.abs(r - samplex.accept_rate)/samplex.accept_rate_tol:
                twiddle *= 1 + ((r-samplex.accept_rate) / samplex.accept_rate / 2)
                #twiddle *= (r/samplex.accept_rate)
                twiddle = max(1e-14,twiddle)

            assert np.all(vec >= 0), vec[vec < 0]
            #if np.any(vec < 0): sys.exit(0)

            samplex.project(vec)

            i += 1
#           if put_immediate:
#               q.put([id,[vec.copy('A')]])
#           else:
            lclq.append(vec.copy('A'))

        q.put([id,lclq,'BURNIN'])
        lclq = []



    time_begin = time.clock()
    if cmd[0] == 'RWALK':
        rwalk(id, nmodels, samplex, q, cmdq, vec, twiddle, eval, evec, seed=None)
    time_end = time.clock()

    cmd = cmdq.get()
    assert cmd[0] == 'STOP', cmd[0]
    ackq.put(['TIME', time_end-time_begin])
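
The step-size ('twiddle') update used above is a damped proportional rule: if the observed acceptance rate r exceeds the target the steps grow, if it falls short they shrink, with the correction halved and the value clamped away from zero. The rule in isolation:

def adjust_twiddle(twiddle, r, target):
    # Proportional nudge toward the target acceptance rate, clamped away from zero.
    twiddle *= 1 + (r - target) / target / 2
    return max(1e-14, twiddle)

print(adjust_twiddle(2.4, 0.10, 0.25))  # too few accepted  -> smaller steps (~1.68)
print(adjust_twiddle(2.4, 0.40, 0.25))  # too many accepted -> larger steps (~3.12)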
Example #19
def report(env):
    Log('=' * 80)
    Log('COSMOLOGY')
    Log('=' * 80)
    Log(pp('Omega Matter = %.4g' % env.omega_matter, ''))
    Log(pp('Omega Lambda = %.4g' % env.omega_lambda, ''))
    if (hasattr(env.nu, '__len__') and any(env.nu)) or env.nu:
        Log(
            pp(
                'H0inv        = %s' %
                str_range(convert('nu to H0^-1 in Gyr', env.nu), '%.4g'),
                '[Gyr]'))
        Log(
            pp(
                'H0           = %s' %
                str_range(convert('nu to H0 in km/s/Mpc', env.nu), '%.4g'),
                '[km/s/Mpc]'))
    Log(pp('H0inv ref    = %s' % str_range(env.H0inv_ref, '%.4g'), '[Gyr]'))
    Log(pp('filled_beam  = %s' % env.filled_beam, ''))
    Log()
    Log('=' * 80)
    Log('OBJECTS')
    Log('=' * 80)
    H0inv_ref_as_nu = convert('H0^-1 in Gyr to nu', env.H0inv_ref)
    for i, o in enumerate(env.objects):
        Log(
            pp(
                '%i. %s at z=%.4g  Distance(Obs->Lens) = %.4f' %
                (i + 1, o.name, o.z, angdist(env, 0, o.z)), ''))
        if o.maprad:
            Log(pp('    Map radius       = %.4g' % o.maprad, '[arcsec]'))
            Log(
                pp(
                    '    Map radius       = %.4g (H0inv=%4.1f)' %
                    (convert('arcsec to kpc', o.maprad, o.dL,
                             H0inv_ref_as_nu), env.H0inv_ref), '[kpc]'))
        else:
            Log(pp('    Map radius       = Not specified', ''))
        #Log( pp('    Time scale            = %.4g' % o.scales['time'],    '[g days/arcsec^2]') )
        #Log( pp('    Angular distance      = %.4g' % o.scales['angdist'], '[g kpc/arcsec]') )
        #Log( pp('    Critical density      = %.4e' % convert('kappa to Msun/arcsec^2', 1, o.dL, '[Msun/arcsec^2]') )
        Log( pp('    Critical density = %.4e (H0inv=%.1f)' \
            % (convert('kappa to Msun/kpc^2', 1, o.dL, H0inv_ref_as_nu), env.H0inv_ref), '[Msun/kpc^2]') )
        #       if o.shear:
        #           pass
        #           #Log( pp('    Shear                 = %.4g' % o.shear.phi, '') )
        #       else:
        #           Log( pp('    NO SHEAR', '') )
        #           #Log( pp('    Shear                 = Not specified', '') )
        #       Log( pp('    Steepness             = %s' % str_range(o.steep, '%.4g'), '') )
        Log()
        for src in o.sources:
            Log('    Source at z=%.4f %s' %
                (src.z, '[NO IMAGES]' if len(src.images) == 0 else ''))
            Log(
                pp(
                    '        Distance (Obs->Src)  = %.4f' %
                    angdist(env, 0, src.z), '[arcsec]'))
            Log(
                pp(
                    '        Distance (Lens->Src) = %.4f' %
                    angdist(env, o.z, src.z), '[arcsec]'))
            Log(pp('        Dos/Dls              = %.4f' % src.zcap, ''))
            for img in src.images:
                Log('        Image at (% .3f,% .3f) : |*|=% 5.3f angle=% 8.3f parity=%s '
                    % (img.pos.real, img.pos.imag, abs(
                        img.pos), img.angle, img.parity_name))
            #for img in src.images:
            #    Log( '        Image at (% .3f,% .3f) : angle=% 8.3f parity=%s elongation=[%.4g,%.4g,%.4g]'
            #        % (img.pos.real, img.pos.imag, img.angle, img.parity_name, img.elongation[0], img.elongation[1], img.elongation[2]) )

    Log()
    Log('=' * 80)
    Log('MISCELLANEOUS')
    Log('=' * 80)
    Log('Graphics %s' %
        ('enabled' if Environment.global_opts['withgfx'] else 'disabled'))
    Log()
    Log('=' * 80)
    Log('SYSTEM')
    Log('=' * 80)
    Log('Number of CPUs detected = %i' %
        Environment.global_opts['ncpus_detected'])
    Log('Number of CPUs used     = %i' % Environment.global_opts['ncpus'])
    oo = Environment.global_opts['omp_opts']
    if oo:
        Log('OpenMP supported. Compiling with "%s"' %
            ' '.join(oo['extra_compile_args'] + oo['extra_link_args']))
    else:
        Log('OpenMP not supported.')
    Log()
Example #20
def H0inv_plot(env, **kwargs):
    _hist(env, '1/H0', xlabel=r'$H_0^{-1}$ (Gyr)', **kwargs)
    return

    models = kwargs.pop('models', env.models)
    obj_index = kwargs.pop('obj_index', 0)
    key = kwargs.pop('key', 'accepted')
    xlabel = kwargs.pop('xlabel', r'$H_0^{-1}$ (Gyr)')
    ylabel = kwargs.pop('ylabel', r'Count')

    # select a list to append to based on the 'accepted' property.
    l = [[], [], []]
    for m in models:
        obj, data = m['obj,data'][0]  # For H0inv we only have to look at one model because the others are the same
        l[m.get(key, 2)].append(data['1/H0'])
        #l[2].append(data['kappa'][1])

    #print amin(l[2]), amax(l[2])

    not_accepted, accepted, notag = l

    #print 'H0inv_plot',H0s

    for d, s in zip(l, _styles):
        if d:
            #print len(d), d, np.ptp(d), np.sqrt(len(d))
            #pl.hist(d, bins=20, histtype='step', edgecolor=s['c'], zorder=s['z'], label=s['label'])
            pl.hist(d,
                    bins=int(np.ptp(d) // 1) + 1,
                    histtype='step',
                    edgecolor=s['c'],
                    zorder=s['z'],
                    label=s['label'],
                    **kwargs)

    #if not_accepted or accepted:
    #pl.legend()

    pl.axvline(13.7, c='k', ls=':', zorder=2)

    pl.xlabel(xlabel)
    pl.ylabel(ylabel)

    if accepted or not not_accepted:
        if accepted:
            h = np.array(accepted)
        else:
            h = np.array(accepted + notag)

        hs = np.sort(h)
        l = len(hs)

        m = hs[int(l * 0.50)]
        u = hs[int(l * (0.50 + 0.341))]
        l = hs[int(l * (0.50 - 0.341))]
        #u = hs[l * 0.68]
        #l = hs[l * 0.32]

        pl.axvline(m, c='r', ls='-', zorder=2)
        pl.axvline(u, c='g', ls='-', zorder=2)
        pl.axvline(l, c='g', ls='-', zorder=2)

        Log('H0inv_plot: %f %f %f' % (m, u, l))
        Log('H0inv_plot: %f %f %f' % (m, (u - m), (m - l)))
    else:
        Log("H0inv_plot: No H0inv values accepted")
Example #21
    def next(self, nsolutions=None):

        Log( '=' * 80 )
        Log( 'Simplex Random Walk' )
        Log( '=' * 80 )

        Log( "    %i equations" % len(self.eq_list) )

        Log( "%6s %6s %6s\n%6i %6i %6i" 
            % (">=", "<=", "=", self.geq_count, self.leq_count, self.eq_count) )


        if nsolutions == 0: return

        assert nsolutions is not None

        dim = self.nVars
        dof = dim - self.eq_count

        burnin_len  = max(10, int(self.burnin_factor * dof))
        redo        = max(100,  int((dof ** self.redo_exp) * self.redo_factor))

        nmodels = nsolutions
        nthreads = self.nthreads

        self.stride = int(dim+1)

        n_stored = 0
        self.dim = dim
        self.dof = dof
        self.redo = redo

        self.burnin_len = burnin_len

        accept_rate     = self.accept_rate
        accept_rate_tol = self.accept_rate_tol

        store = np.zeros((dim, 1+burnin_len), order='Fortran', dtype=np.float64)
        newp = np.zeros(dim, order='C', dtype=np.float64)
        eval  = np.zeros(dim, order='C', dtype=np.float64)
        evec  = np.zeros((dim,dim), order='F', dtype=np.float64)

        self.eqs = np.zeros((self.eqn_count+dim,dim+1), order='C', dtype=np.float64)
        for i,[c,e] in enumerate(self.eq_list):
            self.eqs[i,:] = e
        for i in xrange(dim):
            self.eqs[self.eqn_count+i,1+i] = 1

        self.dist_eqs = np.zeros((self.eqn_count-self.eq_count,dim+1), order='C', dtype=np.float64)
        i=0
        for c,e in self.eq_list:
            if c == 'eq':
                continue
            elif c == 'leq':
                p = e
            elif c == 'geq':
                p = -e
            self.dist_eqs[i,:] = p
            i += 1

        Log( 'Using lpsolve %s' % lpsolve('lp_solve_version') )
        Log( "random seed = %s" % self.random_seed )
        Log( "threads = %s" % self.nthreads )
        Log( "acceptence rate = %s" % self.accept_rate )
        Log( "acceptence rate tolerance = %s" % self.accept_rate_tol )
        Log( "dof = %s" % self.dof)
        Log( "sample distance = max(100,%s * %s^%s) = %s" % (self.redo_factor, self.dof, self.redo_exp, redo) )
        Log( "starting twiddle = %s" % self.twiddle )
        Log( "burn-in length = %s" % burnin_len )

        time_begin_next = time.clock()

        #-----------------------------------------------------------------------
        # Create pseudo inverse matrix to reproject samples back into the
        # solution space.
        #-----------------------------------------------------------------------
        P = np.eye(dim) 
        if self.eq_count > 0:
            self.A = np.zeros((self.eq_count, dim), order='C', dtype=np.float64)
            self.b = np.zeros(self.eq_count, order='C', dtype=np.float64)
            for i,[c,e] in enumerate(self.eq_list[:self.eq_count]):
                self.A[i] = e[1:]
                self.b[i] = e[0]
            self.Apinv = pinv(self.A)
            P -= np.dot(self.Apinv, self.A)
        else:
            self.A = None
            self.b = None
            self.Apinv = None

        ev, evec = eigh(P)
        #-----------------------------------------------------------------------


        #-----------------------------------------------------------------------
        # Find a point that is completely inside the simplex
        #-----------------------------------------------------------------------
        Log('Finding first inner point')
        time_begin_inner_point = time.clock()
        self.inner_point(newp)
        time_end_inner_point = time.clock()
        ok,fail_count = self.in_simplex(newp, eq_tol=1e-12, tol=0, verbose=1)
        assert ok

        self.avg0 = newp

#       eqs  = self.eqs.copy('A')
#       eqs[:,1:] = np.dot(self.eqs[:,1:], evec)

#       print newp

#       S = zeros(self.eqs.shape[0])
#       newp[:] = np.dot(evec.T, newp)
#       newp0 = newp.copy()
#       steps = newp.copy()
#       for q in range(100):
#           csamplex.refine_center(self, eqs, newp, ev, S, steps)
#           d = newp - newp0
#           #print d
#           print norm(d)
#           #print
#           newp0 = newp.copy()

#       #assert 0
#       newp[:] = np.dot(evec, newp)


        store[:,0] = newp
        n_stored = 1

        q = MP.Queue()

        #-----------------------------------------------------------------------
        # Estimate the eigenvectors of the simplex
        #-----------------------------------------------------------------------
        Log('Estimating eigenvectors')
        time_begin_est_eigenvectors = time.clock()
        self.measured_ev(newp, ev, eval, evec)
        time_end_est_eigenvectors = time.clock()

        #-----------------------------------------------------------------------
        # Now we can start the random walk
        #-----------------------------------------------------------------------

        Log( "Getting solutions" )

        ran_set_seed(self.random_seed)
        seeds = np.random.choice(1000000*nthreads, nthreads, replace=False)

        #-----------------------------------------------------------------------
        # Launch the threads
        #-----------------------------------------------------------------------
        threads = []
        models_per_thread = nmodels // nthreads
        models_under      = nmodels - nthreads*models_per_thread
        id,N = 0,0
        while id < nthreads and N < nmodels:
            n = models_per_thread
            if id < models_under:
                n += 1
            assert n > 0
            Log( 'Thread %i gets %i' % (id,n) )
            cmdq = MP.Queue()
            ackq = MP.Queue()

            thr = MP.Process(target=rwalk_burnin, 
                             args=(id, n, int(np.ceil(burnin_len/nthreads)), self, q, cmdq, ackq, newp, self.twiddle, eval.copy('A'), evec.copy('A'), seeds[id]))
            threads.append([thr,cmdq,ackq])
            N += n
            id += 1

        assert N == nmodels

        for thr,cmdq,_ in threads:
            thr.daemon=True
            thr.start()
            cmdq.put(['CONT'])

        def drainq(q):
            try:
                while True:
                    q.get(block=False)
            except QueueEmpty:
                pass

        def pause_threads(threads):
            for _,cmdq,ackq in threads:
                cmdq.put(['WAIT'])
                assert ackq.get() == 'OK'

        def adjust_threads(i, cont_cmd):
            pause_threads(threads)
            drainq(q)
            Log( 'Computing eigenvalues... [%i/%i]' % (i, burnin_len) )
            self.compute_eval_evec(store, eval, evec, n_stored)

            # new twiddle <-- average twiddle
            t = 0
            for _,cmdq,ackq in threads:
                cmdq.put(['REQ TWIDDLE'])
                t += ackq.get()
            t /= len(threads)

            Log( 'New twiddle %f' % t )
            for _,cmdq,_ in threads:
                cmdq.put(['NEW DATA', [eval.copy('A'), evec.copy('A'), t]])
                cmdq.put([cont_cmd])

        #-----------------------------------------------------------------------
        # Burn-in
        #-----------------------------------------------------------------------
        time_begin_burnin = time.clock()
        compute_eval_window = 2 * self.dof
        j = 0
        k = -1
        while n_stored < burnin_len+1:
            #for i in xrange(burnin_len):
            k,vecs,phase = q.get()

            #print 'Received ', len(vecs), ' from ', k
            for vec in vecs:
                j += 1
                store[:, n_stored] = vec
                n_stored += 1
                if n_stored == burnin_len+1: break

                if j == compute_eval_window:
                    j = 0
                    adjust_threads(n_stored, 'CONT')
                    compute_eval_window = int(0.1*burnin_len + 1)
                    break

            if j != 0 and len(threads) < compute_eval_window:
                threads[k][1].put(['CONT'])

        time_end_burnin = time.clock()

        #-----------------------------------------------------------------------
        # Actual random walk
        #-----------------------------------------------------------------------
        time_begin_get_models = time.clock()
        adjust_threads(burnin_len, 'RWALK')
        i=0
        while i < nmodels:
            k,vec,phase = q.get()
            if phase != 'RWALK': continue
            t = np.zeros(dim+1, order='Fortran', dtype=np.float64)
            t[1:] = vec
            i += 1
            Log( '%i models left to generate' % (nmodels-i), overwritable=True)
            yield t

        time_end_get_models = time.clock()

        #-----------------------------------------------------------------------
        # Stop the threads and get their running times.
        #-----------------------------------------------------------------------
        time_threads = []
        for thr,cmdq,ackq in threads:
            cmdq.put(['STOP'])
            m,t = ackq.get()
            assert m == 'TIME'
            time_threads.append(t)
            #thr.terminate()

        time_end_next = time.clock()

        max_time_threads = np.amax(time_threads) if time_threads else 0
        avg_time_threads = np.mean(time_threads) if time_threads else 0

        Log( '-'*80 )
        Log( 'SAMPLEX TIMINGS' )
        Log( '-'*80 )
        Log( 'Initial inner point    %.2fs' % (time_end_inner_point - time_begin_inner_point) )
        Log( 'Estimate eigenvectors  %.2fs' % (time_end_est_eigenvectors - time_begin_est_eigenvectors) )
        Log( 'Burn-in                %.2fs' % (time_end_burnin - time_begin_burnin) )
        Log( 'Modeling               %.2fs' % (time_end_get_models - time_begin_get_models) )
        Log( 'Max/Avg thread time    %.2fs %.2fs' % (max_time_threads, avg_time_threads) )
        Log( 'Total wall-clock time  %.2fs' % (time_end_next - time_begin_next) )
        Log( '-'*80 )
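
The pseudo-inverse block near the top builds P = I - pinv(A)·A, the orthogonal projector onto the null space of the equality-constraint matrix A (rows e[1:], constants e[0]); eigh(P) then gives eigenvalue 1 for the directions the walk may move in and eigenvalue 0 for the constrained ones. project() itself is not shown in these snippets, so the following only checks the projector's standard properties with numpy:

import numpy as np
from numpy.linalg import pinv

np.random.seed(1)
A = np.random.randn(2, 5)             # two equality-constraint rows in 5-D
P = np.eye(5) - np.dot(pinv(A), A)    # projector onto the null space of A

v = np.random.randn(5)
print(np.allclose(np.dot(A, np.dot(P, v)), 0))  # projected vectors satisfy A x = 0
print(np.allclose(np.dot(P, P), P))             # P is idempotent
print(np.round(np.linalg.eigvalsh(P), 10))      # eigenvalues: two 0s, three 1s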
Example #22
def H0_plot(env, **kwargs):
    _hist(env, 'H0', xlabel=r'$H_0$ (km/s/Mpc)', **kwargs)
    return

    models = kwargs.pop('models', env.models)
    obj_index = kwargs.pop('obj_index', 0)
    key = kwargs.pop('key', 'accepted')

    # select a list to append to based on the 'accepted' property.
    l = [[], [], []]
    for m in models:
        obj, data = m['obj,data'][obj_index]  # For H0 we only have to look at one model because the others are the same
        l[m.get(key, 2)].append(data['H0'])
        #print 'nu', data['nu']
        #l[2].append(data['kappa'][1])

    #print amin(l[2]), amax(l[2])

    not_accepted, accepted, notag = l

    #print 'H0_plot',H0s

    for d, s in zip(l, _styles):
        if d:
            #print len(d), d
            #pl.hist(d, bins=20, histtype='step', edgecolor=s['c'], zorder=s['z'], label=s['label'])
            pl.hist(d,
                    bins=int(np.ptp(d) // 1) + 1,
                    histtype='step',
                    edgecolor=s['c'],
                    zorder=s['z'],
                    label=s['label'],
                    **kwargs)

    if not_accepted or accepted:
        pl.legend()

    #pl.axvline(72, c='k', ls=':', zorder = 2)

    pl.xlabel(_H0_xlabel)
    pl.ylabel(r'Count')

    if accepted or not not_accepted:
        if accepted:
            h = np.array(accepted)
        else:
            h = np.array(accepted + notag)

        hs = np.sort(h)
        l = len(hs)

        m = hs[int(l * 0.50)]
        u = hs[int(l * (0.50 + 0.341))]
        l = hs[int(l * (0.50 - 0.341))]

        pl.axvline(m, c='r', ls='-', zorder=2)
        pl.axvline(u, c='g', ls='-', zorder=2)
        pl.axvline(l, c='g', ls='-', zorder=2)

        Log('H0_plot: %f %f %f' % (m, u, l))
        Log('H0_plot: %f %f %f' % (m, (u - m), (m - l)))
    else:
        Log("H0_plot: No H0 values accepted")

    pl.xlim(xmin=0)
    pl.xlim(xmax=pl.xlim()[1] + 0.01 * (pl.xlim()[1] - pl.xlim()[0]))
    pl.ylim(ymax=pl.ylim()[1] + 0.01 * (pl.ylim()[1] - pl.ylim()[0]))
Example #23
def _hist(env, data_key, **kwargs):

    models = kwargs.pop('models', env.models)
    obj_index = kwargs.pop('obj_index', 0)
    key = kwargs.pop('key', 'accepted')
    label = kwargs.pop('label', None)
    color = kwargs.pop('color', None)
    xlabel = kwargs.pop('xlabel', data_key)
    ylabel = kwargs.pop('ylabel', r'Count')
    sigma = kwargs.pop('sigma', '1sigma')
    mark_sigma = kwargs.pop('mark_sigma', True)

    # select a list to append to based on the 'accepted' property.
    l = [[], [], []]
    for m in models:
        obj, data = m['obj,data'][obj_index]  # For H0 we only have to look at one model because the others are the same
        if data.has_key(data_key):
            l[m.get(key, 2)].append(data[data_key])
        #print 'nu', data['nu']
        #l[2].append(data['kappa'][1])

    #print amin(l[2]), amax(l[2])

    not_accepted, accepted, notag = l

    #print 'H0_plot',H0s

    for d, s in zip(l, _styles):
        kw = kwargs.copy()
        if d:
            #print len(d), d, np.ptp(d), np.sqrt(len(d))
            kw.setdefault('bins', int(np.ptp(d) // 1) + 1)
            kw.setdefault('histtype', 'step')
            #print len(d), d
            #pl.hist(d, bins=20, histtype='step', edgecolor=s['c'], zorder=s['z'], label=s['label'])
            pl.hist(d,
                    edgecolor=s['c'] if color is None else color,
                    zorder=s['z'],
                    label=s['label'] if label is None else label,
                    **kw)

    if not_accepted or label:
        pl.legend()

    if mark_sigma:
        if accepted or notag:
            if accepted:
                h = np.array(accepted)
            else:
                h = np.array(notag)

            m, u, l = dist_range(h, sigma=sigma)

            pl.axvline(m, c='r', ls='-', zorder=2)
            pl.axvline(u, c='g', ls='-', zorder=2)
            pl.axvline(l, c='g', ls='-', zorder=2)

            Log('%s: %f %f %f' % (data_key, m, u, l))
            Log('%s: %f +/- %f %f' % (data_key, m, (u - m), (m - l)))
        else:
            Log("%s: No H0 values accepted" % data_key)

    #pl.axvline(72, c='k', ls=':', zorder = 2)

    pl.xlabel(xlabel)
    pl.ylabel(ylabel)

    pl.xlim(xmax=pl.xlim()[1] + 0.01 * (pl.xlim()[1] - pl.xlim()[0]))
    pl.ylim(ymax=pl.ylim()[1] + 0.01 * (pl.ylim()[1] - pl.ylim()[0]))
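
dist_range is not shown in these snippets, but the older inline code in the H0 plots above (indexing the sorted sample at the 50% and 50% ± 34.1% positions) suggests it returns the median together with the percentiles bracketing the central 68.2%. A hedged sketch of such a helper, assuming exactly that convention:

import numpy as np

def dist_range(values, sigma='1sigma'):
    # Median plus the percentiles bracketing the central 68.2% (or 95.4%) of the sample.
    width = {'1sigma': 34.1, '2sigma': 47.7}[sigma]
    m = np.percentile(values, 50.0)
    u = np.percentile(values, 50.0 + width)
    l = np.percentile(values, 50.0 - width)
    return m, u, l

print(dist_range(np.random.normal(13.7, 1.0, 100000)))  # roughly (13.7, 14.7, 12.7)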
Example #24
def _data_plot(models, X, Y, **kwargs):
    with_legend = False
    use = [0, 0, 0]

    if isinstance(X, basestring): X = [X, None]
    if isinstance(Y, basestring): Y = [Y, None]

    x_prop, x_units = X
    y_prop, y_units = Y

    ret_list = []

    every = kwargs.pop('every', 1)
    upto = kwargs.pop('upto', len(models))
    mark_images = kwargs.pop('mark_images', True)
    hilite_model = kwargs.pop('hilite_model', None)
    hilite_color = kwargs.pop('hilite_color', 'm')
    yscale = kwargs.pop('yscale', 'log')
    xscale = kwargs.pop('xscale', 'linear')
    xlabel = kwargs.pop('xlabel', None)
    ylabel = kwargs.pop('ylabel', None)

    kwargs.setdefault('color', 'k')
    kwargs.setdefault('marker', '.')
    kwargs.setdefault('ls', '-')

    normal_kw = {'zorder': 0, 'drawstyle': 'steps', 'alpha': 1.0}
    hilite_kw = {
        'zorder': 1000,
        'drawstyle': 'steps',
        'alpha': 1.0,
        'lw': 4,
        'ls': '--'
    }
    accepted_kw = {'zorder': 500, 'drawstyle': 'steps', 'alpha': 0.5}

    normal = []
    hilite = []
    accepted = []
    #imgs = set()
    imgs = defaultdict(set)
    xmin, xmax = np.inf, -np.inf
    ymin, ymax = np.inf, -np.inf

    objplot = defaultdict(dict)
    for mi in xrange(0, upto, every):
        m = models[mi]

        si = m.get('accepted', 2)
        tag = ''
        if si == False: tag = 'rejected'
        if si == True: tag = 'accepted'

        for [obj, data] in m['obj,data']:

            try:
                xs = data[x_prop][x_units]
                ys = data[y_prop][y_units]

                xlabel = _axis_label(xs, x_units) if not xlabel else xlabel
                ylabel = _axis_label(ys, y_units) if not ylabel else ylabel

                objplot[obj].setdefault(tag, {'ys': [], 'xs': None})
                objplot[obj][tag]['ys'].append(ys)
                objplot[obj][tag]['xs'] = xs

                #objplot[obj].setdefault('%s:xs'%tag, xs)
                #objplot[obj].setdefault('%s:ymax'%tag, ys)
                #objplot[obj].setdefault('%s:ymin'%tag, ys)
                #objplot[obj].setdefault('%s:ysum'%tag, np.zeros_like(ys))
                #objplot[obj].setdefault('%s:count'%tag, 0)

                #objplot[obj]['%s:ymax'%tag]  = np.amax((objplot[obj]['%s:ymax'%tag], ys), axis=0)
                #objplot[obj]['%s:ymin'%tag]  = np.amin((objplot[obj]['%s:ymin'%tag], ys), axis=0)
                #objplot[obj]['%s:ysum'%tag] += ys
                #objplot[obj]['%s:count'%tag] += 1

                if mark_images:
                    for i, src in enumerate(obj.sources):
                        for img in src.images:
                            imgs[i].add(
                                convert('arcsec to %s' % x_units,
                                        np.abs(img.pos), obj.dL, data['nu']))

            except KeyError as bad_key:
                Log("Missing information for object %s with key %s. Skipping plot."
                    % (obj.name, bad_key))
                continue

            use[si] = 1

            s = _styles[si]

            #xmin, xmax = min(xmin, amin(data[X])), max(xmax, amax(data[X]))
            #ymin, ymax = min(ymin, amin(data[Y])), max(ymax, amax(data[Y]))

    for i, tag in enumerate(['rejected', 'accepted', '']):
        for k, v in objplot.iteritems():
            if tag not in v: break

            ys = np.array(v[tag]['ys'])
            xs = np.repeat(np.atleast_2d(v[tag]['xs']), len(ys), axis=0)

            ret_list.append([xs, ys])
            if tag == 'rejected':
                pl.plot(xs, ys, c=_styles[0]['c'], zorder=_styles[0]['z'])
            else:
                pl.plot(xs.T, ys.T, **kwargs)

#   return

    pl.yscale(yscale)
    pl.xscale(xscale)

    si = style_iterator()
    for k, v in imgs.iteritems():
        lw, ls, c = si.next()
        for img_pos in v:
            pl.axvline(img_pos, c=c, ls=ls, lw=lw, zorder=-2, alpha=0.5)


#   if use[0] or use[1]:
#       lines  = [s['line']  for s,u in zip(_styles, use) if u]
#       labels = [s['label'] for s,u in zip(_styles, use) if u]
#       pl.legend(lines, labels)

    if use[0]:
        lines = [_styles[0]['line']]
        labels = [_styles[0]['label']]
        pl.legend(lines, labels)

    #axis('scaled')
    if xlabel: pl.xlabel(xlabel)
    if ylabel: pl.ylabel(ylabel)
    pl.xlim(xmin=pl.xlim()[0] - 0.01 * (pl.xlim()[1] - pl.xlim()[0]))
    #pl.ylim(0, ymax)

    return ret_list
Example #25
def _data_error_plot(models, X, Y, **kwargs):
    with_legend = False
    use = [0, 0, 0]

    if isinstance(X, basestring): X = [X, None]
    if isinstance(Y, basestring): Y = [Y, None]

    x_prop, x_units = X
    y_prop, y_units = Y

    ret_list = []

    every = kwargs.pop('every', 1)
    upto = kwargs.pop('upto', len(models))
    mark_images = kwargs.pop('mark_images', True)
    hilite_model = kwargs.pop('hilite_model', None)
    hilite_color = kwargs.pop('hilite_color', 'm')
    yscale = kwargs.pop('yscale', 'log')
    xscale = kwargs.pop('xscale', 'linear')
    xlabel = kwargs.pop('xlabel', None)
    ylabel = kwargs.pop('ylabel', None)
    sigma = kwargs.pop('sigma', '1sigma')

    kwargs.setdefault('color', 'k')
    kwargs.setdefault('marker', '.')
    kwargs.setdefault('ls', '-')

    normal_kw = {'zorder': 0, 'drawstyle': 'steps', 'alpha': 1.0}
    hilite_kw = {
        'zorder': 1000,
        'drawstyle': 'steps',
        'alpha': 1.0,
        'lw': 4,
        'ls': '--'
    }
    accepted_kw = {'zorder': 500, 'drawstyle': 'steps', 'alpha': 0.5}

    normal = []
    hilite = []
    accepted = []
    #imgs = set()
    imgs = defaultdict(set)
    xmin, xmax = np.inf, -np.inf
    ymin, ymax = np.inf, -np.inf

    objplot = defaultdict(dict)
    for mi in xrange(0, upto, every):
        m = models[mi]

        si = m.get('accepted', 2)
        #print si
        tag = ''
        if si == False: tag = 'rejected'
        if si == True: tag = 'accepted'

        for [obj, data] in m['obj,data']:

            try:
                xs = data[x_prop][x_units]
                ys = data[y_prop][y_units]

                xlabel = _axis_label(xs, x_units) if not xlabel else xlabel
                ylabel = _axis_label(ys, y_units) if not ylabel else ylabel

                objplot[obj].setdefault(tag, {'ys': [], 'xs': None})
                objplot[obj][tag]['ys'].append(ys)
                objplot[obj][tag]['xs'] = xs

                #objplot[obj].setdefault('%s:xs'%tag, xs)
                #objplot[obj].setdefault('%s:ymax'%tag, ys)
                #objplot[obj].setdefault('%s:ymin'%tag, ys)
                #objplot[obj].setdefault('%s:ysum'%tag, np.zeros_like(ys))
                #objplot[obj].setdefault('%s:count'%tag, 0)

                #objplot[obj]['%s:ymax'%tag]  = np.amax((objplot[obj]['%s:ymax'%tag], ys), axis=0)
                #objplot[obj]['%s:ymin'%tag]  = np.amin((objplot[obj]['%s:ymin'%tag], ys), axis=0)
                #objplot[obj]['%s:ysum'%tag] += ys
                #objplot[obj]['%s:count'%tag] += 1

                if mark_images:
                    for i, src in enumerate(obj.sources):
                        for img in src.images:
                            imgs[i].add(
                                convert('arcsec to %s' % x_units,
                                        np.abs(img.pos), obj.dL, data['nu']))

            except KeyError as bad_key:
                Log("Missing information for object %s with key %s. Skipping plot."
                    % (obj.name, bad_key))
                continue

            use[si] = 1

            s = _styles[si]

            #xmin, xmax = min(xmin, amin(data[X])), max(xmax, amax(data[X]))
            #ymin, ymax = min(ymin, amin(data[Y])), max(ymax, amax(data[Y]))

    for i, tag in enumerate(['rejected', 'accepted', '']):
        for k, v in objplot.iteritems():
            if tag not in v: break
            #if not v.has_key('%s:count'%tag): break

            avg, errp, errm = dist_range(v[tag]['ys'], sigma=sigma)
            errp = errp - avg
            errm = avg - errm
            #s = np.sort(v[tag]['ys'], axis=0)
            #avg = s[len(s)//2] if len(s)%2==1 else (s[len(s)//2] + s[len(s)//2+1])/2
            #print s
            #avg = np.median(v[tag]['ys'], axis=0)
            #print avg
            #print np.median(v[tag]['ys'], axis=1)
            #errp = s[len(s) * .841] - avg
            #errm = avg - s[len(s) * .159]

            #errp = np.amax(v[tag]['ys'], axis=0) - avg
            #errm = avg - np.amin(v[tag]['ys'], axis=0)
            #errp = errm = np.std(v[tag]['ys'], axis=0, dtype=np.float64)
            xs = v[tag]['xs']

            #           print [x[1] for x in v[tag]['ys']]
            #           pl.hist([x[1] for x in v[tag]['ys']])
            #           break

            #avg = v['%s:ysum'%tag] / v['%s:count'%tag]
            #errp = v['%s:ymax'%tag]-avg
            #errm = avg-v['%s:ymin'%tag]
            #errm = errp = np.std(

            #print len(v['xs'])
            #print len(avg)
            #assert 0
            #print len(xs)
            #print len(avg)

            ret_list.append([xs, avg, errm, errp])
            yerr = (errm, errp) if not np.all(errm == errp) else None
            if tag == 'rejected':
                pl.errorbar(xs,
                            avg,
                            yerr=yerr,
                            c=_styles[0]['c'],
                            zorder=_styles[0]['z'])
            else:
                pl.errorbar(xs, avg, yerr=yerr, **kwargs)

#   return

    pl.xscale(xscale)
    pl.yscale(yscale)

    si = style_iterator()
    for k, v in imgs.iteritems():
        lw, ls, c = si.next()
        for img_pos in v:
            pl.axvline(img_pos, c=c, ls=ls, lw=lw, zorder=-2, alpha=0.5)


#   if use[0] or use[1]:
#       lines  = [s['line']  for s,u in zip(_styles, use) if u]
#       labels = [s['label'] for s,u in zip(_styles, use) if u]
#       pl.legend(lines, labels)

    if use[0]:
        lines = [_styles[0]['line']]
        labels = [_styles[0]['label']]
        pl.legend(lines, labels)

    #axis('scaled')
    if xlabel: pl.xlabel(xlabel)
    if ylabel: pl.ylabel(ylabel)
    pl.xlim(xmin=pl.xlim()[0] - 0.01 * (pl.xlim()[1] - pl.xlim()[0]))
    #pl.ylim(0, ymax)

    return ret_list