def plotStar(star, time='orbit', color='k', alpha=1.0):
        # Choices for time are
        #   'orbit' = plot the whole orbit
        #   'obs' = duration of observation
        #   'extend' = duration of observation plus out to 2017.5
        orb = star.orbit
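        # Note: mass, dist, and yrlyPts are not defined in this snippet;
        # they are presumably module-level globals in the original source.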

        # Determine time steps
        x = star.getArrayAllEpochs('x')
        y = star.getArrayAllEpochs('y')

        if (time == 'orbit'):
            t = na.arange(orb.t0 + 0.01,
                          orb.t0 + orb.p + 0.11,
                          orb.p / 200.0,
                          type=na.Float)
        if (time == 'obs'):
            idx = (na.where(x != -1000))[0]
            t = na.arange(math.floor(star.years[idx[0]]),
                          math.ceil(star.years[idx[-1]]),
                          0.1,
                          type=na.Float)
        if (time == 'extend'):
            idx = (na.where(x != -1000))[0]
            t = na.arange(math.floor(star.years[idx[0]]),
                          2017.5,
                          0.1,
                          type=na.Float)

        (r, v, a) = orb.kep2xyz(t, mass=mass, dist=dist)

        pp = pylab.plot(r[:, 0], r[:, 1], color=color, alpha=alpha)
        # To plot no line and just the data points:
        #pp = pylab.plot([], [],color=color)

        ##
        ## Now plot the actual data points
        ##
        # Load from points files
        if yrlyPts:
            # Just take the points from 'r' array spaced about a year apart
            roundT = array([int(round(t[qq])) for qq in range(len(t))])
            firstT = roundT[0]
            tPts = [
                roundT.searchsorted(firstT + zz + 1)
                for zz in range(roundT[-1] - firstT)
            ]
            c = pylab.scatter(r[tPts, 0],
                              r[tPts, 1],
                              20.0,
                              color,
                              marker='o',
                              faceted=False)
        else:
            # Get the actual data
            c = pylab.scatter(x, y, 20.0, color, marker='o', faceted=False)

        c.set_alpha(alpha)

        return pp
Example #2
def get_edge(l,flux,err,edge,lowl,highl):
    """Fits a photoelectric edge to the section of the l/flux spectrum
    falling between lowl and highl.
    l: wavelength in angstrom
    flux: some measure of flux (erg/s/cm2/A?)
    edge: one of the edges in data (O,Ne,FeL,Si...)"""
    global l_edge,sig,sigma_K_Et,E_K
    sigma_K_Et,E_K = data[edge]
    l_edge = 2.998e18/(E_K *1.602e-19/6.626e-34)
    l=num.array(l)
    indeces=(l>lowl)*(l<highl)
    if edge=='FeL':
        l_edge=17.52
        indeces *= (l>17.2)*(l<17.56)-1
    elif edge=='Ne':
        l_edge=14.31    #http://space.mit.edu/HETG/Reports/HETG_Report_SciFeb03.html
        indeces *= (l<14.44)*(l>14.66)-1
        indeces *= (l>13.4)*(l<13.5)-1
    elif edge=='Mg':
        l_edge=9.5
    elif edge=='Si':
        l_edge=6.72
        indeces *= (l<6.73)*(l>6.61)-1
    elif edge=="O":
        l_edge=23.1
        indeces *= (l<23.6)*(l>23.25)-1
        indeces *= (l<23.1)*(l>22.5)-1
    lne = l[num.where(indeces)]
    fne = flux[num.where(indeces)]
    ene = err[num.where(indeces)]
    sig = sigma_K_Et*1e-19
    return lne,fne,ene
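The edge wavelength computed above is just lambda = h*c/E, written with c in Angstrom/s and E in eV. A quick, self-contained check of that conversion (plain Python; the 543 eV oxygen K-edge energy is illustrative and not taken from the `data` table used by get_edge):

# lambda [Angstrom] = c [Angstrom/s] / nu, with nu = E [eV] * 1.602e-19 [J/eV] / 6.626e-34 [J s]
def edge_wavelength(E_K_eV):
    return 2.998e18 / (E_K_eV * 1.602e-19 / 6.626e-34)

print(edge_wavelength(543.0))   # oxygen K edge at ~543 eV -> ~22.8 Angstrom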
Example #3
def plotFitRms(root, polyroot, gcfitdir):
    s = starset.StarSet(root)
    s.loadPolyfit(polyroot, arcsec=1, accel=0)

    years = s.stars[0].years
    fitPx = s.getArray('fitXv.p')
    fitVx = s.getArray('fitXv.v')
    fitPy = s.getArray('fitYv.p')
    fitVy = s.getArray('fitYv.v')
    t0x = s.getArray('fitXv.t0')
    t0y = s.getArray('fitYv.t0')

    rmsX = na.zeros(len(s.stars), type=na.Float)
    rmsY = na.zeros(len(s.stars), type=na.Float)
    rms = na.zeros(len(s.stars), type=na.Float)
    cnt = na.zeros(len(s.stars), type=na.Int)

    for ee in range(len(years)):
        dtX = years[ee] - t0x
        dtY = years[ee] - t0y

        xfit = fitPx + (dtX * fitVx)
        yfit = fitPy + (dtY * fitVy)

        x = s.getArrayFromEpoch(ee, 'x')
        y = s.getArrayFromEpoch(ee, 'y')
        xpix = s.getArrayFromEpoch(ee, 'xpix')
        ypix = s.getArrayFromEpoch(ee, 'ypix')

        diffx = xfit - x
        diffy = yfit - y
        diff = na.sqrt(diffx**2 + diffy**2)

        idx = (na.where(xpix > -999))[0]

        # only accumulate residuals for stars actually detected in this epoch
        rmsX[idx] += diffx[idx]**2
        rmsY[idx] += diffy[idx]**2
        rms[idx] += diff[idx]**2
        cnt[idx] += 1

    rmsX = na.sqrt(rmsX / cnt) * 1000.0
    rmsY = na.sqrt(rmsY / cnt) * 1000.0
    rms = na.sqrt(rms / cnt) * 1000.0

    mag = s.getArray('mag')
    x = s.getArray('x')
    y = s.getArray('y')
    r = na.sqrt(x**2 + y**2)

    idx = (na.where(mag < 15))[0]

    p.clf()
    p.semilogy(r[idx], rms[idx], 'k.')
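The epoch loop above accumulates squared residuals per star and divides by the number of epochs in which each star was detected. A minimal sketch of that accumulation written with numpy (numarray's successor), using made-up residuals:

import numpy as np

# toy residuals: 3 epochs x 4 stars, NaN marks a star missing from an epoch
resid = np.array([[0.1, 0.2, np.nan, 0.05],
                  [0.2, 0.1, 0.30,   np.nan],
                  [0.1, 0.3, 0.20,   0.10]])
detected = ~np.isnan(resid)
sq = np.where(detected, resid**2, 0.0)
rms = np.sqrt(sq.sum(axis=0) / detected.sum(axis=0)) * 1000.0  # in milli-units
print(rms)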
def vs_stats (res):
  """helper function to convert a result to a list of per-vellset
  dicts of (min,max,mean) values."""
  stats = []
  for vs in res.vellsets:
    a = vs.value
    flags = getattr(vs,'flags',None)
    # apply flags if any
    if flags is None:
      (mn,mx,mean) = (a.min(),a.max(),a.mean())
    else:
      mn = numarray.where(flags,1e+99,a).min()
      mx = numarray.where(flags,-1e+99,a).max()
      mean = numarray.where(flags,0,a).mean()
    stats.append({'min':mn,'max':mx,'mean':mean})
  return stats
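The flag handling above substitutes large sentinel values before taking the min/max so that flagged samples can never win. The same pattern with numpy standing in for numarray, on a toy vellset value:

import numpy as np

a = np.array([1.0, 5.0, -3.0, 2.0])
flags = np.array([0, 1, 0, 0])           # nonzero = flagged
mn = np.where(flags, 1e+99, a).min()     # flagged points can never be the minimum
mx = np.where(flags, -1e+99, a).max()    # ... or the maximum
mean = np.where(flags, 0, a).mean()      # note: flagged points still count in the divisor
print(mn, mx, mean)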
Example #6
def determinant(a):
    """determinant(a) -> ||a||
 
    *a* may be either rank-2 or rank-3. If it is rank-2, it must be square.
    
    >>> A = [[1,2,3], [3,4,5], [5,6,7]]
    >>> _isClose(determinant(A), 0)
    1
    
    If *a* is rank-3, it is treated as an array of rank-2 matrices and
    must be square along the last 2 axes.
 
    >>> A = [[[1, 3], [2j, 3j]], [[2, 4], [4j, 4j]], [[3, 5], [6j, 5j]]]
    >>> _isClose(determinant(A), [-3j, -8j, -15j])
    1

    If *a* is not square along its last two axes, a LinAlgError is raised.
    
    >>> determinant(na.asarray(A)[...,:1])
    Traceback (most recent call last):
       ...
    LinearAlgebraError: Array (or it submatrices) must be square
    
    """
    a = na.asarray(a)
    _assertRank((2,3), a)
    _assertSubmatrixSquareness(a)
    stretched = (len(a.shape) == 2)
    if stretched:
        a = a[na.NewAxis,]
    t = _commonType(a)
    a = _castCopyAndTranspose(t, a, indices=(0,2,1))
    n_cases, n = a.shape[:2]
    if _array_kind[t] == 1:
        lapack_routine = lapack_lite2.zgetrf
    else:
        lapack_routine = lapack_lite2.dgetrf
    no_pivoting = na.arrayrange(1, n+1)
    pivots = na.zeros((n,), 'l')
    all_pivots = na.zeros((n_cases, n,), 'l')
    sum , not_equal = na.sum, na.not_equal
    stride = n * n * a.itemsize()
    pivots_stride = n * pivots.itemsize()
    view = a[0].view()
    view_pivots = all_pivots[0]
    a_i = view.copy()
    for i in range(n_cases):
        if i:
            a_i._copyFrom(view)
        outcome = lapack_routine(n, n, a_i, n, pivots, 0)
        view_pivots._copyFrom(pivots)
        view._copyFrom(a_i)
        view._byteoffset += stride
        view_pivots._byteoffset += pivots_stride
    signs = na.where(sum(not_equal(all_pivots, no_pivoting), 1) % 2, -1, 1).astype(t)
    for i in range(n):
        signs *= a[:,i,i]
    if stretched:
        signs = signs[0]
    return signs
def compStructure(v1, v2):
    nr_v = len(v1)

    G1 = Graph()
    G2 = Graph()

    for p in range(len(v1[0])):

        for i in range(1, nr_v + 1):
            G1.AddVertex()
            G2.AddVertex()

        # finding number of unique entries in v1 and v2
        d_v1 = {}
        d_v2 = {}

        v1_ind = 0
        v2_ind = 0
        for i in range(nr_v):
            if not d_v1.has_key(v1[i, p]):
                # rows of v1 sharing this value (unused in this snippet)
                t = numarray.where(v1[:, p] == v1[i, p])
                d_v1[v1[i, p]] = v1_ind
                v1_ind += 1
            if not d_v2.has_key(v2[i, p]):
                d_v2[v2[i, p]] = v2_ind
                v2_ind += 1
Example #8
    def plotStar(star, time='orbit', color='k', alpha=1.0):
        # Choices for time are
        #   'orbit' = plot the whole orbit
        #   'obs' = duration of observation
        #   'extend' = duration of observation plus out to 2017.5
        orb = star.orbit

        # Determine time steps
        x = star.getArrayAllEpochs('x')
        y = star.getArrayAllEpochs('y')

        if (time == 'orbit'):
            t = na.arange(orb.t0+0.01, orb.t0+orb.p+0.11, orb.p/200.0, type=na.Float)
        if (time == 'obs'):
            idx = ( na.where(x != -1000) )[0]
            t = na.arange(math.floor(star.years[idx[0]]),
                          math.ceil(star.years[idx[-1]]),
                          0.1, type=na.Float)
        if (time == 'extend'):
            idx = ( na.where(x != -1000) )[0]
            t = na.arange(math.floor(star.years[idx[0]]),
                          2017.5,
                          0.1, type=na.Float)

        (r, v, a) = orb.kep2xyz(t, mass=mass, dist=dist)
        
        pp = pylab.plot(r[:,0], r[:,1], color=color, alpha=alpha)
        # To plot no line and just the data points:
        #pp = pylab.plot([], [],color=color)

        ##
        ## Now plot the actual data points
        ##
        # Load from points files
        if yrlyPts:
            # Just take the points from 'r' array spaced about a year apart
            roundT = array([int(round(t[qq])) for qq in range(len(t))])
            firstT = roundT[0]
            tPts = [roundT.searchsorted(firstT+zz+1) for zz in range(roundT[-1]-firstT)]
            c = pylab.scatter(r[tPts,0], r[tPts,1], 20.0, color, marker='o', faceted=False)
        else:
            # Get the actual data
            c = pylab.scatter(x, y, 20.0, color, marker='o', faceted=False)

        c.set_alpha(alpha)

        return pp
Example #9
def mask(clus_id, line_s):
    imagefile = c.imagefile
    sex_cata = c.sex_cata
    threshold = c.threshold
    thresh_area = c.thresh_area
    size = c.size
    mask_reg = c.mask_reg
    x = n.reshape(n.arange(size*size),(size,size)) % size
    x = x.astype(n.Float32)
    y = n.reshape(n.arange(size*size),(size,size)) / size
    y = y.astype(n.Float32)
    values = line_s.split()
    mask_file = 'mask_' + str(imagefile)[:6] + '_'  + str(clus_id) + '.fits'
    xcntr_o  = float(values[1]) #x center of the object
    ycntr_o  = float(values[2]) #y center of the object
    xcntr = size / 2.0 + 1.0 + xcntr_o - int(xcntr_o)
    ycntr = size / 2.0 + 1.0 + ycntr_o - int(ycntr_o)
    mag    = float(values[7]) #Magnitude
    radius = float(values[9]) #Half light radius
    mag_zero = c.mag_zero #magnitude zero point
    sky	 = float(values[10]) #sky 
    pos_ang = float(values[11]) - 90.0 #position angle
    axis_rat = 1.0 / float(values[12]) #axis ration b/a
    major_axis = float(values[14])	#major axis of the object
    z = n.zeros((size,size))
    for line_j in open(sex_cata,'r'):
        try:
            values = line_j.split()
            xcntr_n  = float(values[1]) #x center of the neighbour
            ycntr_n  = float(values[2]) #y center of the neighbour
            mag    = float(values[7]) #Magnitude
            radius = float(values[9]) #Half light radius
            sky      = float(values[10]) #sky
            pos_ang = float(values[11]) - 90.0 #position angle
            axis_rat = 1.0/float(values[12]) #axis ration b/a
            area = float(values[13])
            maj_axis = float(values[14])#major axis of neighbour
            if(abs(xcntr_n - xcntr_o) < size/2.0 and abs(ycntr_n - ycntr_o) \
               < size/2.0 and area < thresh_area):
                if(abs(xcntr_n - xcntr_o) >= major_axis * threshold or \
                   abs(ycntr_n - ycntr_o) >= major_axis * threshold):
                    if((xcntr_o - xcntr_n) < 0):
                        xn = xcntr + abs(xcntr_n - xcntr_o)
                    if((ycntr_o - ycntr_n) < 0):
                        yn = ycntr + abs(ycntr_n - ycntr_o)
                    if((xcntr_o - xcntr_n) > 0):
                        xn = xcntr - (xcntr_o - xcntr_n)
                    if((ycntr_o - ycntr_n) > 0):
                        yn = ycntr - (ycntr_o - ycntr_n)
                    tx = x - xn + 0.5 
                    ty = y - yn + 0.5
                    R = n.sqrt(tx**2.0 + ty**2.0)
                    z[n.where(R<=mask_reg*maj_axis)] = 1
        except:
            pass	
    hdu = pyfits.PrimaryHDU(z.astype(n.Float32))
    hdu.writeto(mask_file)
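The final step above flags every pixel within mask_reg major axes of a neighbouring object's centre. A self-contained sketch of that circular-mask construction with numpy (the snippet itself depends on the module-level config object c, the SExtractor catalogue, and pyfits):

import numpy as np

size = 64
y, x = np.indices((size, size)).astype(np.float32)
xn, yn = 20.0, 40.0              # hypothetical neighbour centre (pixels)
maj_axis, mask_reg = 3.0, 2.0
R = np.sqrt((x - xn + 0.5)**2 + (y - yn + 0.5)**2)
z = np.zeros((size, size))
z[R <= mask_reg * maj_axis] = 1  # pixels to exclude from the fit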
Example #10
def _rank_filter(input,
                 rank,
                 size=None,
                 footprint=None,
                 output=None,
                 mode="reflect",
                 cval=0.0,
                 origin=0,
                 operation='rank'):
    input = numarray.asarray(input)
    if isinstance(input.type(), numarray.ComplexType):
        raise TypeError, 'Complex type not supported'
    origins = _ni_support._normalize_sequence(origin, input.rank)
    if footprint == None:
        if size == None:
            raise RuntimeError, "no footprint or filter size provided"
        sizes = _ni_support._normalize_sequence(size, input.rank)
        footprint = numarray.ones(sizes, type=numarray.Bool)
    else:
        footprint = numarray.asarray(footprint, type=numarray.Bool)
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.rank:
        raise RuntimeError, 'filter footprint array has incorrect shape.'
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin > lenf):
            raise ValueError, 'invalid origin'
    if not footprint.iscontiguous():
        footprint = footprint.copy()
    filter_size = numarray.where(footprint, 1, 0).sum()
    if operation == 'median':
        rank = filter_size // 2
    elif operation == 'percentile':
        percentile = rank
        if percentile < 0.0:
            percentile += 100.0
        if percentile < 0 or percentile > 100:
            raise RuntimeError, 'invalid percentile'
        if percentile == 100.0:
            rank = filter_size - 1
        else:
            rank = int(float(filter_size) * percentile / 100.0)
    if rank < 0:
        rank += filter_size
    if rank < 0 or rank >= filter_size:
        raise RuntimeError, 'rank not within filter footprint size'
    if rank == 0:
        return minimum_filter(input, None, footprint, output, mode, cval,
                              origin)
    elif rank == filter_size - 1:
        return maximum_filter(input, None, footprint, output, mode, cval,
                              origin)
    else:
        output, return_value = _ni_support._get_output(output, input)
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
                              origins)
        return return_value
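In the dispatch above a median is simply the rank filter with rank = filter_size // 2, and a percentile maps to rank = int(filter_size * percentile / 100). That relationship can be checked with scipy.ndimage, the current home of this numarray.nd_image code:

import numpy as np
from scipy import ndimage

a = np.random.rand(8, 8)
med = ndimage.median_filter(a, size=3)
rnk = ndimage.rank_filter(a, rank=9 // 2, size=3)   # 3x3 footprint -> filter_size 9
print(np.allclose(med, rnk))                        # True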
Example #11
def vs_stats(res):
    """helper function to convert a result to a dict of
  (min,max,mean) values corresponding to each vellset."""
    stats = []
    for vs in res.vellsets:
        a = vs.value
        flags = getattr(vs, 'flags', None)
        # apply flags if any
        if flags is None:
            (mn, mx, mean) = (a.min(), a.max(), a.mean())
        else:
            mn = numarray.where(flags, 1e+99, a).min()
            mx = numarray.where(flags, -1e+99, a).max()
            mean = numarray.where(flags, 0, a).mean()
        stats.append({
            'min': mn,
            'max': mx,
            'mean': mean
        })
    return stats
def create_predict_table(LCM, agent_set, agents_index, observed_choices_id, data_objects, geographies=[]):

    resources = data_objects

    mc_choices = sample_choice(LCM.model.probabilities)  # monte carlo choice
    mc_choices_index = LCM.model_resources.translate("index")[mc_choices]
    maxprob_choices = sample_choice(LCM.model.probabilities, method="max_prob")  # max prob choice
    maxprob_choices_index = LCM.model_resources.translate("index")[maxprob_choices]
    results = []

    gcs = resources.translate("gridcell")
    for geography in geographies:
        geos = resources.translate(geography)

        # get observed geo_id
        obs = copy_dataset(agent_set)
        obs.subset_by_index(agents_index)
        obs.set_values_of_one_attribute(gcs.id_name[0], observed_choices_id)

        resources.merge({"household": obs})  # , "gridcell": gcs, "zone": zones, "faz":fazes})
        obs.compute_variables(geos.id_name[0], resources=resources)
        obs_geo_ids = obs.get_attribute(geos.id_name[0])

        # count simulated choices
        sim = copy_dataset(obs)
        sim.set_values_of_one_attribute(gcs.id_name[0], gcs.get_id_attribute()[mc_choices_index])
        resources.merge({"household": sim})

        geos_size = geos.size()
        geo_ids = geos.get_id_attribute()

        pred_matrix = zeros((geos_size, geos_size))
        p_success = zeros((geos_size,)).astype(Float32)

        f = 0
        for geo_id in geo_ids:
            index_in_geo = where(obs_geo_ids == geo_id)[0]
            resources.merge({"select_index": index_in_geo})

            geos.compute_variables("number_of_select_households", resources=resources)
            pred_matrix[f] = geos.get_attribute("number_of_select_households")
            if sum(pred_matrix[f]) > 0:
                p_success[f] = float(pred_matrix[f, f]) / sum(pred_matrix[f])

            sim.increment_version("grid_id")  # to trigger recomputation in next iteration
            f += 1

        print p_success
        results.append((pred_matrix.copy(), p_success.copy()))

    return results
Example #14
def sfr(t,p):
    "SFR vs logage on the grid 't', calculated from 'params'"
    # Set tangent points based on fractional distances between control points
    maxy02 = max(p.s2y,p.s1y)
    maxs1y = 2.*p.s1y-maxy02
    s1x = p.s0x+p.s1x*(p.s2x-p.s0x)
    s0tx = +p.s0tx*(s1x-p.s0x)
    s0ty = +p.s0ty*(p.s1y-p.s0y)
    s1ty = +p.s1ty*(maxs1y-p.s0y)
    s1tx = -p.s1tx*(p.s2x-s1x)
    s2tx = -p.s2tx*(p.s2x-s1x)
    s2ty = +p.s2ty*(p.s1y-p.s2y)
#   s1x = p.s0x+p.s1x*(p.s2x-p.s0x)

    sfr = bezier.interpolate(bezier.curve((
            bezier.point((p.s0x,p.s0y),(s0tx,s0ty)),
            bezier.point((s1x,p.s1y),(s1tx,s1ty)),
#           bezier.point((s1x,p.s1y),(s1tx,s1ty)),
            bezier.point((p.s2x,p.s2y),(s2tx,s2ty))
            ),8),t)
    sfr = numarray.where(t<p.s0x,0.,sfr)
    sfr = numarray.where(t>p.s2x,0.,sfr)
    return sfr
Example #15
def _rank_filter(
    input, rank, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, operation="rank"
):
    input = numarray.asarray(input)
    if isinstance(input.type(), numarray.ComplexType):
        raise TypeError, "Complex type not supported"
    origins = _ni_support._normalize_sequence(origin, input.rank)
    if footprint == None:
        if size == None:
            raise RuntimeError, "no footprint or filter size provided"
        sizes = _ni_support._normalize_sequence(size, input.rank)
        footprint = numarray.ones(sizes, type=numarray.Bool)
    else:
        footprint = numarray.asarray(footprint, type=numarray.Bool)
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.rank:
        raise RuntimeError, "filter footprint array has incorrect shape."
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin > lenf):
            raise ValueError, "invalid origin"
    if not footprint.iscontiguous():
        footprint = footprint.copy()
    filter_size = numarray.where(footprint, 1, 0).sum()
    if operation == "median":
        rank = filter_size // 2
    elif operation == "percentile":
        percentile = rank
        if percentile < 0.0:
            percentile += 100.0
        if percentile < 0 or percentile > 100:
            raise RuntimeError, "invalid percentile"
        if percentile == 100.0:
            rank = filter_size - 1
        else:
            rank = int(float(filter_size) * percentile / 100.0)
    if rank < 0:
        rank += filter_size
    if rank < 0 or rank >= filter_size:
        raise RuntimeError, "rank not within filter footprint size"
    if rank == 0:
        return minimum_filter(input, None, footprint, output, mode, cval, origin)
    elif rank == filter_size - 1:
        return maximum_filter(input, None, footprint, output, mode, cval, origin)
    else:
        output, return_value = _ni_support._get_output(output, input)
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.rank_filter(input, rank, footprint, output, mode, cval, origins)
        return return_value
Example #16
    def makegrid(self, nx, ny):
        """
 return arrays of shape (ny,nx) containing lon,lat coordinates of
 an equally spaced native projection grid.
        """
        dx = (self.urcrnrx - self.llcrnrx) / (nx - 1)
        dy = (self.urcrnry - self.llcrnry) / (ny - 1)
        x = self.llcrnrx + dx * N.indices((ny, nx))[1, :, :]
        y = self.llcrnry + dy * N.indices((ny, nx))[0, :, :]
        lons, lats = self(x, y, inverse=True)
        lons = N.where(lons < 0, lons + 360, lons)
        # mercator coordinate is plain longitude in x.
        if self.projparams['proj'] == 'merc':
            dx = (self.urcrnrlon - self.llcrnrlon) / (nx - 1)
            lons = self.llcrnrlon + dx * N.indices((ny, nx))[1, :, :]
        return lons, lats
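The grid above is built as corner + step * index along each axis. A hedged numpy equivalent of those two lines, with stand-in corner coordinates:

import numpy as np

llcrnrx, llcrnry, urcrnrx, urcrnry = 0.0, 0.0, 1000.0, 500.0
nx, ny = 5, 3
dx = (urcrnrx - llcrnrx) / (nx - 1)
dy = (urcrnry - llcrnry) / (ny - 1)
x = llcrnrx + dx * np.indices((ny, nx))[1]   # column index -> x
y = llcrnry + dy * np.indices((ny, nx))[0]   # row index    -> y
print(x.shape, x[0, -1], y[-1, 0])           # (3, 5) 1000.0 500.0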
Example #17
    def makegrid(self,nx,ny):
        """
 return arrays of shape (ny,nx) containing lon,lat coordinates of
 an equally spaced native projection grid.
        """
        dx = (self.urcrnrx-self.llcrnrx)/(nx-1)
        dy = (self.urcrnry-self.llcrnry)/(ny-1)  
        x = self.llcrnrx+dx*N.indices((ny,nx))[1,:,:]
        y = self.llcrnry+dy*N.indices((ny,nx))[0,:,:]
        lons, lats = self(x, y, inverse=True)
        lons = N.where(lons < 0, lons+360, lons)
        # mercator coordinate is plain longitude in x.
        if self.projparams['proj'] == 'merc':
            dx = (self.urcrnrlon-self.llcrnrlon)/(nx-1)
            lons = self.llcrnrlon+dx*N.indices((ny,nx))[1,:,:]
        return lons, lats
Example #18
    def addTufts( self, prjPnts, cnn, info, prjObj, name  ):
        '''
        Add a tufts actor to the scene graph

        Arguments:
            prjPnts     - Projected points
            cnn         - Connectivity information
            info        - Model information
            prjObj      - AcuPrj Object
            name        - An optional name for the tufts actor

        Output:
            None
        '''

        self.prj    = prjObj
 
        row         = 0
        tuftCnn     = []
        self.pnts   = []
        for prjPnt in prjPnts:
            crdPnt  = numarray.where( prjPnt > 0 )[0]
            if len( crdPnt ) > 0:
                tuftCnn.append(             cnn[row]                )
                self.pnts.append(           prjPnt                  )
            row     += 1

        for i in range(len(tuftCnn) - 1, -1, -1):
            if tuftCnn[i] >= len( self.pnts ):
                del tuftCnn[i]

        tuftCnn     = numarray.array(       tuftCnn                 )                    
        self.pnts   = numarray.array(       self.pnts               )
        
        item        = AcuItemObj(           name,       info[0],
                                            info[1],    info[2],
                                            info[3],    info[4]     )

        tuftActor   = self.sceneGraph.addPntSet(    self.pnts,
                                                    tuftCnn,
                                                    'point',
                                                    item            )

        self.actList.append(                tuftActor               )
def plotPosError(starlist, outsuffix, imgtype='png'):
    """
    Load up a starlist output by align_rms.
    """

    tab = asciidata.open(starlist)

    mag = tab[1].tonumarray()
    x = tab[3].tonumarray()
    y = tab[4].tonumarray()
    xerr = tab[5].tonumarray()
    yerr = tab[6].tonumarray()

    xdiff = x - 512.0
    ydiff = y - 512.0
    r = na.sqrt(xdiff**2 + ydiff**2)

    err = (xerr + yerr) / 2.0

    # Plot magnitude dependence
    p.clf()
    p.semilogy(mag, err*9.94, 'k.')

    idx = (na.where(mag < 14))[0]

    medianPix = na.linear_algebra.mlab.median(err[idx])
    medianArc = medianPix * 9.94
    p.text(9, 60, 'For (8 < K < 14)')
    p.text(9, 40, 'Median Error = %5.3f pix (%5.3f mas)' %
           (medianPix, medianArc))
    p.xlabel('Magnitude')
    p.ylabel('Positional Uncertainty (mas)')

    p.savefig('pos_error_mag_%s.%s' % (outsuffix, imgtype))


    # Plot radial dependence
    p.clf()
    p.semilogy(r[idx] * 0.00994, err[idx] * 9.94, 'k.')
    p.xlabel('Radius from Field Center (arcsec)')
    p.ylabel('Positional Uncertainty (mas)')
    p.savefig('pos_error_r_%s.%s' % (outsuffix, imgtype))
Example #20
def plotPosError(starlist, outsuffix, imgtype='png'):
    """
    Load up a starlist output by align_rms.
    """

    tab = asciidata.open(starlist)

    mag = tab[1].tonumarray()
    x = tab[3].tonumarray()
    y = tab[4].tonumarray()
    xerr = tab[5].tonumarray()
    yerr = tab[6].tonumarray()

    xdiff = x - 512.0
    ydiff = y - 512.0
    r = na.sqrt(xdiff**2 + ydiff**2)

    err = (xerr + yerr) / 2.0

    # Plot magnitude dependence
    p.clf()
    p.semilogy(mag, err * 9.94, 'k.')

    idx = (na.where(mag < 14))[0]

    medianPix = na.linear_algebra.mlab.median(err[idx])
    medianArc = medianPix * 9.94
    p.text(9, 60, 'For (8 < K < 14)')
    p.text(9, 40,
           'Median Error = %5.3f pix (%5.3f mas)' % (medianPix, medianArc))
    p.xlabel('Magnitude')
    p.ylabel('Positional Uncertainty (mas)')

    p.savefig('pos_error_mag_%s.%s' % (outsuffix, imgtype))

    # Plot radial dependence
    p.clf()
    p.semilogy(r[idx] * 0.00994, err[idx] * 9.94, 'k.')
    p.xlabel('Radius from Field Center (arcsec)')
    p.ylabel('Positional Uncertainty (mas)')
    p.savefig('pos_error_r_%s.%s' % (outsuffix, imgtype))
Example #21
    def checkGtis(self, dtime=30):
        tstart, tstop = self.tstart, self.tstop
        time = lambda y : y*dtime + tstart
        lc = self._get_lc(dtime)

        indx = num.where(lc != 0)[0]
        gti_indx = [indx[0]]
        for i, item in enumerate(indx[1:]):
            if item - 1 != indx[i]:
                gti_indx.extend((indx[i], item))
        gti_indx.append(indx[-1])

        istart = gti_indx[::2]
        istop = gti_indx[1::2]

        tmin, tmax = [], []
        for istart, istop in zip(gti_indx[::2], gti_indx[1::2]):
            tmin.append(time(istart) - dtime/2)
            tmax.append(time(istop) + dtime/2)
        tmin = num.array(tmin)
        tmax = num.array(tmax)

        self.data_integral = sum(tmax - tmin)
        self.gti_integral = sum(self.gti.STOP - self.gti.START)
        if ( len(tmin) != len(self.gti.START) or
             (max(abs(tmin - self.gti.START)) > dtime) or
             (max(abs(tmax - self.gti.STOP)) > dtime) ):
            self._write("GTIs do not match data.\n")
            self._write("Using %i sec time bins:\n" % dtime)

            self._write("  Integrated time of non-zero counts = %i\n" 
                        % self.data_integral)
            self._write("  Integral of GTIs = %i\n" % self.gti_integral)
            self.gtiTestPassed = False
        else:
            self.gtiTestPassed = True
            self._passed += 1
            self._write('.')
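The index bookkeeping above turns the non-zero bins of the binned light curve into start/stop pairs of contiguous runs, which are then compared against the GTIs. A small numpy sketch of that run-finding step on a toy array of counts:

import numpy as np

lc = np.array([0, 3, 5, 0, 0, 2, 2, 2, 0, 1])
indx = np.where(lc != 0)[0]                 # bins with counts
# a run breaks wherever consecutive indices differ by more than 1
breaks = np.where(np.diff(indx) > 1)[0]
istart = np.concatenate(([indx[0]], indx[breaks + 1]))
istop = np.concatenate((indx[breaks], [indx[-1]]))
print([(int(a), int(b)) for a, b in zip(istart, istop)])   # [(1, 2), (5, 7), (9, 9)]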
Example #22
    def drawmeridians(self,ax,meridians,color='k',linewidth=1., \
                      linestyle='--',dashes=[1,1],labels=[0,0,0,0],\
                      font='rm',fontsize=12):
        """
 draw meridians (longitude lines).

 ax - current axis instance.
 meridians - list containing longitude values to draw (in degrees).
 color - color to draw meridians (default black).
 linewidth - line width for meridians (default 1.)
 linestyle - line style for meridians (default '--', i.e. dashed).
 dashes - dash pattern for meridians (default [1,1], i.e. 1 pixel on,
  1 pixel off).
 labels - list of 4 values (default [0,0,0,0]) that control whether
  meridians are labelled where they intersect the left, right, top or 
  bottom of the plot. For example labels=[1,0,0,1] will cause meridians
  to be labelled where they intersect the left and bottom of the plot,
  but not the right and top. Labels are located with a precision of 0.1
  degrees and are drawn using mathtext.
 font - mathtext font used for labels ('rm','tt','it' or 'cal', default 'rm').
 fontsize - font size in points for labels (default 12).
        """
        # don't draw meridians past latmax, always draw parallel at latmax.
        latmax = 80.  # not used for cyl, merc projections.
        # offset for labels.
        yoffset = (self.urcrnry - self.llcrnry) / 100. / self.aspect
        xoffset = (self.urcrnrx - self.llcrnrx) / 100.

        if self.projection not in ['merc', 'cyl']:
            lats = N.arange(-latmax, latmax + 1).astype('f')
        else:
            lats = N.arange(-90, 91).astype('f')
        xdelta = 0.1 * (self.xmax - self.xmin)
        ydelta = 0.1 * (self.ymax - self.ymin)
        for merid in meridians:
            lons = merid * N.ones(len(lats), 'f')
            x, y = self(lons, lats)
            # remove points outside domain.
            testx = N.logical_and(x >= self.xmin - xdelta,
                                  x <= self.xmax + xdelta)
            x = N.compress(testx, x)
            y = N.compress(testx, y)
            testy = N.logical_and(y >= self.ymin - ydelta,
                                  y <= self.ymax + ydelta)
            x = N.compress(testy, x)
            y = N.compress(testy, y)
            if len(x) > 1 and len(y) > 1:
                # split into separate line segments if necessary.
                # (not necessary for mercator or cylindrical).
                xd = (x[1:] - x[0:-1])**2
                yd = (y[1:] - y[0:-1])**2
                dist = N.sqrt(xd + yd)
                split = dist > 500000.
                if N.sum(split) and self.projection not in ['merc', 'cyl']:
                    ind = (N.compress(
                        split, pylab.squeeze(split * N.indices(xd.shape))) +
                           1).tolist()
                    xl = []
                    yl = []
                    iprev = 0
                    ind.append(len(xd))
                    for i in ind:
                        xl.append(x[iprev:i])
                        yl.append(y[iprev:i])
                        iprev = i
                else:
                    xl = [x]
                    yl = [y]
                # draw each line segment.
                for x, y in zip(xl, yl):
                    # skip if only a point.
                    if len(x) > 1 and len(y) > 1:
                        l = Line2D(x,
                                   y,
                                   linewidth=linewidth,
                                   linestyle=linestyle)
                        l.set_color(color)
                        l.set_dashes(dashes)
                        ax.add_line(l)
        # draw labels for meridians.
        # search along edges of map to see if parallels intersect.
        # if so, find x,y location of intersection and draw a label there.
        if self.projection == 'cyl':
            dx = 0.01
            dy = 0.01
        elif self.projection == 'merc':
            dx = 0.01
            dy = 1000
        else:
            dx = 1000
            dy = 1000
        for dolab, side in zip(labels, ['l', 'r', 't', 'b']):
            if not dolab: continue
            # for cyl or merc, don't draw meridians on left or right.
            if self.projection in ['cyl', 'merc'] and side in ['l', 'r']:
                continue
            if side in ['l', 'r']:
                nmax = int((self.ymax - self.ymin) / dy + 1)
                if self.urcrnry < self.llcrnry:
                    yy = self.llcrnry - dy * N.arange(nmax)
                else:
                    yy = self.llcrnry + dy * N.arange(nmax)
                if side == 'l':
                    lons, lats = self(self.llcrnrx * N.ones(yy.shape, 'f'),
                                      yy,
                                      inverse=True)
                else:
                    lons, lats = self(self.urcrnrx * N.ones(yy.shape, 'f'),
                                      yy,
                                      inverse=True)
                lons = N.where(lons < 0, lons + 360, lons)
                lons = [int(lon * 10) for lon in lons.tolist()]
                lats = [int(lat * 10) for lat in lats.tolist()]
            else:
                nmax = int((self.xmax - self.xmin) / dx + 1)
                if self.urcrnrx < self.llcrnrx:
                    xx = self.llcrnrx - dx * N.arange(nmax)
                else:
                    xx = self.llcrnrx + dx * N.arange(nmax)
                if side == 'b':
                    lons, lats = self(xx,
                                      self.llcrnry * N.ones(xx.shape, 'f'),
                                      inverse=True)
                else:
                    lons, lats = self(xx,
                                      self.urcrnry * N.ones(xx.shape, 'f'),
                                      inverse=True)
                lons = N.where(lons < 0, lons + 360, lons)
                lons = [int(lon * 10) for lon in lons.tolist()]
                lats = [int(lat * 10) for lat in lats.tolist()]
            for lon in meridians:
                if lon < 0: lon = lon + 360.
                # find index of meridian (there may be two, so
                # search from left and right).
                try:
                    nl = lons.index(int(lon * 10))
                except:
                    nl = -1
                try:
                    nr = len(lons) - lons[::-1].index(int(lon * 10)) - 1
                except:
                    nr = -1
                if lon > 180:
                    lonlab = r'$\%s{%g\/^{\circ}\/W}$' % (font,
                                                          N.fabs(lon - 360))
                elif lon < 180 and lon != 0:
                    lonlab = r'$\%s{%g\/^{\circ}\/E}$' % (font, lon)
                else:
                    lonlab = r'$\%s{%g\/^{\circ}}$' % (font, lon)
                # meridians can intersect each map edge twice.
                for i, n in enumerate([nl, nr]):
                    lat = lats[n] / 10.
                    # no meridians > latmax for projections other than merc,cyl.
                    if self.projection not in ['merc', 'cyl'] and lat > latmax:
                        continue
                    # don't bother if close to the first label.
                    if i and abs(nr - nl) < 100: continue
                    if n > 0:
                        if side == 'l':
                            pylab.text(self.llcrnrx - xoffset,
                                       yy[n],
                                       lonlab,
                                       horizontalalignment='right',
                                       verticalalignment='center',
                                       fontsize=fontsize)
                        elif side == 'r':
                            pylab.text(self.urcrnrx + xoffset,
                                       yy[n],
                                       lonlab,
                                       horizontalalignment='left',
                                       verticalalignment='center',
                                       fontsize=fontsize)
                        elif side == 'b':
                            pylab.text(xx[n],
                                       self.llcrnry - yoffset,
                                       lonlab,
                                       horizontalalignment='center',
                                       verticalalignment='top',
                                       fontsize=fontsize)
                        else:
                            pylab.text(xx[n],
                                       self.urcrnry + yoffset,
                                       lonlab,
                                       horizontalalignment='center',
                                       verticalalignment='bottom',
                                       fontsize=fontsize)

        # make sure axis ticks are turned off
        ax.set_xticks([])
        ax.set_yticks([])
Example #23
def distance_transform_edt(input,
                           sampling=None,
                           return_distances=True,
                           return_indices=False,
                           distances=None,
                           indices=None):
    """Exact euclidean distance transform.

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element is returned along the first axis of the result.

    The return_distances, and return_indices flags can be used to
    indicate if the distance transform, the feature transform, or both
    must be returned.

    Optionally the sampling along each axis can be given by the
    sampling parameter which should be a sequence of length equal to
    the input rank, or a single number in which the sampling is assumed
    to be equal along all axes.

    the distances and indices arguments can be used to give optional
    output arrays that must be of the correct size and type (Float64
    and Int32).
    """
    if (not return_distances) and (not return_indices):
        msg = 'at least one of distances/indices must be specified'
        raise RuntimeError, msg
    ft_inplace = isinstance(indices, numarray.NumArray)
    dt_inplace = isinstance(distances, numarray.NumArray)
    # calculate the feature transform
    input = numarray.where(input, 1, 0).astype(numarray.Int8)
    if sampling is not None:
        sampling = _ni_support._normalize_sequence(sampling, input.rank)
        sampling = numarray.asarray(sampling, type=numarray.Float64)
        if not sampling.iscontiguous():
            sampling = sampling.copy()
    if ft_inplace:
        ft = indices
        if ft.shape != (input.rank, ) + input.shape:
            raise RuntimeError, 'indices has wrong shape'
        if ft.type() != numarray.Int32:
            raise RuntimeError, 'indices must be of Int32 type'
    else:
        ft = numarray.zeros((input.rank, ) + input.shape, type=numarray.Int32)
    _nd_image.euclidean_feature_transform(input, sampling, ft)
    # if requested, calculate the distance transform
    if return_distances:
        dt = ft - numarray.indices(input.shape, type=ft.type())
        dt = dt.astype(numarray.Float64)
        if sampling is not None:
            for ii in range(len(sampling)):
                dt[ii, ...] *= sampling[ii]
        numarray.multiply(dt, dt, dt)
        if dt_inplace:
            dt = numarray.add.reduce(dt, axis=0)
            if distances.shape != dt.shape:
                raise RuntimeError, 'indices has wrong shape'
            if distances.type() != numarray.Float64:
                raise RuntimeError, 'indices must be of Float64 type'
            numarray.sqrt(dt, distances)
            del dt
        else:
            dt = numarray.add.reduce(dt, axis=0)
            dt = numarray.sqrt(dt)
    # construct and return the result
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)
    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
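This routine survives essentially unchanged as scipy.ndimage.distance_transform_edt. A short illustrative call showing both the distance and the feature (index) transforms on a tiny binary image:

import numpy as np
from scipy import ndimage

a = np.array([[0, 0, 0, 0],
              [0, 1, 1, 0],
              [0, 1, 1, 0],
              [0, 0, 0, 0]])
dist, ind = ndimage.distance_transform_edt(a, return_indices=True)
print(dist)        # Euclidean distance to the nearest zero element
print(ind.shape)   # (2, 4, 4): row/column index of that nearest zero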
Example #24
# The dimensions of the resulting numarrays are
#   number of rows = number of events
#   number of columns = size of the vector (16)
#
print "Verify shape of ROOT array variable"
na = daSvac['GemTkrVector']
print na.shape

import time
tstart = time.time()
print "start"
TkrTriggered =  ( daSvac [ 'GemTkrVector'] == 1).astype(numarray.Int32)
print "Verify shape of result of vector expression"
print TkrTriggered.shape

CalLeTriggered = numarray.where ( daSvac ['GemCalLeVector'] == 1, 1, 0 )

#
# The labels of the columns we will add to the data set
#
label_TkrTriggered = 'Number of Towers with Tkr Triggers'
label_CalLeTriggered = 'Number of Towers with CalLe Triggers'
label_TowerTkrTrigGemCond = 'Number of Towers with Tkr Triggers after Cut'
label_TowerCalLeTrigGemCond = 'Number of Towers with CalLe Triggers after Cut'

#
# For each event, take the sum of the columns.
# Second argument in sum() is 0 for sum of row and 1 for column
# Then add resulting vector as a new column to the data set
#
nbrTkrTriggered = numarray.sum ( TkrTriggered, 1 )
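The last line above collapses the 16 tower columns into a single count per event. With numpy in place of numarray, the same per-event sum of a boolean condition looks like this (the 3-event array is made up):

import numpy as np

gem_tkr_vector = np.array([[1, 0, 1, 0],     # stand-in for the 16-column ROOT array
                           [0, 0, 0, 0],
                           [1, 1, 1, 1]])
tkr_triggered = (gem_tkr_vector == 1).astype(np.int32)
nbr_tkr_triggered = tkr_triggered.sum(axis=1)   # towers triggered per event
print(nbr_tkr_triggered)                        # [2 0 4]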
Example #25
llcornerx, llcornery = proj(-145.5,1.)
xmax=11297266.68; ymax=8959901.16
params['x_0'] = -llcornerx # add cartesian offset so lower left corner = (0,0)
params['y_0'] = -llcornery
# create a Proj instance for desired map.
proj = Proj(params)

# define grid (nx x ny regularly spaced native projection grid)
nx = 349; ny = 277                                                              
dx = xmax/(nx-1); dy = ymax/(ny-1)
xgrid = dx*numarray.indices((ny,nx))[1,:,:]
ygrid = dy*numarray.indices((ny,nx))[0,:,:]
# compute lons, lats of regular projection grid.
lonout, latout = proj(xgrid, ygrid, inverse=True)
# make sure lons are between 0 and 360
lonout = numarray.where(lonout < 0, lonout+360, lonout)
# make lat into colat (monotonically increasing from 0 at S Pole
# to 180 at N Pole).
latout = latout+90

# read in topo data from pickle (on a regular lat/lon grid)
topodict = cPickle.load(open('etopo20.pickle','rb'))        
lons = topodict['lons']
lats = topodict['lats']
topoin = topodict['topo']

# find coordinates of native projection grid.
xcoords = (len(lons)-1)*(lonout-lons[0])/(lons[-1]-lons[0])
ycoords = (len(lats)-1)*(latout-lats[0])/(lats[-1]-lats[0])
coords = [ycoords,xcoords]
# interpolate to projection grid using numarray.nd_image spline filter.
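The fractional array coordinates computed above are exactly what a spline coordinate-mapping routine expects; scipy.ndimage.map_coordinates is the modern counterpart of the numarray.nd_image call this snippet is cut off before. A minimal sketch with toy data and order-1 interpolation:

import numpy as np
from scipy import ndimage

topoin = np.arange(20.0).reshape(4, 5)          # stand-in for the lat/lon topo grid
ycoords = np.array([[0.5, 1.5], [2.0, 2.5]])    # fractional row coordinates
xcoords = np.array([[0.0, 1.0], [2.0, 3.5]])    # fractional column coordinates
topodat = ndimage.map_coordinates(topoin, [ycoords, xcoords], order=1)
print(topodat.shape)   # matches the coordinate arrays: (2, 2)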
Example #26
    def drawmeridians(self,ax,meridians,color='k',linewidth=1., \
                      linestyle='--',dashes=[1,1],labels=[0,0,0,0],\
                      font='rm',fontsize=12):
        """
 draw meridians (longitude lines).

 ax - current axis instance.
 meridians - list containing longitude values to draw (in degrees).
 color - color to draw meridians (default black).
 linewidth - line width for meridians (default 1.)
 linestyle - line style for meridians (default '--', i.e. dashed).
 dashes - dash pattern for meridians (default [1,1], i.e. 1 pixel on,
  1 pixel off).
 labels - list of 4 values (default [0,0,0,0]) that control whether
  meridians are labelled where they intersect the left, right, top or 
  bottom of the plot. For example labels=[1,0,0,1] will cause meridians
  to be labelled where they intersect the left and bottom of the plot,
  but not the right and top. Labels are located with a precision of 0.1
  degrees and are drawn using mathtext.
 font - mathtext font used for labels ('rm','tt','it' or 'cal', default 'rm').
 fontsize - font size in points for labels (default 12).
        """
        # don't draw meridians past latmax, always draw parallel at latmax.
        latmax = 80. # not used for cyl, merc projections.
        # offset for labels.
        yoffset = (self.urcrnry-self.llcrnry)/100./self.aspect
        xoffset = (self.urcrnrx-self.llcrnrx)/100.

        if self.projection not in ['merc','cyl']:
            lats = N.arange(-latmax,latmax+1).astype('f')
        else:
            lats = N.arange(-90,91).astype('f')
        xdelta = 0.1*(self.xmax-self.xmin)
        ydelta = 0.1*(self.ymax-self.ymin)
        for merid in meridians:
            lons = merid*N.ones(len(lats),'f')
            x,y = self(lons,lats)
            # remove points outside domain.
            testx = N.logical_and(x>=self.xmin-xdelta,x<=self.xmax+xdelta)
            x = N.compress(testx, x)
            y = N.compress(testx, y)
            testy = N.logical_and(y>=self.ymin-ydelta,y<=self.ymax+ydelta)
            x = N.compress(testy, x)
            y = N.compress(testy, y)
            if len(x) > 1 and len(y) > 1:
                # split into separate line segments if necessary.
                # (not necessary for mercator or cylindrical).
                xd = (x[1:]-x[0:-1])**2
                yd = (y[1:]-y[0:-1])**2
                dist = N.sqrt(xd+yd)
                split = dist > 500000.
                if N.sum(split) and self.projection not in ['merc','cyl']:
                   ind = (N.compress(split,pylab.squeeze(split*N.indices(xd.shape)))+1).tolist()
                   xl = []
                   yl = []
                   iprev = 0
                   ind.append(len(xd))
                   for i in ind:
                       xl.append(x[iprev:i])
                       yl.append(y[iprev:i])
                       iprev = i
                else:
                    xl = [x]
                    yl = [y]
                # draw each line segment.
                for x,y in zip(xl,yl):
                    # skip if only a point.
                    if len(x) > 1 and len(y) > 1:
                        l = Line2D(x,y,linewidth=linewidth,linestyle=linestyle)
                        l.set_color(color)
                        l.set_dashes(dashes)
                        ax.add_line(l)
        # draw labels for meridians.
        # search along edges of map to see if parallels intersect.
        # if so, find x,y location of intersection and draw a label there.
        if self.projection == 'cyl':
            dx = 0.01; dy = 0.01
        elif self.projection == 'merc':
            dx = 0.01; dy = 1000
        else:
            dx = 1000; dy = 1000
        for dolab,side in zip(labels,['l','r','t','b']):
            if not dolab: continue
            # for cyl or merc, don't draw meridians on left or right.
            if self.projection in ['cyl','merc'] and side in ['l','r']: continue
            if side in ['l','r']:
                nmax = int((self.ymax-self.ymin)/dy+1)
                if self.urcrnry < self.llcrnry:
                    yy = self.llcrnry-dy*N.arange(nmax)
                else:
                    yy = self.llcrnry+dy*N.arange(nmax)
                if side == 'l':
                    lons,lats = self(self.llcrnrx*N.ones(yy.shape,'f'),yy,inverse=True)
                else:
                    lons,lats = self(self.urcrnrx*N.ones(yy.shape,'f'),yy,inverse=True)
                lons = N.where(lons < 0, lons+360, lons)
                lons = [int(lon*10) for lon in lons.tolist()]
                lats = [int(lat*10) for lat in lats.tolist()]
            else:
                nmax = int((self.xmax-self.xmin)/dx+1)
                if self.urcrnrx < self.llcrnrx:
                    xx = self.llcrnrx-dx*N.arange(nmax)
                else:
                    xx = self.llcrnrx+dx*N.arange(nmax)
                if side == 'b':
                    lons,lats = self(xx,self.llcrnry*N.ones(xx.shape,'f'),inverse=True)
                else:
                    lons,lats = self(xx,self.urcrnry*N.ones(xx.shape,'f'),inverse=True)
                lons = N.where(lons < 0, lons+360, lons)
                lons = [int(lon*10) for lon in lons.tolist()]
                lats = [int(lat*10) for lat in lats.tolist()]
            for lon in meridians:
                if lon<0: lon=lon+360.
                # find index of meridian (there may be two, so
                # search from left and right).
                try:
                    nl = lons.index(int(lon*10))
                except:
                    nl = -1
                try:
                    nr = len(lons)-lons[::-1].index(int(lon*10))-1
                except:
                    nr = -1
                if lon>180:
                    lonlab = r'$\%s{%g\/^{\circ}\/W}$'%(font,N.fabs(lon-360))
                elif lon<180 and lon != 0:
                    lonlab = r'$\%s{%g\/^{\circ}\/E}$'%(font,lon)
                else:
                    lonlab = r'$\%s{%g\/^{\circ}}$'%(font,lon)
                # meridians can intersect each map edge twice.
                for i,n in enumerate([nl,nr]):
                    lat = lats[n]/10.
                    # no meridians > latmax for projections other than merc,cyl.
                    if self.projection not in ['merc','cyl'] and lat > latmax: continue
                    # don't bother if close to the first label.
                    if i and abs(nr-nl) < 100: continue
                    if n > 0:
                        if side == 'l':
                            pylab.text(self.llcrnrx-xoffset,yy[n],lonlab,horizontalalignment='right',verticalalignment='center',fontsize=fontsize)
                        elif side == 'r':
                            pylab.text(self.urcrnrx+xoffset,yy[n],lonlab,horizontalalignment='left',verticalalignment='center',fontsize=fontsize)
                        elif side == 'b':
                            pylab.text(xx[n],self.llcrnry-yoffset,lonlab,horizontalalignment='center',verticalalignment='top',fontsize=fontsize)
                        else:
                            pylab.text(xx[n],self.urcrnry+yoffset,lonlab,horizontalalignment='center',verticalalignment='bottom',fontsize=fontsize)

        # make sure axis ticks are turned off
        ax.set_xticks([]) 
        ax.set_yticks([])
def run():
    # Assume circular orbits with isotropic normal vectors on circular
    # orbits with circular orbital velocity of 500 km/s.
    ntrials = 10000
    nstars = int((1.0 / 3.0) * 100)
    gens = aorb.create_generators(7, ntrials*10)

    velTot = 500.0  # km/s -- velocity amplitude
    radTot = 2.04   # arcsec

    def fitfunc(p, fjac=None, vx=None, vy=None, vz=None, 
		vxerr=None, vyerr=None, vzerr=None):
        irad = math.radians(p[0])
        orad = math.radians(p[1])
	nx = math.sin(irad) * math.cos(orad)
	ny = -math.sin(irad) * math.sin(orad)
	nz = -math.cos(irad)

	top = (nx*vx + ny*vy + nz*vz)**2
	bot = (nx*vxerr + ny*vyerr + nz*vzerr)**2

	devs = (1.0 / (len(vx) - 1.0)) * top / bot
	status = 0

        return [status, devs.flat]

    # Keep track from every trial the incl, omeg, chi2, number of stars
    incl = na.zeros(ntrials, type=na.Float)
    omeg = na.zeros(ntrials, type=na.Float)
    chi2 = na.zeros(ntrials, type=na.Float)
    niter = na.zeros(ntrials)
    stars = na.zeros(ntrials)

    # Keep track of same stuff for spatial test
    inclPos = na.zeros(ntrials, type=na.Float)
    omegPos = na.zeros(ntrials, type=na.Float)
    chi2Pos = na.zeros(ntrials, type=na.Float)
    niterPos = na.zeros(ntrials)

    angleAvg = na.zeros(ntrials, type=na.Float)
    angleStd = na.zeros(ntrials, type=na.Float)

    for trial in range(ntrials):
	if ((trial % 100) == 0):
	    print 'Trial %d' % trial

	x = na.zeros(nstars, type=na.Float)
	y = na.zeros(nstars, type=na.Float)
	z = na.zeros(nstars, type=na.Float)

	vx = na.zeros(nstars, type=na.Float)
	vy = na.zeros(nstars, type=na.Float)
	vz = na.zeros(nstars, type=na.Float)

	rmag = na.zeros(nstars, type=na.Float)
	vmag = na.zeros(nstars, type=na.Float)
	nx = na.zeros(nstars, type=na.Float)
	ny = na.zeros(nstars, type=na.Float)
	nz = na.zeros(nstars, type=na.Float)
	

	for ss in range(nstars):
	    vx[ss] = gens[0].uniform(-velTot, velTot)
	
	    vyTot = math.sqrt(velTot**2 - vx[ss]**2)
	    vy[ss] = gens[1].uniform(-vyTot, vyTot)
	    
	    vz[ss] = math.sqrt(velTot**2 - vx[ss]**2 - vy[ss]**2)
	    vz[ss] *= gens[2].choice([-1.0, 1.0])
	    
	    x[ss] = gens[3].uniform(-radTot, radTot)

	    yTot = math.sqrt(radTot**2 - x[ss]**2)
	    y[ss] = gens[4].uniform(-yTot, yTot)

	    z[ss] = math.sqrt(radTot**2 - x[ss]**2 - y[ss]**2)
	    z[ss] *= gens[5].choice([-1.0, 1.0])

	    rmag[ss] = math.sqrt(x[ss]**2 + y[ss]**2 + z[ss]**2)
	    vmag[ss] = math.sqrt(vx[ss]**2 + vy[ss]**2 + vz[ss]**2)

	    rvec = [x[ss], y[ss], z[ss]]
	    vvec = [vx[ss], vy[ss], vz[ss]]
	    tmp = util.cross_product(rvec, vvec)
	    tmp /= rmag[ss] * vmag[ss]
	    nx[ss] = tmp[0]
	    ny[ss] = tmp[1]
	    nz[ss] = tmp[2]
	    
	r2d = na.hypot(x, y)
	v2d = na.hypot(vx, vy)
	top = (x * vy - y * vx)
	jz = (x * vy - y * vx) / (r2d * v2d)

	djzdx = (vy * r2d * v2d - (top * v2d * x / r2d)) / (r2d * v2d)**2
	djzdy = (-vx * r2d * v2d - (top * v2d * y / r2d)) / (r2d * v2d)**2
	djzdvx = (-y * r2d * v2d - (top * r2d * vx / v2d)) / (r2d * v2d)**2
	djzdvy = (x * r2d * v2d - (top * r2d * vy / v2d)) / (r2d * v2d)**2
	
	xerr = na.zeros(nstars, type=na.Float) + 0.001 # arcsec
	yerr = na.zeros(nstars, type=na.Float) + 0.001
	vxerr = na.zeros(nstars, type=na.Float) + 10.0  # km/s
	vyerr = na.zeros(nstars, type=na.Float) + 10.0
	vzerr = na.zeros(nstars, type=na.Float) + 30.0 # km/s
	
	jzerr = na.sqrt((djzdx*xerr)**2 + (djzdy*yerr)**2 + 
			(djzdvx*vxerr)**2 + (djzdvy*vyerr)**2)

	# Eliminate all stars with jz > 0 and jz/jzerr < 2
        # I think these are the cuts they are doing
	idx = (na.where((jz < 0) & (na.abs(jz/jzerr) > 2)))[0]
	#idx = (na.where(jz < 0))[0]
	#idx = range(len(jz))

	cotTheta = vz / na.sqrt(vx**2 + vy**2)
	phi = na.arctan2(vy, vx)

	# Initial guess:
	p0 = na.zeros(2, type=na.Float)
	p0[0] = gens[5].uniform(0.1, 90)     # deg -- inclination
	p0[1] = gens[6].uniform(0.1, 360)     # deg -- omega

	# Setup properties of each free parameter.
	parinfo = {'relStep':10.0, 'step':10.0, 'fixed':0, 
		   'limits':[0.0,360.0],
		   'limited':[1,1], 'mpside':1}
	pinfo = [parinfo.copy() for i in range(len(p0))]

	pinfo[0]['limits'] = [0.0, 180.0]
	pinfo[1]['limits'] = [0.0, 360.0]

	# Stuff to pass into the fit function
	functargs = {'vx': vx[idx], 'vy': vy[idx], 'vz': vz[idx],
		     'vxerr':vxerr[idx], 'vyerr':vyerr[idx], 'vzerr':vzerr[idx]}

	m = mpfit.mpfit(fitfunc, p0, functkw=functargs, parinfo=pinfo,
			quiet=1)
	if (m.status <= 0): 
	    print 'error message = ', m.errmsg

	p = m.params

	incl[trial] = p[0]
	omeg[trial] = p[1]
	stars[trial] = len(idx)
	chi2[trial] = m.fnorm / (stars[trial] - len(p0))
	niter[trial] = m.niter
	
	# Convert the best-fit angles (degrees) to radians before building
	# the normal vector, consistent with fitfunc above.
	irad = math.radians(p[0])
	orad = math.radians(p[1])
	n = [math.sin(irad) * math.cos(orad),
	     -math.sin(irad) * math.sin(orad),
	     -math.cos(irad)]

	# Now look at the angle between the best-fit normal vector from the
	# velocity data and the true r cross v normal vector: take the dot
	# product between n and (nx, ny, nz).
	angle = na.arccos(n[0]*nx + n[1]*ny + n[2]*nz)
	angle *= (180.0 / math.pi)

	# What is the average angle and std angle
	angleAvg[trial] = angle.mean()
	angleStd[trial] = angle.stddev()

# 	print chi2[trial], chi2Pos[trial], incl[trial], inclPos[trial], \
# 	    omeg[trial], omegPos[trial], niter[trial], niterPos[trial]
# 	print angleAvg[trial], angleStd[trial]
	

    # Plot the angle w.r.t. the best-fit normal vs. chi^2 from the velocity fit
    pylab.clf()
    pylab.semilogx(chi2, angleAvg, 'k.')
    pylab.errorbar(chi2, angleAvg, fmt='k.', yerr=angleStd)
    pylab.xlabel('Chi^2')
    pylab.ylabel('Angle w.r.t. Best Fit')
    foo = raw_input('Continue?')

    # Probability of encountering solution with chi^2 < 2
    idx = (na.where(chi2 < 2.0))[0]
    print 'Prob(chi^2 < 2) = %5.3f ' % (len(idx) / float(ntrials))

    # Probability of encountering a solution with chi^2 < 2 AND
    # inclination between 20 and 40 degrees (no cut on Omega here)
    foo = (na.where((chi2 < 2.0) & (incl > 20) & (incl < 40)))[0]
    print 'Prob of chi2 and incl = %5.3f' % (len(foo) / float(ntrials))

    pylab.clf()
    pylab.subplot(2, 2, 1)
    pylab.hist(chi2, bins=na.arange(0, 10, 0.5))
    pylab.xlabel('Chi^2')

    pylab.subplot(2, 2, 2)
    pylab.hist(incl[idx])
    pylab.xlabel('Inclination for Chi^2 < 2')
    rng = pylab.axis()
    pylab.axis([0, 180, rng[2], rng[3]])

    pylab.subplot(2, 2, 3)
    pylab.hist(omeg[idx])
    pylab.xlabel('Omega for Chi^2 < 2')
    rng = pylab.axis()
    pylab.axis([0, 360, rng[2], rng[3]])

    pylab.subplot(2, 2, 4)
    pylab.hist(stars[idx])
    pylab.xlabel('Nstars for Chi^2 < 2')
    rng = pylab.axis()
    pylab.axis([0, 33, rng[2], rng[3]])

    pylab.savefig('diskTest.png')
    
    # Pickle everything
    foo = {'incl': incl, 'omeg': omeg, 'star': stars, 
	   'chi2': chi2, 'niter': niter}

    pickle.dump(foo, open('diskTestSave.pick', 'w'))
Exemple #28
    #print v_list
    for v in v_list:
        t[v] = index
        index += 1

    for i in range(m.G):
        plot[i, j] = t[plot[i, j]]

space = 15
for i in range(len(plot[0])):
    slen = space - len(data.headers[i])
    print " " * slen + data.headers[i],
for i in range(len(plot)):
    print
    for j in range(len(plot[i])):
        print " " * (space - 1) + str(plot[i, j]),
print "\n"

print "\n[",
for p in plot:
    print p.tolist(), ";"
print "];\n\n"

for i in range(20):
    cut = i * 0.05
    z = m.classify(data, entropy_cutoff=cut, silent=1)
    t = numarray.where(z == -1)
    ind = t[0]
    print cut, ":", len(z[ind])

m.printTraceback(data, c)
Exemple #29
def distance_transform_cdt(input,
                           structure='chessboard',
                           return_distances=True,
                           return_indices=False,
                           distances=None,
                           indices=None):
    """Distance transform for chamfer type of transforms.

    The structure determines the type of chamfering that is done. If
    the structure is equal to 'cityblock' a structure is generated
    using generate_binary_structure with a squared distance equal to
    1. If the structure is equal to 'chessboard', a structure is
    generated using generate_binary_structure with a squared distance
    equal to the rank of the array. These choices correspond to the
    common interpretations of the cityblock and the chessboard
    distance metrics in two dimensions.

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element is returned along the first axis of the result.

    The return_distances, and return_indices flags can be used to
    indicate if the distance transform, the feature transform, or both
    must be returned.
    
    The distances and indices arguments can be used to give optional
    output arrays that must be of the correct size and type (both Int32).
    """
    if (not return_distances) and (not return_indices):
        msg = 'at least one of distances/indices must be specified'
        raise RuntimeError, msg
    ft_inplace = isinstance(indices, numarray.NumArray)
    dt_inplace = isinstance(distances, numarray.NumArray)
    input = numarray.asarray(input)
    if structure == 'cityblock':
        rank = input.rank
        structure = generate_binary_structure(rank, 1)
    elif structure == 'chessboard':
        rank = input.rank
        structure = generate_binary_structure(rank, rank)
    else:
        try:
            structure = numarray.asarray(structure)
        except:
            raise RuntimeError, 'invalid structure provided'
        for s in structure.shape:
            if s != 3:
                raise RuntimeError, 'structure sizes must be equal to 3'
    if not structure.iscontiguous():
        structure = structure.copy()
    if dt_inplace:
        if distances.type() != numarray.Int32:
            raise RuntimeError, 'distances must be of Int32 type'
        if distances.shape != input.shape:
            raise RuntimeError, 'distances has wrong shape'
        dt = distances
        dt[...] = numarray.where(input, -1, 0).astype(numarray.Int32)
    else:
        dt = numarray.where(input, -1, 0).astype(numarray.Int32)
    rank = dt.rank
    if return_indices:
        sz = dt.nelements()
        ft = numarray.arange(sz, shape=dt.shape, type=numarray.Int32)
    else:
        ft = None
    _nd_image.distance_transform_op(structure, dt, ft)
    dt = dt[tuple([slice(None, None, -1)] * rank)]
    if return_indices:
        ft = ft[tuple([slice(None, None, -1)] * rank)]
    _nd_image.distance_transform_op(structure, dt, ft)
    dt = dt[tuple([slice(None, None, -1)] * rank)]
    if return_indices:
        ft = ft[tuple([slice(None, None, -1)] * rank)]
        ft = numarray.ravel(ft)
        if ft_inplace:
            if indices.type() != numarray.Int32:
                raise RuntimeError, 'indices must be of Int32 type'
            if indices.shape != (dt.rank, ) + dt.shape:
                raise RuntimeError, 'indices has wrong shape'
            tmp = indices
        else:
            tmp = numarray.indices(dt.shape, type=numarray.Int32)
        for ii in range(tmp.shape[0]):
            rtmp = numarray.ravel(tmp[ii, ...])[ft]
            rtmp.setshape(dt.shape)
            tmp[ii, ...] = rtmp
        ft = tmp

    # construct and return the result
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)
    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
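
# A minimal usage sketch for distance_transform_cdt above (hedged: it assumes
# numarray and the _nd_image extension import cleanly; the small array is
# purely illustrative).
#
#   a = numarray.array([[0, 0, 0, 0],
#                       [0, 1, 1, 0],
#                       [0, 0, 0, 0]])
#   d = distance_transform_cdt(a, structure='chessboard')
#
# Every foreground pixel (value 1) is one chessboard step from the background,
# so the corresponding entries of d should be 1 and the background entries 0.
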
def run():
    # Assume circular orbits with isotropic normal vectors on circular
    # orbits with circular orbital velocity of 500 km/s.
    ntrials = 10000
    nstars = int((1.0 / 3.0) * 100)
    gens = aorb.create_generators(7, ntrials*10)

    velTot = 500.0  # km/s -- velocity amplitude
    radTot = 2.04   # arcsec

    def fitfunc(p, fjac=None, vx=None, vy=None, vz=None, 
		vxerr=None, vyerr=None, vzerr=None):
        irad = math.radians(p[0])
        orad = math.radians(p[1])
	nx = math.sin(irad) * math.cos(orad)
	ny = -math.sin(irad) * math.sin(orad)
	nz = -math.cos(irad)

	top = (nx*vx + ny*vy + nz*vz)**2
	bot = (nx*vxerr + ny*vyerr + nz*vzerr)**2

	devs = (1.0 / (len(vx) - 1.0)) * top / bot
	status = 0

        return [status, devs.flat]

    # Keep track of the incl, omeg, chi2, and number of stars from every trial
    incl = na.zeros(ntrials, type=na.Float)
    omeg = na.zeros(ntrials, type=na.Float)
    chi2 = na.zeros(ntrials, type=na.Float)
    niter = na.zeros(ntrials)
    stars = na.zeros(ntrials)

    # Keep track of same stuff for spatial test
    inclPos = na.zeros(ntrials, type=na.Float)
    omegPos = na.zeros(ntrials, type=na.Float)
    chi2Pos = na.zeros(ntrials, type=na.Float)
    niterPos = na.zeros(ntrials)

    angleAvg = na.zeros(ntrials, type=na.Float)
    angleStd = na.zeros(ntrials, type=na.Float)

    for trial in range(ntrials):
	if ((trial % 100) == 0):
	    print('Trial %d' % trial)

	x = na.zeros(nstars, type=na.Float)
	y = na.zeros(nstars, type=na.Float)
	z = na.zeros(nstars, type=na.Float)

	vx = na.zeros(nstars, type=na.Float)
	vy = na.zeros(nstars, type=na.Float)
	vz = na.zeros(nstars, type=na.Float)

	rmag = na.zeros(nstars, type=na.Float)
	vmag = na.zeros(nstars, type=na.Float)
	nx = na.zeros(nstars, type=na.Float)
	ny = na.zeros(nstars, type=na.Float)
	nz = na.zeros(nstars, type=na.Float)
	

	for ss in range(nstars):
	    vx[ss] = gens[0].uniform(-velTot, velTot)
	
	    vyTot = math.sqrt(velTot**2 - vx[ss]**2)
	    vy[ss] = gens[1].uniform(-vyTot, vyTot)
	    
	    vz[ss] = math.sqrt(velTot**2 - vx[ss]**2 - vy[ss]**2)
	    vz[ss] *= gens[2].choice([-1.0, 1.0])
	    
	    x[ss] = gens[3].uniform(-radTot, radTot)

	    yTot = math.sqrt(radTot**2 - x[ss]**2)
	    y[ss] = gens[4].uniform(-yTot, yTot)

	    z[ss] = math.sqrt(radTot**2 - x[ss]**2 - y[ss]**2)
	    z[ss] *= gens[5].choice([-1.0, 1.0])

	    rmag[ss] = math.sqrt(x[ss]**2 + y[ss]**2 + z[ss]**2)
	    vmag[ss] = math.sqrt(vx[ss]**2 + vy[ss]**2 + vz[ss]**2)

	    rvec = [x[ss], y[ss], z[ss]]
	    vvec = [vx[ss], vy[ss], vz[ss]]
	    tmp = util.cross_product(rvec, vvec)
	    tmp /= rmag[ss] * vmag[ss]
	    nx[ss] = tmp[0]
	    ny[ss] = tmp[1]
	    nz[ss] = tmp[2]
	    
	r2d = na.hypot(x, y)
	v2d = na.hypot(vx, vy)
	top = (x * vy - y * vx)
	jz = (x * vy - y * vx) / (r2d * v2d)

	djzdx = (vy * r2d * v2d - (top * v2d * x / r2d)) / (r2d * v2d)**2
	djzdy = (-vx * r2d * v2d - (top * v2d * y / r2d)) / (r2d * v2d)**2
	djzdvx = (-y * r2d * v2d - (top * r2d * vx / v2d)) / (r2d * v2d)**2
	djzdvy = (x * r2d * v2d - (top * r2d * vy / v2d)) / (r2d * v2d)**2
	
	xerr = na.zeros(nstars, type=na.Float) + 0.001 # arcsec
	yerr = na.zeros(nstars, type=na.Float) + 0.001
	vxerr = na.zeros(nstars, type=na.Float) + 10.0  # km/s
	vyerr = na.zeros(nstars, type=na.Float) + 10.0
	vzerr = na.zeros(nstars, type=na.Float) + 30.0 # km/s
	
	jzerr = na.sqrt((djzdx*xerr)**2 + (djzdy*yerr)**2 + 
			(djzdvx*vxerr)**2 + (djzdvy*vyerr)**2)

	# Keep only stars with jz < 0 and |jz/jzerr| > 2 (i.e. eliminate
	# prograde or low-significance stars). I think these are the cuts
	# they are doing.
	idx = (na.where((jz < 0) & (na.abs(jz/jzerr) > 2)))[0]
	#idx = (na.where(jz < 0))[0]
	#idx = range(len(jz))

	cotTheta = vz / na.sqrt(vx**2 + vy**2)
	phi = na.arctan2(vy, vx)

	# Initial guess:
	p0 = na.zeros(2, type=na.Float)
	p0[0] = gens[5].uniform(0.1, 90)     # deg -- inclination
	p0[1] = gens[6].uniform(0.1, 360)     # deg -- omega

	# Setup properties of each free parameter.
	parinfo = {'relStep':10.0, 'step':10.0, 'fixed':0, 
		   'limits':[0.0,360.0],
		   'limited':[1,1], 'mpside':1}
	pinfo = [parinfo.copy() for i in range(len(p0))]

	pinfo[0]['limits'] = [0.0, 180.0]
	pinfo[1]['limits'] = [0.0, 360.0]

	# Stuff to pass into the fit function
	functargs = {'vx': vx[idx], 'vy': vy[idx], 'vz': vz[idx],
		     'vxerr':vxerr[idx], 'vyerr':vyerr[idx], 'vzerr':vzerr[idx]}

	m = mpfit.mpfit(fitfunc, p0, functkw=functargs, parinfo=pinfo,
			quiet=1)
	if (m.status <= 0): 
	    print('error message = ', m.errmsg)

	p = m.params

	incl[trial] = p[0]
	omeg[trial] = p[1]
	stars[trial] = len(idx)
	chi2[trial] = m.fnorm / (stars[trial] - len(p0))
	niter[trial] = m.niter
	
	# Convert the best-fit angles (degrees) to radians before building
	# the normal vector, consistent with fitfunc above.
	irad = math.radians(p[0])
	orad = math.radians(p[1])
	n = [math.sin(irad) * math.cos(orad),
	     -math.sin(irad) * math.sin(orad),
	     -math.cos(irad)]

	# Now look at the angle between the best-fit normal vector from the
	# velocity data and the true r cross v normal vector: take the dot
	# product between n and (nx, ny, nz).
	angle = na.arccos(n[0]*nx + n[1]*ny + n[2]*nz)
	angle *= (180.0 / math.pi)

	# What is the average angle and std angle
	angleAvg[trial] = angle.mean()
	angleStd[trial] = angle.stddev()

# 	print chi2[trial], chi2Pos[trial], incl[trial], inclPos[trial], \
# 	    omeg[trial], omegPos[trial], niter[trial], niterPos[trial]
# 	print angleAvg[trial], angleStd[trial]
	

    # Plot the angle w.r.t. the best-fit normal vs. chi^2 from the velocity fit
    pylab.clf()
    pylab.semilogx(chi2, angleAvg, 'k.')
    pylab.errorbar(chi2, angleAvg, fmt='k.', yerr=angleStd)
    pylab.xlabel('Chi^2')
    pylab.ylabel('Angle w.r.t. Best Fit')
    foo = input('Continue?')

    # Probability of encountering solution with chi^2 < 2
    idx = (na.where(chi2 < 2.0))[0]
    print('Prob(chi^2 < 2) = %5.3f ' % (len(idx) / float(ntrials)))

    # Probability of encountering a solution with chi^2 < 2 AND
    # inclination between 20 and 40 degrees (no cut on Omega here)
    foo = (na.where((chi2 < 2.0) & (incl > 20) & (incl < 40)))[0]
    print('Prob of chi2 and incl = %5.3f' % (len(foo) / float(ntrials)))

    pylab.clf()
    pylab.subplot(2, 2, 1)
    pylab.hist(chi2, bins=na.arange(0, 10, 0.5))
    pylab.xlabel('Chi^2')

    pylab.subplot(2, 2, 2)
    pylab.hist(incl[idx])
    pylab.xlabel('Inclination for Chi^2 < 2')
    rng = pylab.axis()
    pylab.axis([0, 180, rng[2], rng[3]])

    pylab.subplot(2, 2, 3)
    pylab.hist(omeg[idx])
    pylab.xlabel('Omega for Chi^2 < 2')
    rng = pylab.axis()
    pylab.axis([0, 360, rng[2], rng[3]])

    pylab.subplot(2, 2, 4)
    pylab.hist(stars[idx])
    pylab.xlabel('Nstars for Chi^2 < 2')
    rng = pylab.axis()
    pylab.axis([0, 33, rng[2], rng[3]])

    pylab.savefig('diskTest.png')
    
    # Pickle everything
    foo = {'incl': incl, 'omeg': omeg, 'star': stars, 
	   'chi2': chi2, 'niter': niter}

    pickle.dump(foo, open('diskTestSave.pick', 'w'))
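
# A short follow-up sketch (hedged: the file name and keys simply mirror the
# dictionary pickled at the end of run() above):
#
#   saved = pickle.load(open('diskTestSave.pick'))
#   good = (na.where(saved['chi2'] < 2.0))[0]
#   print('Prob(chi^2 < 2) = %5.3f' % (len(good) / float(len(saved['chi2']))))
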
Exemple #31
def matchum(file1,
            file2,
            tol=10,
            perr=4,
            aerr=1.0,
            nmax=40,
            im_masks1=[],
            im_masks2=[],
            debug=0,
            domags=0,
            xrange=None,
            yrange=None,
            sigma=4,
            aoffset=0):
    '''Take the output of two sextractor runs and match up the objects with
   each other (find out which objects in the first file match up with
   objects in the second file.  The routine considers a 'match' to be any 
   two objects that are closer than tol pixels (after applying the shift).  
   Returns a 6-tuple:  (x1,y1,x2,y2,o1,o2).  o1 and o2 are the object
   numbers such that o1[i] in file 1 corresponds to o2[i] in file 2.'''
    NA = num.NewAxis

    sexdata1 = readsex(file1)
    sexdata2 = readsex(file2)

    # Use the readsex data to get arrays of the (x,y) positions
    x1 = num.asarray(sexdata1[0]['X_IMAGE'])
    y1 = num.asarray(sexdata1[0]['Y_IMAGE'])
    x2 = num.asarray(sexdata2[0]['X_IMAGE'])
    y2 = num.asarray(sexdata2[0]['Y_IMAGE'])
    m1 = num.asarray(sexdata1[0]['MAG_BEST'])
    m2 = num.asarray(sexdata2[0]['MAG_BEST'])
    o1 = num.asarray(sexdata1[0]['NUMBER'])
    o2 = num.asarray(sexdata2[0]['NUMBER'])
    f1 = num.asarray(sexdata1[0]['FLAGS'])
    f2 = num.asarray(sexdata2[0]['FLAGS'])

    # First, make a cut on the flags:
    gids = num.where(f1 < 4)
    x1 = x1[gids]
    y1 = y1[gids]
    m1 = m1[gids]
    o1 = o1[gids]
    gids = num.where(f2 < 4)
    x2 = x2[gids]
    y2 = y2[gids]
    m2 = m2[gids]
    o2 = o2[gids]

    # next, if there is a range to use:
    if xrange is not None and yrange is not None:
        cond = num.greater(x1, xrange[0])*num.less(x1,xrange[1])*\
              num.greater(y1, yrange[0])*num.less(y1,yrange[1])
        gids = num.where(cond)
        x1 = x1[gids]
        y1 = y1[gids]
        m1 = m1[gids]
        o1 = o1[gids]
        cond = num.greater(x2, xrange[0])*num.less(x2,xrange[1])*\
              num.greater(y2, yrange[0])*num.less(y2,yrange[1])
        gids = num.where(cond)
        x2 = x2[gids]
        y2 = y2[gids]
        m2 = m2[gids]
        o2 = o2[gids]

    # Use the user masks
    for m in im_masks1:
        print "applying mask (%d,%d,%d,%d)" % tuple(m)
        condx = num.less(x1, m[0]) + num.greater(x1, m[1])
        condy = num.less(y1, m[2]) + num.greater(y1, m[3])
        gids = num.where(condx + condy)
        x1 = x1[gids]
        y1 = y1[gids]
        m1 = m1[gids]
        o1 = o1[gids]

    for m in im_masks2:
        print "applying mask (%d,%d,%d,%d)" % tuple(m)
        condx = num.less(x2, m[0]) + num.greater(x2, m[1])
        condy = num.less(y2, m[2]) + num.greater(y2, m[3])
        gids = num.where(condx + condy)
        x2 = x2[gids]
        y2 = y2[gids]
        m2 = m2[gids]
        o2 = o2[gids]

    if nmax:
        if len(x1) > nmax:
            ids = num.argsort(m1)[0:nmax]
            x1 = x1[ids]
            y1 = y1[ids]
            m1 = m1[ids]
            o1 = o1[ids]
        if len(x2) > nmax:
            ids = num.argsort(m2)[0:nmax]
            x2 = x2[ids]
            y2 = y2[ids]
            m2 = m2[ids]
            o2 = o2[ids]
    if debug:
        print "objects in frame 1:"
        print o1
        print "objects in frame 2:"
        print o2
        mp = pygplot.MPlot(2, 1, device='/XWIN')
        p = pygplot.Plot()
        p.point(x1, y1)
        [p.label(x1[i], y1[i], "%d" % o1[i]) for i in range(len(x1))]
        mp.add(p)
        p = pygplot.Plot()
        p.point(x2, y2)
        [p.label(x2[i], y2[i], "%d" % o2[i]) for i in range(len(x2))]
        mp.add(p)
        mp.plot()
        mp.close()

    # Now, we make 2-D arrays of all the differences in x and y between each pair
    #  of objects.  e.g., dx1[n,m] is the delta-x between object n and m in file 1 and
    #  dy2[n,m] is the y-distance between object n and m in file 2.
    dx1 = x1[NA, :] - x1[:, NA]
    dx2 = x2[NA, :] - x2[:, NA]
    dy1 = y1[NA, :] - y1[:, NA]
    dy2 = y2[NA, :] - y2[:, NA]
    # Same, but with angles
    da1 = num.arctan2(dy1, dx1) * 180 / num.pi
    da2 = num.arctan2(dy2, dx2) * 180 / num.pi
    # Same, but with absolute distances
    ds1 = num.sqrt(num.power(dx1, 2) + num.power(dy1, 2))
    ds2 = num.sqrt(num.power(dx2, 2) + num.power(dy2, 2))

    # Here's the real magic:  this is a matrix of matrices (4-D).  Consider 4 objects:
    #  objects i and j in file 1 and objects m and n in file 2.  dx[i,j,m,n] is the
    #  difference between delta-xs for objects i,j in file 1 and m,n in file 2.  If object
    #  i corresponds to object m and object j corresponds to object n, this should be a small
    #  number, regardless of an overall shift in coordinate systems between file 1 and 2.
    dx = dx1[::, ::, NA, NA] - dx2[NA, NA, ::, ::]
    dy = dy1[::, ::, NA, NA] - dy2[NA, NA, ::, ::]
    da = da1[::, ::, NA, NA] - da2[NA, NA, ::, ::] + aoffset
    ds = ds1[::, ::, NA, NA] - ds2[NA, NA, ::, ::]
    # pick out close pairs.
    #use = num.less(dy,perr)*num.less(dx,perr)*num.less(num.abs(da),aerr)
    use = num.less(ds, perr) * num.less(num.abs(da), aerr)
    use = use.astype(num.Int32)

    #use = num.less(num.abs(da),perr)
    suse = num.add.reduce(num.add.reduce(use, 3), 1)
    print suse[0]

    guse = num.greater(suse, suse.flat.max() / 2)
    i = [j for j in range(x1.shape[0]) if num.sum(guse[j])]
    m = [num.argmax(guse[j]) for j in range(x1.shape[0]) if num.sum(guse[j])]
    xx0, yy0, oo0, mm0 = num.take([x1, y1, o1, m1], i, 1)
    xx1, yy1, oo1, mm1 = num.take([x2, y2, o2, m2], m, 1)
    if debug:
        mp = pygplot.MPlot(2, 1, device='/XWIN')
        p = pygplot.Plot()
        p.point(xx0, yy0)
        [p.label(xx0[i], yy0[i], "%d" % oo0[i]) for i in range(len(xx0))]
        mp.add(p)
        p = pygplot.Plot()
        p.point(xx1, yy1)
        [p.label(xx1[i], yy1[i], "%d" % oo1[i]) for i in range(len(xx1))]
        mp.add(p)
        mp.plot()
        mp.close()
    xshift, xscat = stats.bwt(xx0 - xx1)
    xscat = max([1.0, xscat])
    yshift, yscat = stats.bwt(yy0 - yy1)
    yscat = max([1.0, yscat])
    mshift, mscat = stats.bwt(mm0 - mm1)
    print "xscat = ", xscat
    print "yscat = ", yscat
    print "xshift = ", xshift
    print "yshift = ", yshift
    print "mshift = ", mshift
    print "mscat = ", mscat
    keep = num.less(num.abs(xx0-xx1-xshift),sigma*xscat)*\
          num.less(num.abs(yy0-yy1-yshift),sigma*yscat)
    # This is a list of x,y,object# in each file.
    xx0, yy0, oo0, xx1, yy1, oo1 = num.compress(keep,
                                                [xx0, yy0, oo0, xx1, yy1, oo1],
                                                1)

    if debug:
        print file1, oo0
        print file2, oo1
        mp = pygplot.MPlot(2, 1, device='temp.ps/CPS')
        p1 = pygplot.Plot()
        p1.point(xx0, yy0, symbol=25, color='red')
        for i in range(len(xx0)):
            p1.label(xx0[i], yy0[i], " %d" % oo0[i], color='red')
        mp.add(p1)
        p2 = pygplot.Plot()
        p2.point(xx1, yy1, symbol=25, color='green')
        for i in range(len(xx1)):
            p2.label(xx1[i], yy1[i], " %d" % oo1[i], color='green')
        mp.add(p2)
        mp.plot()
        mp.close()

    if domags:
        return (xx0, yy0, mm0, xx1, yy1, mm1, mshift, mscat, oo0, oo1)
    else:
        return (xx0, yy0, xx1, yy1, oo0, oo1)
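
# A minimal usage sketch for matchum (hedged: the catalogue names are
# hypothetical SExtractor outputs that readsex must be able to parse):
#
#   x1, y1, x2, y2, o1, o2 = matchum('frame1.cat', 'frame2.cat', tol=10)
#   print len(o1), 'matched objects'
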
def create_prediction_success_table(
    LCM, location_set, observed_choices_id, geographies=[], choice_method="mc", data_objects=None
):
    """this function creates a table tabulating number of agents observed versus predicted by geographies for location choice model
    LCM is an instance of Location Choice Model after run_estimation,
    location_set is the set of location in simulation, e.g. gridcell,
    observed_choice_id is the location_set id (e.g. grid_id) observed,
    geographies is a list of geographies to create prediction sucess table for,
    choice_method is the method used to select choice for agents, either mc or max_prob
    data_objects is the same as data_objects used to run LCM simulation, but includes entries for geographies
    """
    LCM.simulate_step()
    choices = sample_choice(LCM.model.probabilities, choice_method)
    choices_index = LCM.model_resources.translate("index")[choices]  # translate choices into index of location_set
    # maxprob_choices = sample_choice(LCM.model.probabilities, method="max_prob")  #max prob choice
    # maxprob_choices_index = LCM.model_resources.translate("index")[maxprob_choices]
    results = []

    gcs = location_set
    for geography in geographies:
        geo = data_objects.translate(geography)

        # get geo_id for observed agents
        gc_index = gcs.get_id_index(observed_choices_id)
        if geo.id_name[0] not in gcs.get_attribute_names():
            gcs.compute_variables(geo.id_name[0], resources=data_objects)
        geo_ids_obs = gcs.get_attribute(geo.id_name[0])[gc_index]

        #        obs = copy.deepcopy(agent_set)
        #        obs.subset_by_index(agents_index)
        #        obs.set_values_of_one_attribute(gcs.id_name[0], observed_choices_id)
        # resources.merge({"household": obs}) #, "gridcell": gcs, "zone": zones, "faz":fazes})
        #        obs.compute_variables(geo.id_name[0], resources=resources)
        #        obs_geo_ids = obs.get_attribute(geo.id_name[0])

        # get geo_id for simulated agents
        geo_ids_sim = gcs.get_attribute(geo.id_name[0])[choices_index]

        # sim = copy_dataset(obs)
        # sim.set_values_of_one_attribute(gcs.id_name[0], gcs.get_id_attribute()[mc_choices_index])
        # resources.merge({"household": sim})

        geo_size = geo.size()
        myids = geo.get_id_attribute()

        pred_matrix = zeros((geo_size, geo_size))
        p_success = zeros((geo_size,)).astype(Float32)

        f = 0
        for geo_id in myids:
            ids = geo_ids_sim[where(geo_ids_obs == geo_id)]  # get simulated geo_id for agents observed in this geo_id
            # resources.merge({"agents_index": agents_index_in_geo, "agent":sim})
            what = ones(ids.size())
            pred_matrix[f] = array(nd_image_sum(what, labels=ids, index=myids))
            print pred_matrix[f]
            if sum(pred_matrix[f]) > 0:
                p_success[f] = float(pred_matrix[f, f]) / sum(pred_matrix[f])

            # sim.increment_version(gcs.id_name[0])  #to trigger recomputation in next iteration
            f += 1

        print p_success
        results.append((pred_matrix.copy(), p_success.copy()))

    return results
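
# A hedged call sketch (every name below is hypothetical; the LCM instance,
# location set and resources must come from an existing model run):
#
#   results = create_prediction_success_table(my_lcm, gridcells,
#                                              observed_grid_ids,
#                                              geographies=['zone'],
#                                              choice_method='mc',
#                                              data_objects=my_resources)
#   pred_matrix, p_success = results[0]
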
Exemple #33
def distance_transform_cdt(input, structure = 'chessboard',
                        return_distances = True, return_indices = False,
                        distances = None, indices = None):
    """Distance transform for chamfer type of transforms.

    The structure determines the type of chamfering that is done. If
    the structure is equal to 'cityblock' a structure is generated
    using generate_binary_structure with a squared distance equal to
    1. If the structure is equal to 'chessboard', a structure is
    generated using generate_binary_structure with a squared distance
    equal to the rank of the array. These choices correspond to the
    common interpretations of the cityblock and the chessboard
    distance metrics in two dimensions.

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element is returned along the first axis of the result.

    The return_distances, and return_indices flags can be used to
    indicate if the distance transform, the feature transform, or both
    must be returned.
    
    The distances and indices arguments can be used to give optional
    output arrays that must be of the correct size and type (both Int32).
    """
    if (not return_distances) and (not return_indices):
        msg = 'at least one of distances/indices must be specified'
        raise RuntimeError, msg    
    ft_inplace = isinstance(indices, numarray.NumArray)
    dt_inplace = isinstance(distances, numarray.NumArray)
    input = numarray.asarray(input)
    if structure == 'cityblock':
        rank = input.rank
        structure = generate_binary_structure(rank, 1)
    elif structure == 'chessboard':
        rank = input.rank
        structure = generate_binary_structure(rank, rank)
    else:
        try:
            structure = numarray.asarray(structure)
        except:
            raise RuntimeError, 'invalid structure provided'
        for s in structure.shape:
            if s != 3:
                raise RuntimeError, 'structure sizes must be equal to 3'
    if not structure.iscontiguous():
        structure = structure.copy()
    if dt_inplace:
        if distances.type() != numarray.Int32:
            raise RuntimeError, 'distances must be of Int32 type'    
        if distances.shape != input.shape:
            raise RuntimeError, 'distances has wrong shape'
        dt = distances
        dt[...] = numarray.where(input, -1, 0).astype(numarray.Int32)
    else:
        dt = numarray.where(input, -1, 0).astype(numarray.Int32)
    rank = dt.rank
    if return_indices:
        sz = dt.nelements()
        ft = numarray.arange(sz, shape=dt.shape, type = numarray.Int32)
    else:
        ft = None
    _nd_image.distance_transform_op(structure, dt, ft)
    dt = dt[tuple([slice(None, None, -1)] * rank)]
    if return_indices:
        ft = ft[tuple([slice(None, None, -1)] * rank)]
    _nd_image.distance_transform_op(structure, dt, ft)
    dt = dt[tuple([slice(None, None, -1)] * rank)]
    if return_indices:
        ft = ft[tuple([slice(None, None, -1)] * rank)]
        ft = numarray.ravel(ft)
        if ft_inplace:
            if indices.type() != numarray.Int32:
                raise RuntimeError, 'indices must be of Int32 type'
            if indices.shape != (dt.rank,) + dt.shape:
                raise RuntimeError, 'indices has wrong shape'
            tmp = indices
        else:
            tmp = numarray.indices(dt.shape, type = numarray.Int32)
        for ii in range(tmp.shape[0]):
            rtmp = numarray.ravel(tmp[ii, ...])[ft]
            rtmp.setshape(dt.shape)
            tmp[ii, ...] = rtmp
        ft = tmp

    # construct and return the result
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)
    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
Exemple #34
def mask(clus_id, line_s):
    imagefile = c.imagefile
    sex_cata = c.sex_cata
    clus_cata = c.out_cata
    threshold = c.threshold
    thresh_area = c.thresh_area
    size = c.size
    mask_reg = c.mask_reg
    x = n.reshape(n.arange(size*size),(size,size)) % size
    x = x.astype(n.Float32)
    y = n.reshape(n.arange(size*size),(size,size)) / size
    y = y.astype(n.Float32)
    values = line_s.split()
    mask_file = 'ell_mask_' + str(imagefile)[:6] + '_'  + str(clus_id) + '.fits'
    xcntr_o  = float(values[1]) #x center of the object
    ycntr_o  = float(values[2]) #y center of the object
    xcntr = size / 2.0 + 1.0 + xcntr_o - int(xcntr_o)
    ycntr = size / 2.0 + 1.0 + ycntr_o - int(ycntr_o)
    mag    = float(values[7]) #Magnitude
    radius = float(values[9]) #Half light radius
    mag_zero = c.mag_zero #magnitude zero point
    sky	 = float(values[10]) #sky 
    pos_ang = float(values[11]) - 90.0 #position angle
    axis_rat = 1.0 / float(values[12]) #axis ratio b/a
    major_axis = float(values[14])	#major axis of the object
    z = n.zeros((size,size))		
    for line_j in open(sex_cata,'r'):
        try:
            values = line_j.split()
            xcntr_n  = float(values[1]) #x center of the neighbour
            ycntr_n  = float(values[2]) #y center of the neighbour
            mag    = float(values[7]) #Magnitude
            radius = float(values[9]) #Half light radius
            sky      = float(values[10]) #sky
            pos_ang = float(values[11]) #position angle
            axis_rat = 1.0/float(values[12]) #axis ratio b/a
            si = n.sin(pos_ang * n.pi / 180.0)
            co = n.cos(pos_ang * n.pi / 180.0)
            area = float(values[13])
            maj_axis = float(values[14])#major axis of neighbour
            eg = 1.0 - axis_rat
            one_minus_eg_sq    = (1.0-eg)**2.0
            if(abs(xcntr_n - xcntr_o) < size/2.0 and \
               abs(ycntr_n - ycntr_o) < size/2.0 and \
               xcntr_n != xcntr_o and ycntr_n != ycntr_o):
                if((xcntr_o - xcntr_n) < 0):
                    xn = xcntr + abs(xcntr_n - xcntr_o)
                if((ycntr_o - ycntr_n) < 0):
                    yn = ycntr + abs(ycntr_n - ycntr_o)
                if((xcntr_o - xcntr_n) > 0):
                    xn = xcntr - (xcntr_o -xcntr_n)
                if((ycntr_o - ycntr_n) > 0):
                    yn = ycntr - (ycntr_o -ycntr_n)
                tx = (x - xn + 0.5) * co + (y - yn + 0.5) * si
                ty = (xn - 0.5 -x) * si + (y - yn + 0.5) * co
                R = n.sqrt(tx**2.0 + ty**2.0 / one_minus_eg_sq)
                z[n.where(R<=mask_reg*maj_axis)] = 1
        except:
            i = 1 # skip malformed lines in the SExtractor catalogue
    hdu = pyfits.PrimaryHDU(z.astype(n.Float32))
    hdu.writeto(mask_file)
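
# Sanity check on the masking criterion above (a symbolic sketch, not extra
# functionality): for an unrotated, circular neighbour (pos_ang = 0,
# axis_rat = 1) we get si = 0, co = 1 and one_minus_eg_sq = 1, so
# tx = x - xn + 0.5, ty = y - yn + 0.5 and R is just the Euclidean distance
# from (xn - 0.5, yn - 0.5); pixels with R <= mask_reg * maj_axis are set
# to 1 in the mask image.
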
Exemple #35
def distance_transform_edt(input, sampling = None, 
                        return_distances = True, return_indices = False,
                        distances = None, indices = None):
    """Exact euclidean distance transform.

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element is returned along the first axis of the result.

    The return_distances, and return_indices flags can be used to
    indicate if the distance transform, the feature transform, or both
    must be returned.

    Optionally the sampling along each axis can be given by the
    sampling parameter which should be a sequence of length equal to
    the input rank, or a single number in which the sampling is assumed
    to be equal along all axes.

    the distances and indices arguments can be used to give optional
    output arrays that must be of the correct size and type (Float64
    and Int32).
    """
    if (not return_distances) and (not return_indices):
        msg = 'at least one of distances/indices must be specified'
        raise RuntimeError, msg
    ft_inplace = isinstance(indices, numarray.NumArray)
    dt_inplace = isinstance(distances, numarray.NumArray)
    # calculate the feature transform
    input = numarray.where(input, 1, 0).astype(numarray.Int8)
    if sampling is not None:
        sampling = _ni_support._normalize_sequence(sampling, input.rank)
        sampling = numarray.asarray(sampling, type = numarray.Float64)
        if not sampling.iscontiguous():
            sampling = sampling.copy()
    if ft_inplace:
        ft = indices
        if ft.shape != (input.rank,) + input.shape:
            raise RuntimeError, 'indices has wrong shape'
        if ft.type() != numarray.Int32:
            raise RuntimeError, 'indices must be of Int32 type'
    else:
        ft = numarray.zeros((input.rank,) + input.shape,
                            type = numarray.Int32) 
    _nd_image.euclidean_feature_transform(input, sampling, ft)
    # if requested, calculate the distance transform
    if return_distances:
        dt = ft - numarray.indices(input.shape, type = ft.type())
        dt = dt.astype(numarray.Float64)
        if sampling is not None:
            for ii in range(len(sampling)):
                dt[ii, ...] *= sampling[ii]
        numarray.multiply(dt, dt, dt)
        if dt_inplace:
            dt = numarray.add.reduce(dt, axis = 0)
            if distances.shape != dt.shape:
                raise RuntimeError, 'distances has wrong shape'
            if distances.type() != numarray.Float64:
                raise RuntimeError, 'distances must be of Float64 type'
            numarray.sqrt(dt, distances)
            del dt
        else:
            dt = numarray.add.reduce(dt, axis = 0)
            dt = numarray.sqrt(dt)
    # construct and return the result
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)
    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
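
# A minimal usage sketch for distance_transform_edt (hedged: assumes numarray
# and _nd_image are importable; the input array is purely illustrative):
#
#   a = numarray.array([[0, 0, 0],
#                       [0, 1, 0],
#                       [0, 0, 0]])
#   dist, inds = distance_transform_edt(a, return_indices=True)
#
# dist gives each pixel's Euclidean distance to the nearest zero pixel and
# inds gives the index of that nearest background pixel along the first axis.
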
Exemple #36
def create_prediction_success_table(LCM, location_set, observed_choices_id, geographies=[], \
                                    choice_method='mc', data_objects=None):
    """this function creates a table tabulating number of agents observed versus predicted by geographies for location choice model
    LCM is an instance of Location Choice Model after run_estimation,
    location_set is the set of location in simulation, e.g. gridcell,
    observed_choice_id is the location_set id (e.g. grid_id) observed,
    geographies is a list of geographies to create prediction sucess table for,
    choice_method is the method used to select choice for agents, either mc or max_prob
    data_objects is the same as data_objects used to run LCM simulation, but includes entries for geographies
    """
    LCM.simulate_step()
    choices = sample_choice(LCM.model.probabilities, choice_method)
    choices_index = LCM.model_resources.translate("index")[
        choices]  #translate choices into index of location_set
    #maxprob_choices = sample_choice(LCM.model.probabilities, method="max_prob")  #max prob choice
    #maxprob_choices_index = LCM.model_resources.translate("index")[maxprob_choices]
    results = []

    gcs = location_set
    for geography in geographies:
        geo = data_objects.translate(geography)

        #get geo_id for observed agents
        gc_index = gcs.get_id_index(observed_choices_id)
        if geo.id_name[0] not in gcs.get_attribute_names():
            gcs.compute_variables(geo.id_name[0], resources=data_objects)
        geo_ids_obs = gcs.get_attribute(geo.id_name[0])[gc_index]

        #        obs = copy.deepcopy(agent_set)
        #        obs.subset_by_index(agents_index)
        #        obs.set_values_of_one_attribute(gcs.id_name[0], observed_choices_id)
        #resources.merge({"household": obs}) #, "gridcell": gcs, "zone": zones, "faz":fazes})
        #        obs.compute_variables(geo.id_name[0], resources=resources)
        #        obs_geo_ids = obs.get_attribute(geo.id_name[0])

        #get geo_id for simulated agents
        geo_ids_sim = gcs.get_attribute(geo.id_name[0])[choices_index]

        #sim = copy_dataset(obs)
        #sim.set_values_of_one_attribute(gcs.id_name[0], gcs.get_id_attribute()[mc_choices_index])
        #resources.merge({"household": sim})

        geo_size = geo.size()
        myids = geo.get_id_attribute()

        pred_matrix = zeros((geo_size, geo_size))
        p_success = zeros((geo_size, )).astype(Float32)

        f = 0
        for geo_id in myids:
            ids = geo_ids_sim[where(
                geo_ids_obs == geo_id
            )]  #get simulated geo_id for agents observed in this geo_id
            #resources.merge({"agents_index": agents_index_in_geo, "agent":sim})
            what = ones(ids.size())
            pred_matrix[f] = array(nd_image_sum(what, labels=ids, index=myids))
            print pred_matrix[f]
            if sum(pred_matrix[f]) > 0:
                p_success[f] = float(pred_matrix[f, f]) / sum(pred_matrix[f])

            #sim.increment_version(gcs.id_name[0])  #to trigger recomputation in next iteration
            f += 1

        print p_success
        results.append((pred_matrix.copy(), p_success.copy()))

    return results
Exemple #37
    flux=[]
    slices=[]
    for index in range(len(psf_stars['data']['X'])):
        x=float(psf_stars['data']['X'][index])
        y=float(psf_stars['data']['Y'][index])

        if x+xbox > data.getshape()[1] or x-xbox < 0 or y+ybox > data.getshape()[0] or y-ybox < 0:
            continue
        l=int(x-xbox)
        r=int(x+xbox)
        t=int(y-ybox)
        b=int(y+ybox)
        sec = data[t:b,l:r].copy()
        sec = shift(sec,(x-int(x),y-int(y)),order=3)
        obj =  N.where(sec > 2.0*average(average(sec)),1,0)
        sky2 = N.where(sec < 2.0*average(average(sec)),1,0)
        sky2 = N.sum(N.sum(sky2*sec))/N.sum(N.sum(sky2))

        (lab, nobj) = label(obj,structure=s)
        f = N.nd_image.find_objects(lab)
        skip=0
        
        msec = masked_outside(sec,sky2-5.0*sqrt(sky2),40000.)
        for i in range(1,nobj+1):
            (a,b)=shape(obj[f[i-1]])
            a*=1.
            b*=1.
            if a*b < 10:
                continue
            if a/b < 0.5 or b/a < 0.5 or a>25 or b> 25: