def replace_electrodes(electrode_loc, selected_chs, new_layout, ch_pos):

    # 1- electrode location according to the Biosemi denomination (1-based)
    electrode_loc += 1
    (i, j) = np.where(new_layout == electrode_loc)
    replace_chs = np.zeros(0, dtype=int)
    # Collect the four orthogonal neighbours on the 9x9 layout grid...
    if i != 0:  replace_chs = np.append(replace_chs, new_layout[i-1, j])
    if j != 0:  replace_chs = np.append(replace_chs, new_layout[i, j-1])
    if i+1 < 9: replace_chs = np.append(replace_chs, new_layout[i+1, j])
    if j+1 < 9: replace_chs = np.append(replace_chs, new_layout[i, j+1])
    # ... and the four diagonal neighbours
    if i  != 0 and j !=  0: replace_chs = np.append(replace_chs, new_layout[i-1, j-1])
    if i+1 < 9 and j !=  0: replace_chs = np.append(replace_chs, new_layout[i+1, j-1])
    if i+1 < 9 and j+1 < 9: replace_chs = np.append(replace_chs, new_layout[i+1, j+1])
    if i  != 0 and j+1 < 9: replace_chs = np.append(replace_chs, new_layout[i-1, j+1])
    # Drop the zero entries that mark empty grid positions
    tempo_idx = np.nonzero(replace_chs)
    replace_chs  = replace_chs[tempo_idx[0]]
    # 2- re-assignment to 0-based indices for Python processing
    replace_chs -= 1
    # 3- make sure the replacement candidates are not themselves bad electrodes
    selected_chs = np.array(selected_chs)
    in_array = np.setmember1d(replace_chs, selected_chs)
    in_array = np.where(in_array == False)
    replace_chs  = replace_chs[in_array[0]]
    # 4- return at most four candidates
    if len(replace_chs) >= 4: replace_chs = replace_chs[:4]
    # 5- map the candidates to their positions in ch_pos
    tempo = np.setmember1d(np.array(ch_pos), replace_chs)
    replace_chs = np.where(tempo == True)
    replace_chs = replace_chs[0]
    return replace_chs
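
All of the snippets on this page use np.setmember1d, an early NumPy API that was later deprecated in favor of np.in1d and np.isin. As a point of reference (not part of the original example), the membership test above can be reproduced on modern NumPy like this, with made-up channel numbers:

import numpy as np

replace_chs = np.array([3, 11, 18])    # hypothetical candidate channels
selected_chs = np.array([11, 42])      # hypothetical bad channels

mask = np.isin(replace_chs, selected_chs)   # old API: np.setmember1d(...)
kept = replace_chs[~mask]                   # -> array([ 3, 18])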
Example 2
def intersect_array1d(a, b, rows):
    """
    Find the set intersection of two vectors.

    Returns: c, the intersection vector, and index vectors ia and ib such
    that c = a[ia] and c = b[ib] (for sorted, duplicate-free inputs).
    Note: the `rows` argument is unused.
    """
    c  = np.intersect1d(a, b)
    ma = np.setmember1d(a, b)
    mb = np.setmember1d(b, a)
    ia = np.nonzero(ma)[0]
    ib = np.nonzero(mb)[0]
    return c, ia, ib
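
A quick usage sketch with invented, sorted, duplicate-free inputs (the contract the docstring assumes); it runs only on legacy NumPy, where np.setmember1d still exists:

import numpy as np

a = np.array([1, 3, 4, 6])
b = np.array([3, 6, 9])
c, ia, ib = intersect_array1d(a, b, rows=None)
# c  -> array([3, 6])
# ia -> array([1, 3]); a[ia] == c
# ib -> array([0, 1]); b[ib] == c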
def replace_refIDX(Ref_CHs, selected_chs, num_ch):
    """Remove the bad electrodes and ignore them in the referencing process
    (via the original scalp-location denomination)."""

    if type(Ref_CHs) == list: Ref_CHs = np.array(Ref_CHs, dtype=int)
    selected_chs = np.array(selected_chs)
    # Keep only the reference channels that are not bad electrodes
    in_array = np.setmember1d(Ref_CHs, selected_chs)
    in_array = np.where(in_array == False)
    if in_array[0].tolist() == []:
        # All reference channels are bad: fall back to every good channel
        all_chs  = np.arange(num_ch, dtype=int)
        in_array = np.setmember1d(all_chs, selected_chs)
        in_array = np.where(in_array == False)
        Ref_CHs  = all_chs[in_array[0]]
    else:
        Ref_CHs = Ref_CHs[in_array[0]]
    return Ref_CHs
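
A small sketch of both branches, with invented channel numbers:

import numpy as np

# Some reference channels are good: the bad one is simply dropped.
replace_refIDX(np.array([2, 5, 7]), selected_chs=[5], num_ch=8)
# -> array([2, 7])

# Every reference channel is bad: fall back to all remaining good channels.
replace_refIDX(np.array([5]), selected_chs=[5], num_ch=4)
# -> array([0, 1, 2, 3])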
Example 4
def ismember_newer(totest, members):
    """
    A setmember1d that also works when `totest` contains duplicate values.
    """
    # np.unique1d is the legacy (pre-NumPy-1.4) name of np.unique
    uniques_in_test, rev_idx = np.unique1d(totest, return_inverse=True)
    uniques_in_members_mask = np.setmember1d(uniques_in_test, members)
    # Use this instead if `members` is not unique:
    # uniques_in_members_mask = setmember1d(uniques_in_test, unique1d(members))
    return uniques_in_members_mask[rev_idx]
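
A usage sketch with duplicates in totest (the case plain setmember1d mishandles); on modern NumPy, np.isin(totest, members) gives the same result directly:

import numpy as np

totest  = np.array([1, 2, 2, 5, 9])
members = np.array([2, 5])
ismember_newer(totest, members)
# -> array([False,  True,  True,  True, False])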
Example 5
def comp_ed(spdata1, abdata1, spdata2, abdata2):
    """Calculate the compositional Euclidean Distance between two sites

    Ref: Thibault KM, White EP, Ernest SKM. 2004. Temporal dynamics in the
    structure and composition of a desert rodent community. Ecology. 85:2649-2655.

    """
    # Convert abundances to relative abundances
    abdata1 = (abdata1 * 1.0) / sum(abdata1)
    abdata2 = (abdata2 * 1.0) / sum(abdata2)
    # Split the species lists into shared and site-exclusive species
    intersect12 = set(spdata1).intersection(spdata2)
    setdiff12 = np.setdiff1d(spdata1, spdata2)
    setdiff21 = np.setdiff1d(spdata2, spdata1)
    # Align both sites on a common species axis, padding absences with zeros
    relab1 = np.concatenate((abdata1[np.setmember1d(spdata1, list(intersect12)) == 1],
                             abdata1[np.setmember1d(spdata1, setdiff12)],
                             np.zeros(len(setdiff21))))
    relab2 = np.concatenate((abdata2[np.setmember1d(spdata2, list(intersect12)) == 1],
                             np.zeros(len(setdiff12)),
                             abdata2[np.setmember1d(spdata2, setdiff21)]))
    return np.sqrt(sum((relab1 - relab2) ** 2))
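
A worked toy call with invented species IDs and abundances. Shared species 2 and 3 align; species 1 and 4 are padded with zeros on the site where they are absent, so the result is sqrt(sum((relab1 - relab2)**2)):

import numpy as np

spdata1, abdata1 = np.array([1, 2, 3]), np.array([10., 5., 5.])
spdata2, abdata2 = np.array([2, 3, 4]), np.array([4., 4., 2.])
comp_ed(spdata1, abdata1, spdata2, abdata2)
# relab1 -> [0.25, 0.25, 0.5, 0.0]; relab2 -> [0.4, 0.4, 0.0, 0.2]
# -> sqrt(0.15**2 + 0.15**2 + 0.5**2 + 0.2**2) ~= 0.579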
Example 7
except IOError as e:
    print 'Could not find associated sinks_creation_info.dat. Make sure it resides in the same dir as sinks_evol.dat'

# Read data files
data = np.genfromtxt(datafile)
print 'SINKS_EVOL.DAT file read.'
creation_info = np.genfromtxt(creation_info_file)
taglist = creation_info[1:,0].astype(int)
print 'SINKS_CREATION_INFO.DAT file read.'

tags = data[1:,0].astype(int)
time = data[1:,1]

# Check if there are other sink particles that were present from the start.
# These would not have been listed in the file sinks_creation_info.dat
extras = np.nonzero(~np.setmember1d(tags,taglist))[0] # Indices where entries in
                                                      # 'tags' do not appear in
                                                      # 'taglist'. Note: 'tags'
                                                      # contains duplicates, which
                                                      # legacy setmember1d handled
                                                      # poorly (see Example 4).
for i in extras:
    taglist = np.concatenate(([tags[i]],taglist))

nparticles = len(taglist)
print '\nThere are {0} sink particles in our simulation'.format(nparticles)
print 'Sink particle tags:'
print taglist

# Prepare sinks_evol_fixed.dat for writing
f = open(relpath+'/'+'sinks_evol_fixed.dat','w')
g = open(datafile,'r')
header = g.readline()
hwords = str.split(header)
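
Because tags repeats each sink's tag once per output row, the membership test above runs on a non-unique array, which legacy np.setmember1d did not guarantee to handle (that is exactly the gap ismember_newer in Example 4 fills). A duplicate-safe sketch of the same "find unseen tags" step, with invented tag values:

import numpy as np

tags = np.array([12, 12, 40, 12, 40, 7])   # hypothetical per-row sink tags
taglist = np.array([40, 7])                # tags known from creation info

missing = np.setdiff1d(tags, taglist)      # -> array([12]), each tag once
taglist = np.concatenate((missing, taglist))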
Example 9
def write_statistics(database, outfile, vType, minVcount=1):
    """Traverses the vessel database, assigns modified strahler orders to each
    vascular tree and groups segments of the same order into elements. Writes 
    the statistics based on each tree's elements to an excel file.
    INPUT: database: The path to the vessel database.
           outfile: The name of the output file containing the vessel 
                    statistics.
           vType: The type of trees. This can be either 'a' (arterial) or 'v'
                  (venous).
           minVcount: The minimum number of vertices in a tree to be considered
                      in the statistics. 
    OUTPUT: None (file written to disk).                
    """
    
    lengthDB = [[] for i in xrange(25)]
    vcountDB = []
    ## Prepare excel file for writing (style0, style1 and wb are used below):
    font0 = xlwt.Font()
    font0.name = 'Times New Roman'
    font0.colour_index = 2
    font0.bold = True
    style0 = xlwt.XFStyle()
    style0.font = font0

    font1 = xlwt.Font()
    font1.name = 'Times New Roman'
    font1.colour_index = 0
    font1.bold = True
    style1 = xlwt.XFStyle()
    style1.font = font1

    wb = xlwt.Workbook()

    # Loop over trees in database and write statistics to file:
    trees = [(int(os.path.basename(t)[:-4]), t) for t in 
             glob.glob(os.path.join(database, '*.pkl'))]
    trees = [t[1] for t in sorted(trees)]
    tenPercSteps = np.round(np.linspace(1, 10, 10) * len(trees) / 10.)
    percentages = np.linspace(10, 100, 10)
    print('Writing database...')
    treeCount = 0
    for i, tree in enumerate(trees):
        print(tree)
        # Read and process tree data:
        G = vgm.read_pkl(tree)
        vcountDB.append(G.vcount())
        if (i+1) in tenPercSteps:
            print('%i percent completed' % int(percentages[np.setmember1d(tenPercSteps, [i+1])][0]))
        if G.vcount() < minVcount:
            continue
#-------------------------------------------------------------
        # Delete order 2 vertices and add length and conductance 
        # (initially None for the new, joined edges). Rename the attachment
        # vertex (vertex indexing changes while order 2 vertices are removed):
        avr = G.vs[G['attachmentVertex']]['r']
        G.delete_order_two_vertices()    
        G.es['length'] = [sum(e['lengths']) for e in G.es]
        vgm.add_conductance(G, vType)
        # Delete selfloops:
        G.delete_selfloops()
        KDT = kdtree.KDTree(G.vs['r'])
        G['attachmentVertex'] = int(KDT.query(avr)[1])
#-------------------------------------------------------------

        strahler.modified_strahler_order(G, 0.99)
        Gel = strahler.assign_elements(G)
        maxOrder = max(Gel.vs['order'])
        cl = 0 # current line

        # Write tree ID, sample ID, vcount, ecount:
        ws = wb.add_sheet('Tree' + str(treeCount))
        treeCount = treeCount + 1
        ws.write(cl, 0, 'Tree ID', style0)
        ws.write(cl, 1, int(string.strip(os.path.basename(tree), '.pkl')))
        cl = cl + 1
        ws.write(cl, 0, 'Sample ID', style0)
        ws.write(cl, 1, G['sampleName'])
        cl = cl + 1        
        ws.write(cl, 0, 'Vertices', style0)
        ws.write(cl, 1, G.vcount())
        cl = cl + 1
        ws.write(cl, 0, 'Edges', style0)
        ws.write(cl, 1, G.ecount())
        cl = cl + 1
        ws.write(cl, 0, 'Dist to border', style0)
        ws.write(cl, 1, G['distanceToBorder'])
        cl = cl + 1
        ws.write(cl, 0, 'Root offset', style0)
        ws.write(cl, 1, G['avZOffset'])        
        cl = cl + 3
        
        # Write 'order' header:
        ws.write(cl, 0, 'Order', style0)
        for order in range(maxOrder + 1):
            ws.write(cl, order+1, order, style0)
        cl = cl + 1
        
        # Write element frequency 'N':
        ws.write(cl, 0, 'N', style1)
        for order in range(maxOrder + 1):
            ws.write(cl, order+1, len(Gel.vs(order_eq=order)))
        cl = cl + 1    
        
        # Write 'segments per element':
        # Note that this is the total number of segments of an order divided
        # by the total number of elements in that order (i.e. not on an
        # element-by-element basis).
        ws.write(cl, 0, 'SpE', style1)
        for order in range(maxOrder + 1):
            numElements = len(Gel.vs(order_eq=order))
            numSegments = sum([len(v['edges']) for v in Gel.vs(order_eq=order)])
            # Float division, so fractional segments-per-element survive:
            ws.write(cl, order+1, numSegments / float(numElements))
        cl = cl + 1

        # Write element 'length' and 'diameter':
        for eProperty in ['length', 'diameter']:
            ws.write(cl, 0, eProperty + ' (mean) [micron]', style1)
            ws.write(cl+1, 0, eProperty + ' (std) [micron]', style1)
            for order in range(maxOrder + 1):
                data = []
                for element in Gel.vs(order_eq=order):
                    if eProperty == 'length':
                        # Length of element is sum of segment-lengths:
                        data.append(sum(G.es[element['edges']][eProperty]))
                    else:
                        # Diameter of element is mean of segment-diameters:
                        data.append(np.mean(G.es[element['edges']][eProperty]))
                ws.write(cl, order+1, np.mean(data))
                ws.write(cl+1, order+1, np.std(data))
                if eProperty == 'length':
                    lengthDB[order].append(np.mean(data))
            cl = cl + 2
            
         
        # Add some additional whitespace:
        cl = cl + 2
         
        # Compute connectivity matrix and branching angles: 
        cm =  dict(zip(range(maxOrder + 1), 
                       [dict(zip(range(maxOrder + 1), 
                                 [[] for o in range(maxOrder + 1)]))
                         for o in range(maxOrder + 1)]))
        ba =  dict(zip(range(maxOrder + 1), 
                       [dict(zip(range(maxOrder + 1), 
                                 [[] for o in range(maxOrder + 1)]))
                         for o in range(maxOrder + 1)]))                         
        # Loop over the elements of G, which are the vertices of Gel:                                         
        for elm in Gel.vs:
            order = elm['order']
            orderCounter = dict(zip(range(maxOrder + 1), 
                                    [0 for o in range(maxOrder + 1)]))
            branchingAngle = dict(zip(range(maxOrder + 1), 
                                      [[] for o in range(maxOrder + 1)]))
                                      
            # Loop over neighboring elements:                        
            for nElm in Gel.vs(Gel.neighbors(elm.index)):
                # Find vertex common to both elements. 
                # Note that two elements can theoretically touch at more than 
                # one location, here only the first common vertex is considered.
                vertex = int(np.array(elm['vertices'])[np.setmember1d(elm['vertices'], nElm['vertices'])][0])
                # Find the edge that is part of the current element, adjacent,
                # and upstream to vertex. This ensures a 'natural', flow-based
                # choice of nElm-elm edge angle if the nElm edge connects to
                # more than one elm-edge.
                # Note that edges at equal mean pressures are not considered!
                edges = np.array(elm['edges'])[np.setmember1d(elm['edges'], G.adjacent(vertex))]
                pMax = G.vs[vertex]['pressure']
                edge = None
                for e in edges:
                    e = int(e) # Convert from numpy integer
                    meanP = np.mean(G.vs[G.es[e].tuple]['pressure'])
                    if meanP > pMax:
                        edge = e
                        pMax = meanP
                # Skip this element-junction if the current element does not 
                # contain upstream edges:        
                if edge is None:
                    continue       
                # Find the edge that is part of the neighboring element, 
                # adjacent, and downstream to vertex. This ensures a 'natural', 
                # flow-based choice of nElm-elm edge angle if the elm edge 
                # connects to more than one nElm-edges.
                # Note that edges at equal mean pressures are not considered!
                edges = np.array(nElm['edges'])[np.setmember1d(nElm['edges'], G.adjacent(vertex))]                                
                pMin = G.vs[vertex]['pressure']
                nEdge = None
                for e in edges:
                    e = int(e) # Convert from numpy integer
                    meanP = np.mean(G.vs[G.es[e].tuple]['pressure'])
                    if meanP < pMin:
                        nEdge = e
                        pMin = meanP
                # Skip this element-junction if the neighboring element does not 
                # contain downstream edges:        
                if nEdge is None:
                    continue                         

                # Increase the count of the connectivity matrix and compute the
                # branching angle:
                orderCounter[nElm['order']] += 1
                # A spherical region around the vertex is defined as the 
                # branching region. The radius of the sphere is equal to the 
                # maximum of the radii of the adjacent edges at the location of
                # the vertex.
                # The angle of an element's edge is then computed from the point
                # where the edge penetrates the sphere up to a distance of
                # about twice the vessel diameter away from the point of
                # penetration (unless, of course, the edge is shorter).
                # Radius of branching sphere:
                maxDiameter = 0
                for e in G.adjacent(vertex):
                    if G.es[e].source != vertex:
                        d = G.es[e]['diameters'][-1]
                    else:
                        d = G.es[e]['diameters'][0]
                    if d > maxDiameter:
                        maxDiameter = d
                radius = maxDiameter / 2.0
                # Vectors of the two branching edges:
                vectors = []
                for e in (edge, nEdge):
                    if G.es[e].source != vertex:
                        points = G.es[e]['points'][::-1]
                    else:
                        points = G.es[e]['points']
                    
                    startPoint = int(round(len(points) *
                                           radius / G.es[e]['length']))
                    if startPoint > len(points)-1 or startPoint < 1:
                        startPoint = 1

                    # In order to determine the direction of a vessel leaving a
                    # bifurcation, we consider the points it comprises starting
                    # just outside the bifurcation (i.e. at maximum radius of  
                    # all vessels incident to the bifurcation) and ending two
                    # diameter lengths away from the bifurcation (angles 
                    # deviate least from the mean of angles determined from 1,
                    # 2, 3, 4 diameter lengths).
                    endPoint = int(min(len(points),
                                       round(len(points) *
                                             (radius + 2.0 * G.es[e]['diameter']) /
                                             G.es[e]['length'])))
                    endPoint = max(endPoint, startPoint+1)
                    points = points[startPoint-1:endPoint]
                    
                    vectors.append(vgm.average_path_direction(points))                           
                
                # The magnitudes of the vectors are one, hence the angle can
                # be computed without dividing by them. Note that an angle of 0
                # degrees means that both parent and child vessel are going in
                # the same direction (e.g. collinear edges across the vertex
                # give dot = -1, arccos = pi, angle = 0), whereas 180 degrees
                # signifies opposite directions:
                angle = np.pi - np.arccos(np.dot(vectors[0], vectors[1]))
                branchingAngle[nElm['order']].append(angle)          
     
            # Update the connectivity matrix and branching angle matrix with the
            # results of the current element:
            for nOrder in orderCounter.keys():
                cm[order][nOrder].append(orderCounter[nOrder])    
                ba[order][nOrder].extend(branchingAngle[nOrder])

        # Write connectivity matrix:        
        ws.write(cl, 0, 'Connectivity matrix', style1)
        cl = cl + 1
        ws.write(cl, 0, 'Order', style0)
        neo = [] # Number of elements of order o
        for j, order in enumerate(range(maxOrder + 1)):
            ws.write(cl, order+1, order, style0)
            ws.write(cl+j+1, 0, order, style0)
            neo.append(len(Gel.vs(order_eq=order)))
        cl = cl + 1                  
        for order in range(maxOrder + 1):
            for nOrder in range(order, maxOrder + 1):
            # Use the following line instead of the previous, if order zero
            # elements never connect to other order zero elements:
            # for nOrder in range(max(1, order), maxOrder + 1):
                if cm[order][nOrder] != []:
                    ws.write(order+cl, nOrder+1, sum(cm[order][nOrder]) / float(neo[nOrder]))
        # Equal spacing for all worksheets, irrespective of maximum order:
        cl = cl + 20
        #cl = cl + maxOrder + 3
        
        # Write branching angles:
        ws.write(cl, 0, 'Branching angles [deg]', style1)
        cl = cl + 1
        ws.write(cl, 0, 'Order', style0)
        neo = [] # Number of elements of order o
        for j, order in enumerate(range(maxOrder + 1)):
            ws.write(cl, order+1, order, style0)
            ws.write(cl+j+1, 0, order, style0)
            neo.append(len(Gel.vs(order_eq=order)))
        cl = cl + 1                  
        for order in range(maxOrder + 1):
            for nOrder in range(order, maxOrder + 1):
            # Use the following line instead of the previous, if order zero
            # elements never connect to other order zero elements:
            # for nOrder in range(max(1, order), maxOrder + 1):
                if ba[order][nOrder] != []:
                    ws.write(order+cl, nOrder+1, np.rad2deg(np.mean(ba[order][nOrder])))
        # Equal spacing for all worksheets, irrespective of maximum order:
        cl = cl + 20
        #cl = cl + maxOrder + 3
        
        # Compute irrigation / drainage volume:
        G.vs['z'] = [r[2] for r in G.vs['r']]
        zMin = np.min(G.vs['z'])
        zMax = np.max(G.vs['z'])
        cylinderLength = 200
        intervals = vgm.intervals(zMin, zMax, (zMax - zMin) / cylinderLength)
        pl = np.concatenate(G.es['points'], axis=0)
        z = pl[:,2]        
        
        if intervals == []:
            intervals = [[zMin, zMax]]
        volume = 0.0    
        for i, interval in enumerate(intervals):           
            points = pl[np.nonzero(map(all, zip(z>interval[0],
                                                z<interval[1])))[0], :] # use points
            points = np.array(zip(points[:,0], points[:,1]))
            #points = np.array(G.vs(z_ge=interval[0], z_le=interval[1])['r']) # use vertices
            
            if len(points) < 3:
                continue
            # Add slab volume: 
            com = np.mean(points, axis=0)
            hullpts = convex_hull.qhull(points)
            radius = np.mean([np.linalg.norm(com - hp) for hp in hullpts])
            volume += np.pi * radius**2 * cylinderLength           
                 
        ws.write(cl, 0, 'Irrigation / drainage volume [microL]', style1)
        cl = cl + 1
        ws.write(cl, 0, volume / 1e9) # Conversion micron**3 --> microL
        cl = cl + 3
        
    wb.save(outfile)
    
    wb2 = xlwt.Workbook()
    ws2 = wb2.add_sheet('Element Length')
    cl = 0
    maxOrder = 0
    maxN = []
    for order, ldb in enumerate(lengthDB):
        if len(ldb) > 0:
            maxOrder = order
        maxN.append(len(ldb))    
    for order in xrange(maxOrder + 1):
        ws2.write(0, order, order, style0)
        for i, l in enumerate(lengthDB[order]): 
            ws2.write(i+1, order, l, style1)
    wb2.save('elmLength_' + vType + '.xls')
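
A hedged usage sketch; the paths are invented, and the function relies on the vgm, strahler, xlwt, glob, os, string and kdtree imports of the surrounding module:

# Write per-tree element statistics for all arterial trees with at
# least 100 vertices:
write_statistics('vesselDB/arteries/', 'artery_stats.xls', 'a', minVcount=100)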
Example 10
def aperture_correction(image, filter, plot=True, silent=False, dir=workDir):
    """
    Calculate the aperture corrections to be applied to the 
    science data. Hard coded for H and Kp to be wide camera pixels
    and Lp to be narrow camera pixels. Also hard coded is the optimal
    aperture radius to extract photometry on the science images.
    """
    pixScale = 1.

    # This is the radius we will use to get the aperture photometry
    # for the science field. Note that the L' science aperture is 
    # larger than the standard aperture... this is because we see
    # a huge difference in the PSF from the standards (NGS) to the 
    # science field (LGS), so to gather the same amount of light, 
    # we need to use different apertures.
    # FYI, future reference, we should use LGS even on the standards.
    extractRadius = {'h': 60, 'kp': 70, 'lp2': 80}
    standardRadius = find_apertures.standard_aperture

    ##########
    # Load up information on the science fields
    ##########
    pfile = open(dir + 'cog_' + image + '.dat', 'r')
    dRadius = pickle.load(pfile)
    dFlux = pickle.load(pfile)
    dMag = pickle.load(pfile)
    dMerr = pickle.load(pfile)
    dGcurve = pickle.load(pfile)
    
    # convert to narrow camera pixels
    dRadius = dRadius * pixScale
    
    ##########
    # Read in the growth curves from the 
    # photometric standards.
    ##########
    sdir = dir + '../find_apertures/'
    sRadius, growthCurves = find_apertures.getGrowthCurve(filter, dir=sdir)
    sGcurve = growthCurves.mean(axis=0)
    sGcurveErr = growthCurves.std(axis=0)

    if plot:
        # #########
        #  Overplot them to double check that they are both sensible
        # #########
        py.figure(1)
        py.clf()
        p1 = py.plot(sRadius[1:], sGcurve[1:], 'k-')
        
        gcurveLo = sGcurve[1:] - sGcurveErr[1:]
        gcurveHi = sGcurve[1:] + sGcurveErr[1:]
        py.fill_between(sRadius[1:], gcurveLo, gcurveHi, 
                        color='grey', alpha=0.3)
        # not quite right because it is in mags, but close enough.

        p2 = py.plot(dRadius[1:], dGcurve[1:], 'r.')
        
        py.legend((p1, p2), ('Standards', 'Science PSF'), loc='lower right')
        py.xlabel('Radius (narrow pixels)')
        py.ylabel('Magnitude Difference')
        py.savefig(dir + 'cog_compare_' + image + '.png')


        py.xlim(15, np.array([dRadius.max(), sRadius.max()]).min())
        py.ylim(-0.03, 0)
        py.savefig(dir + 'cog_compare_' + image + '_zoom.png')

        # Plot a difference figure
        sidx = np.where(np.setmember1d(sRadius, dRadius) == True)[0]
        didx = np.where(np.setmember1d(dRadius, sRadius) == True)[0]
        tmp = np.zeros(len(sRadius[1:]))

        py.clf()
        p1 = py.plot(sRadius[1:], tmp, 'k-')
        py.fill_between(sRadius[1:], -1.*sGcurveErr[1:], sGcurveErr[1:], 
                        color='grey', alpha=0.3)
        p2 = py.plot(sRadius[sidx], dGcurve[didx] - sGcurve[sidx], 'r.')
        
        py.legend((p1, p2), ('Standards', 'Science PSF'), loc='lower right')
        py.xlabel('Radius (narrow pixels)')
        py.ylabel('Magnitude Difference')
        py.savefig(dir + 'cog_compare_diff_' + image + '.png')

        py.xlim(15, np.array([dRadius.max(), sRadius.max()]).min())
        py.ylim(-0.03, 0.03)
        py.savefig(dir + 'cog_compare_diff_' + image + '_zoom.png')
        

    ##########
    # Calc Aperture Corrections
    ##########
    # Calculate the aperture correction to get the scale factor between
    # the science aperture (PSF size) and the standard star aperture (outer).
    dataApSize = extractRadius[filter]
    stanApSize = standardRadius[filter]

    ### 1. Go from full-size to aperture-size on science PSF
    fluxApIdx = np.where(dRadius == dataApSize)[0][0]

    stf2aper_flux = dFlux[fluxApIdx]
    stf2aper_mags = -2.5*math.log10(dFlux[fluxApIdx])
    stf2aper_flux_err = 0.0
    stf2aper_mags_err = 0.0

    ### 2. Go from aperture-size on science PSF to aperture-size on Standards
    # Integrate the standard star Curve of Growth (from the outside in)
    # Don't do any of this for L' since the PSFs are so different from 
    # LGS to NGS.
#     if filter != 'lp2':
    if True:
        dataApIdx = np.where(sRadius == dataApSize)[0][0]
        stanApIdx = np.where(sRadius == stanApSize)[0][0]

        if dataApIdx > stanApIdx:
            magCorr = sGcurve[stanApIdx:dataApIdx].sum() * -1.0
        else:
            magCorr = sGcurve[stanApIdx:dataApIdx:-1].sum()

        fluxCorr = 10**(magCorr/-2.5)

        # Determine the uncertainty by calculating the aperture correction
        # from the individual growth curves and then using std().
        curveCount = growthCurves.shape[0]
        magCorr1 = np.zeros(curveCount, dtype=float)
        fluxCorr1 = np.zeros(curveCount, dtype=float)
        for aa in range(curveCount):
            magCorr1[aa] = growthCurves[aa, :dataApIdx:-1].sum()
            fluxCorr1[aa] = 10**(magCorr1[aa]/-2.5)

        aper2stan_flux = fluxCorr
        aper2stan_mags = magCorr
        aper2stan_flux_err = fluxCorr1.std()
        aper2stan_mags_err = 2.5 * math.log10(math.e) * aper2stan_flux_err
    else:
        aper2stan_flux = 1.
        aper2stan_mags = 0.
        aper2stan_flux_err = 0.
        aper2stan_mags_err = 0.
    
    ### 3. Combine the two effects
    stf2stan_flux = stf2aper_flux * aper2stan_flux
    stf2stan_mags = stf2aper_mags + aper2stan_mags
    stf2stan_flux_err = stf2aper_flux * aper2stan_flux_err
    stf2stan_mags_err = 2.5 * math.log10(math.e) * stf2stan_flux_err

    #*** NO APERTURE CORRECTION FOR L' ***
#     if filter == 'lp2':
#         stf2stan_flux = 1.
#         stf2stan_mags = 0.
#         stf2stan_flux_err = 0.
#         stf2stan_mags_err = 0.


    ##########
    # Output
    ##########
    if not silent:
        print '*** APERTURE CORRECTIONS FOR %s ***' % image
        print 'Science Aperture Size  = %d narrow pixels (%.3f arcsec)' % \
            (dataApSize, dataApSize * 0.00995)
        print 'Standard Aperture Size = %d narrow pixels (%.3f arcsec)' % \
            (stanApSize, stanApSize * 0.00995)
        
        print ''
        print 'Aperture Correction to go from Starfinder Magnitudes'
        print 'to Aperture Magnitudes:'
        print '    Flux Ratio = %.3f +/- %.3f' % \
            (stf2aper_flux, stf2aper_flux_err)
        print '    Mag Differ = %.3f +/- %.3f' % \
            (stf2aper_mags, stf2aper_mags_err)
        print '    Aper Flux = STF Flux * %.3f' % (stf2aper_flux)
        print '    Aper Mags = STF Mags + %.3f' % (stf2aper_mags)
        print ''
        print 'Aperture Correction to go from Aperture Magnitudes '
        print 'to Standard Apparent Magnitudes:'
        print '    Flux Ratio = %.3f +/- %.3f' % \
            (aper2stan_flux, aper2stan_flux_err)
        print '    Mag Differ = %.3f +/- %.3f' % \
            (aper2stan_mags, aper2stan_mags_err)
        print '    Stan Flux = Aper Flux * %.3f' % (aper2stan_flux)
        print '    Stan Mags = Aper Mags + %.3f + ZP' % (aper2stan_mags)
        print ''
        print 'Aperture Correction to go from Starfinder Magnitudes '
        print 'to Standard Apparent Magnitudes:'
        print '    Flux Ratio = %.3f +/- %.3f' % \
            (stf2stan_flux, stf2stan_flux_err)
        print '    Mag Differ = %.3f +/- %.3f' % \
            (stf2stan_mags, stf2stan_mags_err)
        print '    Stan Flux = STF Flux * %.3f' % (stf2stan_flux)
        print '    Stan Mags = STF Mags + %.3f + ZP' % (stf2stan_mags)
    

    return (stf2stan_flux, stf2stan_flux_err, stf2stan_mags, stf2stan_mags_err)
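
A usage sketch with an invented image name; the function depends on the module-level workDir, find_apertures, pickle, math and pylab (py) setup:

flux, flux_err, mags, mags_err = aperture_correction('mag04jul_kp', 'kp',
                                                     plot=False, silent=True)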
Example 11
def select_studies(self, studies):
    common = np.setmember1d(self.studies, studies)
    self.active_studies = np.where(common)[0]
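
A minimal sketch of the state this method assumes; the class name and constructor here are invented, and only studies and active_studies come from the original:

import numpy as np

class Dataset(object):                      # hypothetical container
    def __init__(self, studies):
        self.studies = np.asarray(studies)  # all known study IDs

    def select_studies(self, studies):
        common = np.setmember1d(self.studies, studies)
        self.active_studies = np.where(common)[0]

ds = Dataset([101, 102, 103, 104])
ds.select_studies([102, 104])
# ds.active_studies -> array([1, 3])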