def radplot_defaults(param=None):
    """
    Get or set the default parameters of the radplot task.

    Arguments:
    param: None        -> install all defaults via __set_default_parameters.
           'paramkeys' -> return the list of parameter names.
           <name>      -> return the default value for that parameter
                          (implicitly returns None for unknown names).
    """
    myf = taskutil.get_global_namespace()
    a = odict()
    # Every radplot parameter defaults to None (unset).  Looping keeps the
    # insertion order that odict preserves.
    for key in ('img', 'wc', 'rs', 'azav', 'blunit', 'title', 'rlab',
                'azavlab'):
        a[key] = None
    if param is None:
        myf['__set_default_parameters'](a)
    elif param == 'paramkeys':
        return a.keys()
    elif param in a:
        # "in" replaces the deprecated dict.has_key().
        return a[param]
# NOTE(review): this is a duplicate of the radplot_defaults defined
# immediately above; at import time this second copy silently shadows the
# first.  Presumably a merge/paste leftover -- candidate for removal.
def radplot_defaults(param = None):
    """
    Get or set the default parameters of the radplot task.

    param == None        -> install all defaults via __set_default_parameters.
    param == 'paramkeys' -> return the list of parameter names.
    param == <name>      -> return the default value for that parameter
                            (implicitly returns None for unknown names).
    """
    myf = taskutil.get_global_namespace()
    a = odict()
    # Every radplot parameter defaults to None (unset).
    a['img'] = None
    a['wc'] = None
    a['rs'] = None
    a['azav'] = None
    a['blunit'] = None
    a['title'] = None
    a['rlab'] = None
    a['azavlab'] = None
    if(param == None):
        myf['__set_default_parameters'](a)
    elif(param == 'paramkeys'):
        return a.keys()
    else:
        if(a.has_key(param)):
            return a[param]
def make_labelled_ms(srcms, outputms, labelbases, ow=False, debug=False, whichdatacol='DATA'): """ Note: This has been more or less obsoleted by label_itered_ms and could probably be replaced by it after a little testing. Transform one measurement set into another with the same (ideally small but nontrivial) shape and reference frames, but data set to a sequence according to labelbases. The output MS is useful for testing tasks or tools that read and write MSes, since it is more obvious where each bit of data came from, and what has been done to it. Think of it as an alternative to making certain bits of data fluoresce under UV light. Arguments: srcms: The template MS. outputms: The output MS. labelbases: A dictionary of quant: number pairs, where quant is an index to label, and number is how quickly the label changes with the index. quant should be one of 'SCAN_NUMBER', 'DATA_DESC_ID', 'ANTENNA1', 'ANTENNA2', 'ARRAY_ID', 'FEED1', 'FEED2', 'FIELD_ID', 'OBSERVATION_ID', 'PROCESSOR_ID', 'STATE_ID', 'time', 'time_centroid', 'chan(nel)', or 'pol(whatever)' (case insensitive), but you will be let off with a warning if it isn't. TIME and TIME_CENTROID tend to hover around 4.7e9, so they are offset and scaled by subtracting the first value and dividing by the first integration interval. Example labelbases: labelbases = {'channel': 1.0, 'antenna1': complex(0, 1)} The data column in the output will be complex(channel index, antenna1). labelbases = {'SCAN_NUMBER': 1.0, 'FIELD_ID': 0.1, 'DATA_DESC_ID': complex(0, 1)} The data column in the output will go like complex(scan.field, spw) as long as field < 10. ow: Whether or not outputms can be overwritten if it exists. debug: If True and it hits an error, it will try to return what it has so far instead of raising an exception. whichdatacol: Which of DATA, MODEL_DATA, or CORRECTED_DATA to modify in the output. Case insensitive. Returns True or False as a success guess. """ # Make sure we have tb, casalog, clearcal, and ms. 
have_tools = False try: # Members that they likely have, but imposters (if any) do not. have_tools = hasattr(tb, 'colnames') and hasattr(ms, 'selectchannel') have_tools &= hasattr(casalog, 'post') and hasattr( clearcal, 'check_params') except NameError: pass # through to next clause... if not have_tools: try: from taskutil import get_global_namespace my_globals = get_global_namespace() tb = my_globals['tb'] ms = my_globals['ms'] casalog = my_globals['casalog'] clearcal = my_globals['clearcal'] except NameError: print "Could not find tb and ms. my_globals =", print "\n\t".join(my_globals.keys()) casalog.origin("make_labelled_ms") whichdatacol = whichdatacol.upper() if whichdatacol not in ['DATA', 'MODEL_DATA', 'CORRECTED_DATA']: casalog.post( whichdatacol + "is not one of DATA, MODEL_DATA, or CORRECTED_DATA.", 'EXCEPTION') try: if outputms != srcms: if os.path.isdir(outputms): if ow: shutil.rmtree(outputms) else: print "Use ow=True if you really want to overwrite", outputms return False shutil.copytree(srcms, outputms) except Exception, instance: casalog.post( "*** Error %s copying %s to %s." % (instance, srcms, outputms), 'SEVERE') return
def label_itered_ms(msname, labelbases, debug=False, datacol='DATA', incremental=False): """ Set datacol according to labelbases. Like make_labelled_ms() except it modifies in place using ms iteration to handle MSes that are large and/or have multiple data shapes. Arguments: msname: The MS to be modified. labelbases: A dictionary of quant: number or quant: func pairs, where quant is an index to label, and number is how quickly the label changes with the index. func should be a function that takes a data chunk as a dictionary (as returned by ms.getdata), and returns either a scalar or a numpy array with the same shape as the chunk's datacol. quant should be one of 'SCAN_NUMBER', 'DATA_DESC_ID', 'ANTENNA1', 'ANTENNA2', 'ARRAY_ID', 'FEED1', 'FEED2', 'FIELD_ID', 'OBSERVATION_ID', 'PROCESSOR_ID', 'STATE_ID', 'time', 'time_centroid', 'chan(nel)', or 'pol(whatever)' (case insensitive), but you will be let off with a warning if it isn't. TIME and TIME_CENTROID tend to hover around 4.7e9, so they are offset and scaled by subtracting the first value and dividing by the first integration interval. Example labelbases: labelbases = {'channel': 1.0, 'antenna1': complex(0, 1)} The data column in the output will be complex(channel index, antenna1). labelbases = {'SCAN_NUMBER': 1.0, 'FIELD_ID': 0.1, 'DATA_DESC_ID': complex(0, 1)} The data column in the output will go like complex(scan.field, spw) as long as field < 10. debug: If True and it hits an error, it will try to return what it has so far instead of raising an exception. datacol: Which of DATA, MODEL_DATA, or CORRECTED_DATA to modify in the output. Case insensitive. incremental: If True, the visibilities of the MS are added to. Otherwise (default), they are replaced. Returns True or False as a success guess. """ # Make sure we have tb, casalog, clearcal, and ms. have_tools = False try: # Members that they likely have, but imposters (if any) do not. 
have_tools = hasattr(tb, 'colnames') and hasattr(ms, 'selectchannel') have_tools &= hasattr(casalog, 'post') and hasattr( clearcal, 'check_params') except NameError: pass # through to next clause... if not have_tools: try: my_globals = get_global_namespace() tb = my_globals['tb'] ms = my_globals['ms'] casalog = my_globals['casalog'] clearcal = my_globals['clearcal'] except NameError: print "Could not find tb and ms. my_globals =", print "\n\t".join(my_globals.keys()) casalog.origin("label_itered_ms") datacol = datacol.upper() if datacol not in ['DATA', 'MODEL_DATA', 'CORRECTED_DATA']: casalog.post( datacol + "is not one of DATA, MODEL_DATA, or CORRECTED_DATA.", 'EXCEPTION') make_writable_recursively(msname) tb.open(msname) if datacol not in tb.colnames(): casalog.post("Adding scratch columns to " + msname, 'INFO') tb.close() clearcal(msname) else: tb.close() # Setup cols_to_get, polbase, and chanbase polbase = 0.0 chanbase = 0.0 cols_to_get = [datacol] for c in labelbases: # ms.getdata doesn't recognize OBSERVATION_ID, PROCESSOR_ID, or STATE_ID. if c.upper() in [ 'SCAN_NUMBER', 'DATA_DESC_ID', 'ANTENNA1', 'ANTENNA2', 'ARRAY_ID', 'FEED1', 'FEED2', 'FIELD_ID' ]: cols_to_get.append(c) elif c.upper() == 'TIME': cols_to_get.append(c) #cols_to_get.append('INTERVAL') elif c[:3].upper() == 'POL': polbase = labelbases[c] elif c[:4].upper() == 'CHAN': chanbase = labelbases[c] else: casalog.post("Do not know how to label by %s." % c, 'WARN') # Sigh - it seems to be necessary to get the list of DDIDs before # using ms.iter*, i.e. ms.iter* can't actually iter over > 1 DDID? # I wonder what happens if SPECTRAL_WINDOW has, as it often does, # more spws than are used in the main table. 
tb.open(msname + '/SPECTRAL_WINDOW') nddid = tb.nrows() tb.close() ms.open(msname, nomodify=False) for ddid in xrange(nddid): ms.selectinit(datadescid=ddid) ms.iterinit(maxrows=2000) ms.iterorigin() datacol = datacol.lower() ismore = True while ismore: chunk = ms.getdata(cols_to_get) if chunk.has_key('time'): chunk['time'] -= chunk['time'][0] #chunk['time'] /= chunk['interval'][0] if not incremental: chunk[datacol][:, :, :] = 0.0 for q in labelbases: # Relies on all the columns that are both in labelbases and chunk # being scalar. if chunk.has_key(q.lower()): if hasattr(labelbases[q], '__call__'): data = labelbases[q](chunk) if hasattr(data, 'shape'): chunk[datacol] += data else: chunk[datacol][:, :, :] += data else: chunk[datacol][:, :, :] += chunk[ q.lower()] * labelbases[q] datshape = chunk[datacol].shape if polbase: if hasattr(polbase, '__call__'): chunk[datacol] += polbase(chunk) else: for polind in xrange(datshape[0]): chunk[datacol][polind, :, :] += polbase * polind if chanbase: if hasattr(chanbase, '__call__'): chunk[datacol] += chanbase(chunk) else: for cind in xrange(datshape[1]): chunk[datacol][:, cind, :] += chanbase * cind ms.putdata({datacol: chunk[datacol]}) ismore = ms.iternext() try: addendum = msname + " " if incremental: addendum += "added to" else: addendum += "labelled" addendum += " by labelbases = {\n" qbs = [] for q, b in labelbases.items(): qb = "\t%16s: " % ("'" + q + "'") if hasattr(b, '__call__'): qb += b.__name__ + ' (function)' elif type(b) == complex: if b.real != 0.0: qb += "%.1g" % b.real if b.imag != 0.0: qb += "%+.1gi" % b.imag else: qb += "%.1g" % b qbs.append(qb) addendum += ",\n".join(qbs) addendum += "\n}" # The parms parameter is a false lead - ms.listhistory and listhistory # don't show it in the logger. ms.writehistory(addendum, origin="label_itered_ms") except Exception, instance: casalog.post( "*** Error updating %s's history: %s" % (msname, instance), 'SEVERE')
def radav(img, wc, maxrad=None):
    """
    Return the azimuthal average of the image named img about the world
    coordinate wc.

    Returns (rs, azav, wts, runit):
    rs    -- the radii of the averages, in pixels, or scaled by the image's
             pixel increment when runit is not 'pixels'.
    azav  -- the azimuthal average at each radius.
    wts   -- the weights used to normalize each average.
    runit -- the baseline unit, taken from maxrad if it was given, otherwise
             from img's axisunits.

    maxrad, if given, is a string like '40 arcsec' (value and unit separated
    by whitespace).  For now it assumes you are only interested in the first
    plane of img, and that it has square pixels.
    """
    myf = taskutil.get_global_namespace()
    ia = myf['ia']
    pl = myf['pl']
    tb = myf['tb']
    ia.open(img)
    imgsummary = ia.summary()['header']
    imgshape = imgsummary['shape']
    # For some unknown reason topixel returns a dictionary containing an
    # array of floats.
    centpix = ia.topixel(wc)['numeric'].tolist()
    if maxrad == None:
        # No limit given: reach to the most distant corner of the image
        # (plus a little padding for the gridding below).
        maxdelta = [max(abs(imgshape[i] - centpix[i]), abs(centpix[i]))**2
                    for i in [0, 1]]
        maxdelta = int(pl.ceil(sqrt(sum(maxdelta)) + 1.5))
        runit = imgsummary['axisunits'][0]
    else:
        maxdelta, runit = maxrad.split()
        if runit != 'pixels':
            # Convert the world-coordinate radius to pixels using the y
            # increment (square pixels assumed).
            maxdelta = int(pl.ceil(abs(float(maxdelta) /
                                       imgsummary['incr'][1]) + 1.5))
    # We could use a lot of pixelvalue()s here, but that would be slow.
    # Close up and reopen as a tb.
    ia.close()
    tb.open(img)
    imgarr = tb.getcol('map')
    tb.close()
    azav = pl.zeros([maxdelta], dtype=float)
    wts = pl.zeros([maxdelta], dtype=float)
    cx, cy = centpix[0:2]
    # Index tuple selecting the first plane on all trailing (non-xy) axes.
    firstplane = tuple(pl.zeros([len(imgshape[2:]) + 1]).tolist())
    # Set up some numbers for gridding the image pixels onto the radial
    # plot.  Instead of calculating the angle between the radius vector and
    # an x or y axis, treat it as pi/8 on average.
    oosqrt2 = 1.0 / sqrt(2.0)
    secpio8 = 1.0 / sqrt(0.5 * (1.0 + oosqrt2))
    secpio8or2 = oosqrt2 * secpio8
    # cos(pi/8)/sqrt(2)
    ohs1p1or2 = 0.5 * sqrt(1 + oosqrt2)
    # sin(pi/8)/sqrt(2)
    ohs1m1or2 = 0.5 * sqrt(1 - oosqrt2)
    # Bounding box of pixels within maxdelta of the center, clipped to the
    # image.
    minx = int(pl.ceil(max(0, cx - maxdelta)))
    maxx = int(pl.floor(min(imgshape[0], cx + maxdelta)))
    Miny = int(pl.ceil(max(0, cy - maxdelta)))
    Maxy = int(pl.floor(min(imgshape[1], cy + maxdelta)))
    maxd2 = maxdelta**2
    maxdelta -= 1
    for i in xrange(minx, maxx):
        # Half-height of the disk at this column, to skip corner pixels.
        possxspread = sqrt(maxd2 - (i - cx)**2)
        miny = max(Miny, int(pl.ceil(cy - possxspread)))
        maxy = min(Maxy, int(pl.floor(cy + possxspread)))
        for j in xrange(miny, maxy):
            # I don't understand why a[(0, 0, 0)] != a[[0, 0, 0]], but it
            # does.
            pv = imgarr[i, j][firstplane]
            rad = sqrt((i - cx)**2 + (j - cy)**2)
            if rad > 0.8 and rad < maxdelta:
                # Spread this pixel over bins pi-1, pi, and pi+1 with
                # weights wnear, w0, wfar based on its fractional radius.
                pi = int(pl.floor(rad))
                r = rad - pi
                if r < ohs1p1or2:
                    if r > ohs1m1or2:
                        wnear = sqrt(2) * (r - ohs1p1or2)**2
                    else:
                        wnear = 0.5 - secpio8 * r
                else:
                    wnear = 0.0
                if r > 1.0 - ohs1p1or2:
                    if r < 1.0 - ohs1m1or2:
                        wfar = sqrt(2) * (r + ohs1p1or2 - 1.0)**2
                    else:
                        wfar = 0.5 - secpio8 * (1.0 - r)
                else:
                    wfar = 0.0
                w0 = 1.0 - wnear - wfar
                try:
                    azav[pi - 1] += wnear * pv
                    wts[pi - 1] += wnear
                    azav[pi] += w0 * pv
                    wts[pi] += w0
                    azav[pi + 1] += wfar * pv
                    wts[pi + 1] += wfar
                except IndexError:
                    print "pi:", pi
                    print "rad:", rad
                    print "maxdelta:", maxdelta
            else:
                # This *is* the center pixel.
                # NOTE(review): pixels with rad >= maxdelta (possible at the
                # disk edge) also land here and are lumped into bin 0 --
                # verify that is intended.
                azav[0] += pv
                wts[0] += 1.0
    # Normalize the accumulated sums by their weights.
    # NOTE(review): a bin with wts[i] == 0 divides by zero here -- TODO
    # confirm that the padding above guarantees every bin gets weight.
    for i in xrange(0, len(wts)):
        azav[i] /= wts[i]
    rs = [i for i in xrange(0, len(wts))]
    if runit != 'pixels':
        # Scale the pixel radii into world-coordinate units.
        pixscale = abs(imgsummary['incr'][1])
        for i in xrange(0, len(wts)):
            rs[i] *= pixscale
    return rs, azav, wts, runit
def radplot(img=None, wc=None, maxrad=None, rs=None, azav=None, runit=None, title=None, rlab=None, azavlab=None): """ Plots the azimuthal average of an image about a point. For now it assumes you are only interested in the first plane of img, and that it has square pixels. Keyword arguments: img -- Name of image to made a radial plot of. wc -- World coordinate to use as center. rs -- An optional list or array of radii to use instead of calculating. azav -- An optional list or array of azimuthal averages to use instead of calculating. blunit -- Unit of the radii. Derived from img if necessary. title -- Title for the plot. Defaults to 'Azimuthal average of %s about %s' % (img, wc). rlab -- Label for the radial axis. Defaults to 'Radius (runit)'. azavlab -- Label for the azimuthal average axis. Defaults to 'Azimuthal average (azavunit)' """ myf = taskutil.get_global_namespace() ia = myf['ia'] pl = myf['pl'] myf['taskname'] = 'radplot' #taskutil.update_myf(myf) ## # Set all task parameters to their global value ## img = myf['img'] ## wc = myf['wc'] ## maxrad = myf['maxrad'] ## rs = myf['rs'] ## azav = myf['azav'] ## runit = myf['runit'] ## title = myf['title'] ## rlab = myf['rlab'] ## azavlab = myf['azavlab'] casalog = myf['casalog'] # include the logger ## #Add type/menu/range error checking here ## # if type(mask)==str: mask=[mask] ## arg_desc = {} ## # Name Variable Allowed type(s) ## arg_desc['img'] = (img, str) ## arg_desc['wc'] = (wc, (str,list,dict)) ## arg_desc['maxrad'] = (maxrad, (str, dict, list)) ## # arg_desc['rs'] = (rs, (list, array)) ## # arg_desc['azav'] = (azav, (list, array)) ## arg_desc['runit'] = (runit, str) ## arg_desc['title'] = (title, str) ## arg_desc['rlab'] = (rlab, str) ## arg_desc['azavlab'] = (azavlab, str) ## if(taskutils.check_param_types(myf['taskname'], arg_desc)): ## return taskutil.startlog(casalog, myf['taskname']) try: if rs == None or azav == None: rs, azav, wts, runit = radav(img, wc, maxrad) pl.clf() pl.ion() pl.plot(rs, azav) 
ia.open(img) imgsummary = ia.summary() ia.close() imgsummary = imgsummary['header'] if azavlab == None: azavlab = 'Azimuthal average (%s)' % imgsummary['unit'] pl.ylabel(azavlab) if rlab == None: rlab = 'Radius' if runit == None: runit = imgsummary['axisunits'][0] pl.xlabel('%s (%s)' % (rlab, runit)) if title == None: title = 'Azimuthal average of %s about %s' % (img, wc) pl.title(title) except TypeError, e: print myf['taskname'], "-- TypeError: ", e taskutil.endlog(casalog, myf['taskname']) return
def make_labelled_ms(srcms, outputms, labelbases, ow=False, debug=False, whichdatacol='DATA'): """ Note: This has been more or less obsoleted by label_itered_ms and could probably be replaced by it after a little testing. Transform one measurement set into another with the same (ideally small but nontrivial) shape and reference frames, but data set to a sequence according to labelbases. The output MS is useful for testing tasks or tools that read and write MSes, since it is more obvious where each bit of data came from, and what has been done to it. Think of it as an alternative to making certain bits of data fluoresce under UV light. Arguments: srcms: The template MS. outputms: The output MS. labelbases: A dictionary of quant: number pairs, where quant is an index to label, and number is how quickly the label changes with the index. quant should be one of 'SCAN_NUMBER', 'DATA_DESC_ID', 'ANTENNA1', 'ANTENNA2', 'ARRAY_ID', 'FEED1', 'FEED2', 'FIELD_ID', 'OBSERVATION_ID', 'PROCESSOR_ID', 'STATE_ID', 'time', 'time_centroid', 'chan(nel)', or 'pol(whatever)' (case insensitive), but you will be let off with a warning if it isn't. TIME and TIME_CENTROID tend to hover around 4.7e9, so they are offset and scaled by subtracting the first value and dividing by the first integration interval. Example labelbases: labelbases = {'channel': 1.0, 'antenna1': complex(0, 1)} The data column in the output will be complex(channel index, antenna1). labelbases = {'SCAN_NUMBER': 1.0, 'FIELD_ID': 0.1, 'DATA_DESC_ID': complex(0, 1)} The data column in the output will go like complex(scan.field, spw) as long as field < 10. ow: Whether or not outputms can be overwritten if it exists. debug: If True and it hits an error, it will try to return what it has so far instead of raising an exception. whichdatacol: Which of DATA, MODEL_DATA, or CORRECTED_DATA to modify in the output. Case insensitive. Returns True or False as a success guess. """ # Make sure we have tb, casalog, clearcal, and ms. 
have_tools = False try: # Members that they likely have, but imposters (if any) do not. have_tools = hasattr(tb, 'colnames') and hasattr(ms, 'selectchannel') have_tools &= hasattr(casalog, 'post') and hasattr(clearcal, 'check_params') except NameError: pass # through to next clause... if not have_tools: try: from taskutil import get_global_namespace my_globals = get_global_namespace() tb = my_globals['tb'] ms = my_globals['ms'] casalog = my_globals['casalog'] clearcal = my_globals['clearcal'] except NameError: print "Could not find tb and ms. my_globals =", print "\n\t".join(my_globals.keys()) casalog.origin("make_labelled_ms") whichdatacol = whichdatacol.upper() if whichdatacol not in ['DATA', 'MODEL_DATA', 'CORRECTED_DATA']: casalog.post(whichdatacol + "is not one of DATA, MODEL_DATA, or CORRECTED_DATA.", 'EXCEPTION') try: if outputms != srcms: if os.path.isdir(outputms): if ow: shutil.rmtree(outputms) else: print "Use ow=True if you really want to overwrite", outputms return False shutil.copytree(srcms, outputms) except Exception, instance: casalog.post("*** Error %s copying %s to %s." % (instance, srcms, outputms), 'SEVERE') return
def label_itered_ms(msname, labelbases, debug=False, datacol='DATA', incremental=False): """ Set datacol according to labelbases. Like make_labelled_ms() except it modifies in place using ms iteration to handle MSes that are large and/or have multiple data shapes. Arguments: msname: The MS to be modified. labelbases: A dictionary of quant: number or quant: func pairs, where quant is an index to label, and number is how quickly the label changes with the index. func should be a function that takes a data chunk as a dictionary (as returned by ms.getdata), and returns either a scalar or a numpy array with the same shape as the chunk's datacol. quant should be one of 'SCAN_NUMBER', 'DATA_DESC_ID', 'ANTENNA1', 'ANTENNA2', 'ARRAY_ID', 'FEED1', 'FEED2', 'FIELD_ID', 'OBSERVATION_ID', 'PROCESSOR_ID', 'STATE_ID', 'time', 'time_centroid', 'chan(nel)', or 'pol(whatever)' (case insensitive), but you will be let off with a warning if it isn't. TIME and TIME_CENTROID tend to hover around 4.7e9, so they are offset and scaled by subtracting the first value and dividing by the first integration interval. Example labelbases: labelbases = {'channel': 1.0, 'antenna1': complex(0, 1)} The data column in the output will be complex(channel index, antenna1). labelbases = {'SCAN_NUMBER': 1.0, 'FIELD_ID': 0.1, 'DATA_DESC_ID': complex(0, 1)} The data column in the output will go like complex(scan.field, spw) as long as field < 10. debug: If True and it hits an error, it will try to return what it has so far instead of raising an exception. datacol: Which of DATA, MODEL_DATA, or CORRECTED_DATA to modify in the output. Case insensitive. incremental: If True, the visibilities of the MS are added to. Otherwise (default), they are replaced. Returns True or False as a success guess. """ # Make sure we have tb, casalog, clearcal, and ms. have_tools = False try: # Members that they likely have, but imposters (if any) do not. 
have_tools = hasattr(tb, 'colnames') and hasattr(ms, 'selectchannel') have_tools &= hasattr(casalog, 'post') and hasattr(clearcal, 'check_params') except NameError: pass # through to next clause... if not have_tools: try: my_globals = get_global_namespace() tb = my_globals['tb'] ms = my_globals['ms'] casalog = my_globals['casalog'] clearcal = my_globals['clearcal'] except NameError: print "Could not find tb and ms. my_globals =", print "\n\t".join(my_globals.keys()) casalog.origin("label_itered_ms") datacol = datacol.upper() if datacol not in ['DATA', 'MODEL_DATA', 'CORRECTED_DATA']: casalog.post(datacol + "is not one of DATA, MODEL_DATA, or CORRECTED_DATA.", 'EXCEPTION') make_writable_recursively(msname) tb.open(msname) if datacol not in tb.colnames(): casalog.post("Adding scratch columns to " + msname, 'INFO') tb.close() clearcal(msname) else: tb.close() # Setup cols_to_get, polbase, and chanbase polbase = 0.0 chanbase = 0.0 cols_to_get = [datacol] for c in labelbases: # ms.getdata doesn't recognize OBSERVATION_ID, PROCESSOR_ID, or STATE_ID. if c.upper() in ['SCAN_NUMBER', 'DATA_DESC_ID', 'ANTENNA1', 'ANTENNA2', 'ARRAY_ID', 'FEED1', 'FEED2', 'FIELD_ID']: cols_to_get.append(c) elif c.upper() == 'TIME': cols_to_get.append(c) #cols_to_get.append('INTERVAL') elif c[:3].upper() == 'POL': polbase = labelbases[c] elif c[:4].upper() == 'CHAN': chanbase = labelbases[c] else: casalog.post("Do not know how to label by %s." % c, 'WARN') # Sigh - it seems to be necessary to get the list of DDIDs before # using ms.iter*, i.e. ms.iter* can't actually iter over > 1 DDID? # I wonder what happens if SPECTRAL_WINDOW has, as it often does, # more spws than are used in the main table. 
tb.open(msname + '/SPECTRAL_WINDOW') nddid = tb.nrows() tb.close() ms.open(msname, nomodify=False) for ddid in xrange(nddid): ms.selectinit(datadescid=ddid) ms.iterinit(maxrows=2000) ms.iterorigin() datacol = datacol.lower() ismore = True while ismore: chunk = ms.getdata(cols_to_get) if chunk.has_key('time'): chunk['time'] -= chunk['time'][0] #chunk['time'] /= chunk['interval'][0] if not incremental: chunk[datacol][:,:,:] = 0.0 for q in labelbases: # Relies on all the columns that are both in labelbases and chunk # being scalar. if chunk.has_key(q.lower()): if hasattr(labelbases[q], '__call__'): data = labelbases[q](chunk) if hasattr(data, 'shape'): chunk[datacol] += data else: chunk[datacol][:,:,:] += data else: chunk[datacol][:,:,:] += chunk[q.lower()] * labelbases[q] datshape = chunk[datacol].shape if polbase: if hasattr(polbase, '__call__'): chunk[datacol] += polbase(chunk) else: for polind in xrange(datshape[0]): chunk[datacol][polind, :, :] += polbase * polind if chanbase: if hasattr(chanbase, '__call__'): chunk[datacol] += chanbase(chunk) else: for cind in xrange(datshape[1]): chunk[datacol][:, cind, :] += chanbase * cind ms.putdata({datacol: chunk[datacol]}) ismore = ms.iternext() try: addendum = msname + " " if incremental: addendum += "added to" else: addendum += "labelled" addendum += " by labelbases = {\n" qbs = [] for q, b in labelbases.items(): qb = "\t%16s: " % ("'" + q + "'") if hasattr(b, '__call__'): qb += b.__name__ + ' (function)' elif type(b) == complex: if b.real != 0.0: qb += "%.1g" % b.real if b.imag != 0.0: qb += "%+.1gi" % b.imag else: qb += "%.1g" % b qbs.append(qb) addendum += ",\n".join(qbs) addendum += "\n}" # The parms parameter is a false lead - ms.listhistory and listhistory # don't show it in the logger. ms.writehistory(addendum, origin="label_itered_ms") except Exception, instance: casalog.post("*** Error updating %s's history: %s" % (msname, instance), 'SEVERE')
def radav(img, wc, maxrad=None):
    """
    Return the azimuthal average of the image named img about the world
    coordinate wc.

    Returns (rs, azav, wts, runit):
    rs    -- the radii of the averages, in pixels, or scaled by the image's
             pixel increment when runit is not 'pixels'.
    azav  -- the azimuthal average at each radius.
    wts   -- the weights used to normalize each average.
    runit -- the baseline unit, taken from maxrad if it was given, otherwise
             from img's axisunits.

    maxrad, if given, is a string like '40 arcsec' (value and unit separated
    by whitespace).  For now it assumes you are only interested in the first
    plane of img, and that it has square pixels.
    """
    myf = taskutil.get_global_namespace()
    ia = myf['ia']
    pl = myf['pl']
    tb = myf['tb']
    ia.open(img)
    imgsummary = ia.summary()['header']
    imgshape = imgsummary['shape']
    # For some unknown reason topixel returns a dictionary containing an
    # array of floats.
    centpix = ia.topixel(wc)['numeric'].tolist()
    if maxrad == None:
        # No limit given: reach to the most distant corner of the image
        # (plus a little padding for the gridding below).
        maxdelta = [max(abs(imgshape[i] - centpix[i]), abs(centpix[i]))**2
                    for i in [0, 1]]
        maxdelta = int(pl.ceil(sqrt(sum(maxdelta)) + 1.5))
        runit = imgsummary['axisunits'][0]
    else:
        maxdelta, runit = maxrad.split()
        if runit != 'pixels':
            # Convert the world-coordinate radius to pixels using the y
            # increment (square pixels assumed).
            maxdelta = int(pl.ceil(abs(float(maxdelta) /
                                       imgsummary['incr'][1]) + 1.5))
    # We could use a lot of pixelvalue()s here, but that would be slow.
    # Close up and reopen as a tb.
    ia.close()
    tb.open(img)
    imgarr = tb.getcol('map')
    tb.close()
    azav = pl.zeros([maxdelta], dtype=float)
    wts = pl.zeros([maxdelta], dtype=float)
    cx, cy = centpix[0:2]
    # Index tuple selecting the first plane on all trailing (non-xy) axes.
    firstplane = tuple(pl.zeros([len(imgshape[2:]) + 1]).tolist())
    # Set up some numbers for gridding the image pixels onto the radial
    # plot.  Instead of calculating the angle between the radius vector and
    # an x or y axis, treat it as pi/8 on average.
    oosqrt2 = 1.0 / sqrt(2.0)
    secpio8 = 1.0 / sqrt(0.5 * (1.0 + oosqrt2))
    secpio8or2 = oosqrt2 * secpio8
    # cos(pi/8)/sqrt(2)
    ohs1p1or2 = 0.5 * sqrt(1 + oosqrt2)
    # sin(pi/8)/sqrt(2)
    ohs1m1or2 = 0.5 * sqrt(1 - oosqrt2)
    # Bounding box of pixels within maxdelta of the center, clipped to the
    # image.
    minx = int(pl.ceil(max(0, cx - maxdelta)))
    maxx = int(pl.floor(min(imgshape[0], cx + maxdelta)))
    Miny = int(pl.ceil(max(0, cy - maxdelta)))
    Maxy = int(pl.floor(min(imgshape[1], cy + maxdelta)))
    maxd2 = maxdelta**2
    maxdelta -= 1
    for i in xrange(minx, maxx):
        # Half-height of the disk at this column, to skip corner pixels.
        possxspread = sqrt(maxd2 - (i - cx)**2)
        miny = max(Miny, int(pl.ceil(cy - possxspread)))
        maxy = min(Maxy, int(pl.floor(cy + possxspread)))
        for j in xrange(miny, maxy):
            # I don't understand why a[(0, 0, 0)] != a[[0, 0, 0]], but it
            # does.
            pv = imgarr[i, j][firstplane]
            rad = sqrt((i - cx)**2 + (j - cy)**2)
            if rad > 0.8 and rad < maxdelta:
                # Spread this pixel over bins pi-1, pi, and pi+1 with
                # weights wnear, w0, wfar based on its fractional radius.
                pi = int(pl.floor(rad))
                r = rad - pi
                if r < ohs1p1or2:
                    if r > ohs1m1or2:
                        wnear = sqrt(2) * (r - ohs1p1or2)**2
                    else:
                        wnear = 0.5 - secpio8 * r
                else:
                    wnear = 0.0
                if r > 1.0 - ohs1p1or2:
                    if r < 1.0 - ohs1m1or2:
                        wfar = sqrt(2) * (r + ohs1p1or2 - 1.0)**2
                    else:
                        wfar = 0.5 - secpio8 * (1.0 - r)
                else:
                    wfar = 0.0
                w0 = 1.0 - wnear - wfar
                try:
                    azav[pi - 1] += wnear * pv
                    wts[pi - 1] += wnear
                    azav[pi] += w0 * pv
                    wts[pi] += w0
                    azav[pi + 1] += wfar * pv
                    wts[pi + 1] += wfar
                except IndexError:
                    print "pi:", pi
                    print "rad:", rad
                    print "maxdelta:", maxdelta
            else:
                # This *is* the center pixel.
                # NOTE(review): pixels with rad >= maxdelta (possible at the
                # disk edge) also land here and are lumped into bin 0 --
                # verify that is intended.
                azav[0] += pv
                wts[0] += 1.0
    # Normalize the accumulated sums by their weights.
    # NOTE(review): a bin with wts[i] == 0 divides by zero here -- TODO
    # confirm that the padding above guarantees every bin gets weight.
    for i in xrange(0, len(wts)):
        azav[i] /= wts[i]
    rs = [i for i in xrange(0, len(wts))]
    if runit != 'pixels':
        # Scale the pixel radii into world-coordinate units.
        pixscale = abs(imgsummary['incr'][1])
        for i in xrange(0, len(wts)):
            rs[i] *= pixscale
    return rs, azav, wts, runit