Example #1
def main(wf):
    img_file, need_format, ext_format = get_paste_img_file()
    url = os.getenv('qiniu_domain_url')
    bulk = os.getenv('bulk')
    if img_file:
        # There is an image; generate a unique upload name from the timestamp.
        upload_name = "%s.%s" % (int(time.time() * 1000), ext_format)
        if need_format:
            markdown_url = 'http://%s/%s/%s' % (url, bulk, upload_name)
        else:
            markdown_url = 'http://%s/%s/%s' % (url, bulk, upload_name)

        if not upload_qiniu(img_file.name, upload_name):
            util.notice("上传失败")
        else:
            # Copy to the clipboard.
            # os.system("echo '%s' | pbcopy" % markdown_url)
            # wf.add_item(u'上传截图', u'成功!')
            # wf.send_feedback()
            print markdown_url
    else:
        util.notice('剪切板中没有图片')
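
Example #1 reads like the entry point of an Alfred workflow. A minimal sketch of how such a main(wf) is typically driven, assuming the deanishe/alfred-workflow package (an assumption; the snippet does not show it):

import sys
from workflow import Workflow  # assumed: deanishe/alfred-workflow package

if __name__ == '__main__':
    wf = Workflow()
    # Workflow.run() calls main(wf) and handles errors/notifications.
    sys.exit(wf.run(main))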
Example #2
def empty(options):
    max_baseline = options.max_baseline if options.max_baseline > 0.0 else \
        10000.0

    processor_options = {}
    processor_options["processor"] = options.processor
    processor_options["w_max"] = max_baseline
    processor_options["padding"] = 1.0
    processor_options["ms"] = options.ms
    processor_options["image"] = options.image
    processor_options["threads"] = options.threads
    processor_options["weighttype"] = options.weighttype
    processor_options["rmode"] = options.rmode
    processor_options["noise"] = options.noise
    processor_options["robustness"] = options.robustness
    processor_options["profile"] = options.profile
    processor_options["beamname"] = options.beamname

    processor_options["gridding.ATerm.name"] = "ATermPython"
    processor_options["ATermPython.module"] = "lofar.imager.myaterm"
    processor_options["ATermPython.class"] = "MyATerm"

    processor = processors.create_data_processor(options.ms, processor_options)

    channel_freq = processor.channel_frequency()
    channel_width = processor.channel_width()

    max_freq = numpy.max(channel_freq)
    image_size = 2.0 * util.full_width_half_max(70.0, max_freq)

    # TODO: Cyril mentioned the image size estimate above is too conservative.
    # Need to check this and find a better estimate if necessary. For now, will
    # just multiply estimated FOV by 2.0.
    image_size *= 2.0

    (n_px, delta_px) = util.image_configuration(image_size, max_freq,
                                                max_baseline)

    util.notice("image configuration:")
    util.notice("    size: %d x %d pixel" % (n_px, n_px))
    util.notice("    angular size: %.2f deg" % (image_size * 180.0 / numpy.pi))
    util.notice("    angular resolution @ 3 pixel/beam: %.2f arcsec/pixel" %
                (3600.0 * delta_px * 180.0 / numpy.pi))

    image_shape = (1, 4, n_px, n_px)
    image_coordinates = pyrap.images.coordinates.coordinatesystem(
        lofar.casaimwrap.make_coordinate_system(image_shape[2:],
                                                [delta_px, delta_px],
                                                processor.phase_reference(),
                                                channel_freq, channel_width))

    util.notice("creating empty image...")
    pyrap.images.image(options.image,
                       shape=image_shape,
                       coordsys=image_coordinates)
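
The image size above comes from util.full_width_half_max(70.0, max_freq), where 70.0 is the station diameter in meters. A plausible stand-in for that helper, assuming the usual diffraction rule of thumb FWHM ~ lambda / D radians (the real LOFAR helper may apply an extra scale factor):

def full_width_half_max(diameter, freq):
    # Wavelength in meters at the given frequency (Hz).
    wavelength = 299792458.0 / freq
    # Diffraction-limited beam width in radians, FWHM ~ lambda / D.
    return wavelength / diameter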
Example #3
def empty(options):
    max_baseline = options.max_baseline if options.max_baseline > 0.0 else \
        10000.0

    processor_options = {}
    processor_options["processor"] = options.processor
    processor_options["w_max"] = max_baseline
    processor_options["padding"] = 1.0
    processor_options["ms"] = options.ms
    processor_options["image"] = options.image
    processor_options["threads"] = options.threads
    processor_options["weighttype"] = options.weighttype
    processor_options["rmode"] = options.rmode
    processor_options["noise"] = options.noise
    processor_options["robustness"] = options.robustness
    processor_options["profile"] = options.profile
    processor_options["beamname"] = options.beamname

    processor_options["gridding.ATerm.name"] = "ATermPython"
    processor_options["ATermPython.module"] = "lofar.imager.myaterm"
    processor_options["ATermPython.class"] = "MyATerm"

    processor = processors.create_data_processor(options.ms, processor_options)

    channel_freq = processor.channel_frequency()
    channel_width = processor.channel_width()

    max_freq = numpy.max(channel_freq)
    image_size = 2.0 * util.full_width_half_max(70.0, max_freq)

    # TODO: Cyril mentioned the image size estimate above is too conservative.
    # Need to check this and find a better estimate if necessary. For now, will
    # just multiply estimated FOV by 2.0.
    image_size *= 2.0

    (n_px, delta_px) = util.image_configuration(image_size, max_freq,
        max_baseline)

    util.notice("image configuration:")
    util.notice("    size: %d x %d pixel" % (n_px, n_px))
    util.notice("    angular size: %.2f deg" % (image_size * 180.0 / numpy.pi))
    util.notice("    angular resolution @ 3 pixel/beam: %.2f arcsec/pixel"
        % (3600.0 * delta_px * 180.0 / numpy.pi))

    image_shape = (1, 4, n_px, n_px)
    image_coordinates = pyrap.images.coordinates.coordinatesystem(
        lofar.casaimwrap.make_coordinate_system(image_shape[2:], [delta_px,
        delta_px], processor.phase_reference(), channel_freq, channel_width))

    util.notice("creating empty image...")
    pyrap.images.image(options.image, shape=image_shape,
        coordsys=image_coordinates)
Example #4
def upload_qiniu(path, upload_name):
    ''' Upload a file to Qiniu. '''
    ak = os.getenv('access_key')
    sk = os.getenv('secret_key')
    bulk = os.getenv('bulk')
    qiniu_domain_url = os.getenv('qiniu_domain_url')

    if ak is None or sk is None:
        util.notice("请输入七牛相关配置")
        return False
    q = Auth(ak, sk)
    key = '%s/%s' % (bulk, upload_name)

    token = q.upload_token(bulk, key)
    ret, info = put_file(token, key, path)
    return ret is not None and ret['key'] == key
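
A minimal sketch of exercising upload_qiniu() above; the environment variable names match the ones the function reads, but every value here is a placeholder:

import os

os.environ['access_key'] = '<qiniu-access-key>'      # placeholder
os.environ['secret_key'] = '<qiniu-secret-key>'      # placeholder
os.environ['bulk'] = 'screenshots'                   # bucket, also the key prefix; placeholder
os.environ['qiniu_domain_url'] = 'cdn.example.com'   # placeholder

if upload_qiniu('/tmp/shot.png', '1700000000000.png'):
    print 'http://%s/%s/1700000000000.png' % (
        os.getenv('qiniu_domain_url'), os.getenv('bulk'))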
Example #5
def degridder(options):
    max_baseline = options.max_baseline if options.max_baseline > 0.0 else \
        10000.0

    processor_options = {}
    processor_options["threads"] = options.threads
    processor_options["processor"] = options.processor
    processor_options["w_max"] = max_baseline
    processor_options["padding"] = 1.0
    processor_options["image"] = options.image
    processor_options["weighttype"] = options.weighttype
    processor_options["rmode"] = options.rmode
    processor_options["noise"] = options.noise
    processor_options["robustness"] = options.robustness
    processor_options["profile"] = options.profile
    processor_options["chunksize"] = options.chunksize
    processor_options["outcol"] = options.outcol
    
    processor_options["gridding.ATerm.name"] = "ATermPython"
    processor_options["ATermPython.module"] = "imager.myaterm"
    processor_options["ATermPython.class"] = "MyATerm"
    
    processor = processors.create_data_processor(options.ms, processor_options)

    '''channel_freq = processor.channel_frequency()
    channel_width = processor.channel_width()

    max_freq = numpy.max(channel_freq)
    image_size = 2.0 * util.full_width_half_max(70.0, max_freq)

    # TODO: Cyril mentioned the image size estimate above is too conservative.
    # Need to check this and find a better estimate if necessary. For now, will
    # just multiply estimated FOV by 2.0.
    image_size *= 2.0

    (n_px, delta_px) = util.image_configuration(image_size, max_freq,
        max_baseline)

    util.notice("image configuration:")
    util.notice("    size: %d x %d pixel" % (n_px, n_px))
    util.notice("    angular size: %.2f deg" % (image_size * 180.0 / numpy.pi))
    util.notice("    angular resolution @ 3 pixel/beam: %.2f arcsec/pixel"
        % (3600.0 * delta_px * 180.0 / numpy.pi))

    image_shape = (1, 4, n_px, n_px)
    image_coordinates = pyrap.images.coordinates.coordinatesystem(
        casaimwrap.make_coordinate_system(image_shape[2:], [delta_px,
        delta_px], processor.phase_reference(), channel_freq, channel_width))'''


    # Read input model image
    modelim = pyrap.images.image(processor_options['image'])

    util.notice("Model name: %s" % processor_options['image'])
    print type(modelim)
    util.notice('model shape: %s' % modelim.shape())

    # Get image coordinates
    model_coordinates = modelim.coordinates()
    model = modelim.getdata()

    print type(model_coordinates)
    print type(model)
    print 'model shape after getdata: ', model.shape

    # Degrid
    util.notice("Predicting visibilities...")
    tab = pyrap.tables.table(options.ms)
    nrows = tab.nrows()
    tab.close()
    print "There are ", nrows, " rows in the MS..."
    if options.chunksize > 0 and options.chunksize <= nrows:
        print 'calling degrid_chunk...'
        processor.degrid_chunk(model_coordinates, model,
                               processors.Normalization.FLAT_GAIN,
                               options.chunksize)
    else:
        print 'calling degrid...'
        processor.degrid(model_coordinates, model,
                         processors.Normalization.FLAT_GAIN)
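
The chunked/full dispatch at the end of degridder() recurs in Examples #6, #9, and #10. A small helper capturing the pattern; the helper name is illustrative, not part of the original module:

def predict_visibilities(processor, ms_name, coords, model, chunksize):
    # Count the rows in the measurement set to validate the chunk size.
    tab = pyrap.tables.table(ms_name)
    nrows = tab.nrows()
    tab.close()
    if chunksize > 0 and chunksize <= nrows:
        # Degrid in chunks of rows to bound memory usage.
        processor.degrid_chunk(coords, model,
                               processors.Normalization.FLAT_GAIN, chunksize)
    else:
        # Degrid the whole measurement set in one pass.
        processor.degrid(coords, model, processors.Normalization.FLAT_GAIN)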
Example #6
def dirty(options):
    # Create the data processor. The data processor is an abstraction over
    # different gridding / degridding algorithms. The idea is that the data
    # processor transforms from image to visibilities and vice versa. The rest
    # of the code only works on images and does not (need to) access visibility
    # data.
    #
    # Several implementations of the data processor interface (see
    # processors/data_processor_base.py) are available. The idea is to have
    # optimized implementations for specific cases, as well as (possibly slower)
    # generic implementations.
    #
    # TODO: Need to create a smaller set of options that are required when the
    # data processor is instantiated. For example, to create an empty image,
    # details about the weighting scheme are not important.
    #
    max_baseline = options.max_baseline if options.max_baseline > 0.0 else \
        10000.0
    processor_options = {}
    processor_options["processor"] = options.processor
    processor_options["w_max"] = max_baseline
    processor_options["padding"] = 1.0
    processor_options["image"] = options.image
    processor_options["threads"] = options.threads
    processor_options["weighttype"] = options.weighttype
    processor_options["rmode"] = options.rmode
    processor_options["noise"] = options.noise
    processor_options["robustness"] = options.robustness
    processor_options["profile"] = options.profile
    processor_options["chunksize"] = options.chunksize

    processor_options["gridding.ATerm.name"] = "ATermPython"
    processor_options["ATermPython.module"] = "imager.myaterm"
    processor_options["ATermPython.class"] = "MyATerm"

    processor = processors.create_data_processor(options.ms, processor_options)

    channel_freq = processor.channel_frequency()
    channel_width = processor.channel_width()

    # Estimate the size of the image in radians, based on an estimate of the
    # FWHM of the station beam, assuming a station diameter of 70 meters.
    max_freq = numpy.max(channel_freq)
    image_size = 2.0 * util.full_width_half_max(70.0, max_freq)

    # TODO: Cyril mentioned the image size estimate above is too conservative.
    # Need to check this and find a better estimate if necessary. For now, will
    # just multiply estimated FOV by 2.0.
    image_size *= 2.0

    # Estimate the number of pixels and the pixel size in radians such that
    # the image is sampled at approximately 3 pixels per beam.
    (n_px, delta_px) = util.image_configuration(image_size, max_freq,
        max_baseline)

    util.notice("image configuration:")
    util.notice("    size: %d x %d pixel" % (n_px, n_px))
    util.notice("    angular size: %.2f deg" % (image_size * 180.0 / numpy.pi))
    util.notice("    angular resolution @ 3 pixel/beam: %.2f arcsec/pixel"
        % (3600.0 * delta_px * 180.0 / numpy.pi))

    # Create an empty image. For the moment, the implementation is limited to
    # single channel images.
    image_shape = (1, 4, n_px, n_px)
    image_coordinates = pyrap.images.coordinates.coordinatesystem(
        casaimwrap.make_coordinate_system(image_shape[2:], [delta_px,
        delta_px], processor.phase_reference(), channel_freq, channel_width))

    # Call the data processor to grid the visibility data (i.e. compute the
    # dirty image).
    util.notice("creating dirty image...")
    tab = pyrap.tables.table(options.ms)
    nrows = tab.nrows()
    tab.close()
    print "There are ", nrows, " rows in the MS..."
    if options.chunksize > 0 and options.chunksize <= nrows:
      print 'calling grid_chunk...'
      dirty_image, _ = processor.grid_chunk(image_coordinates, image_shape,
        processors.Normalization.FLAT_NOISE, options.chunksize)
    else:
      print 'calling grid...'
      dirty_image, _ = processor.grid(image_coordinates, image_shape,
        processors.Normalization.FLAT_NOISE)

    # Store output images. Store both a flat noise and a flat gain image.
    util.notice("storing dirty images...")
    util.store_image(options.image + ".dirty.flat_noise",
        image_coordinates, dirty_image)
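
util.image_configuration() turns the estimated field of view into a pixel grid. A plausible stand-in, assuming the standard synthesis-imaging rules of thumb (resolution ~ lambda / max baseline, sampled at 3 pixels per beam); the real helper may round n_px up to an FFT-friendly size:

import numpy

def image_configuration(image_size, max_freq, max_baseline):
    # Synthesized beam width in radians at the highest frequency.
    resolution = (299792458.0 / max_freq) / max_baseline
    # Sample the beam at approximately 3 pixels.
    delta_px = resolution / 3.0
    # Number of pixels needed to cover the requested field of view.
    n_px = int(numpy.ceil(image_size / delta_px))
    return (n_px, delta_px)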
Example #7
def mfclean(options):
    clark_options = {}
    clark_options["gain"] = options.gain
    clark_options["iterations"] = options.iterations
    clark_options["cycle_speedup"] = options.cycle_speedup

    max_baseline = options.max_baseline if options.max_baseline > 0.0 else \
        10000.0

    processor_options = {}
    processor_options["processor"] = options.processor
    processor_options["w_max"] = max_baseline
    processor_options["padding"] = 1.0
    processor_options["image"] = options.image
    processor_options["threads"] = options.threads
    processor_options["weighttype"] = options.weighttype
    processor_options["rmode"] = options.rmode
    processor_options["noise"] = options.noise
    processor_options["robustness"] = options.robustness
    processor_options["profile"] = options.profile
    processor = processors.create_data_processor(options.ms, processor_options)

    channel_freq = processor.channel_frequency()
    channel_width = processor.channel_width()

    max_freq = numpy.max(channel_freq)
    image_size = 2.0 * util.full_width_half_max(70.0, max_freq)

    # TODO: Cyril mentioned the image size estimate above is too conservative.
    # Need to check this and find a better estimate if necessary. For now, will
    # just multiply estimated FOV by 2.0.
    image_size *= 2.0

    (n_px, delta_px) = util.image_configuration(image_size, max_freq,
        max_baseline)

    util.notice("image configuration:")
    util.notice("    size: %d x %d pixel" % (n_px, n_px))
    util.notice("    angular size: %.2f deg"
        % (image_size * 180.0 / numpy.pi))
    util.notice("    angular resolution @ 3 pixel/beam: %.2f arcsec/pixel"
        % (3600.0 * delta_px * 180.0 / numpy.pi))

    # TODO: Need to implement support for multiple channel images. Currently,
    # all data channels are combined into a single MFS image per correlation.
    image_shape = (1, 4, n_px, n_px)
    image_coordinates = pyrap.images.coordinates.coordinatesystem(
        casaimwrap.make_coordinate_system(image_shape[2:], [delta_px,
        delta_px], processor.phase_reference(), channel_freq, channel_width))

    n_model = 1
    # TODO: Check code for n_model > 1!
    assert(n_model == 1)

    # Comment from CASA source code:
    #
    # Set to search for peak in I^2+Q^2+U^2+V^2 domain or each stokes plane
    # separately. Ignored for hogbom and msclean for now.
#    join_stokes = False
    join_stokes = True

    # Compute approximate PSFs.
    util.notice("computing approximate point spread functions...")
    psf = [None for i in range(n_model)]
    beam = [None for i in range(n_model)]
    for i in range(n_model):
        psf[i] = processor.point_spread_function(image_coordinates, image_shape)
        fit = casaimwrap.fit_gaussian_psf(image_coordinates.dict(),
            psf[i])
        assert(fit["ok"])

        beam[i] = BeamParameters((fit["major"] * numpy.pi) / (3600.0 * 180.0),
            (fit["minor"] * numpy.pi) / (3600.0 * 180.0), (fit["angle"]
            * numpy.pi) / 180.0)

        util.notice("model %d/%d: major axis: %f arcsec, minor axis: %f arcsec,"
            " position angle: %f deg" % (i, n_model - 1, abs(fit["major"]),
            abs(fit["minor"]), fit["angle"]))

    # Validate PSFs.
    (min_psf, max_psf, max_psf_outer, psf_patch_size, max_sidelobe) = \
        validate_psf(image_coordinates, psf, beam)
    clark_options["psf_patch_size"] = psf_patch_size

    updated = [False for i in range(n_model)]
    weight = [None for i in range(n_model)]
    model = [numpy.zeros(image_shape) for i in range(n_model)]
    delta = [numpy.zeros(image_shape) for i in range(n_model)]
    residual = [numpy.zeros(image_shape) for i in range(n_model)]

    if join_stokes:
        iterations = numpy.zeros((n_model, 1, image_shape[0]))
        stokes = ["JOINT"]
        cr_slices = [slice(None)]
    else:
        iterations = numpy.zeros((n_model, image_shape[1], image_shape[0]))
        stokes = image_coordinates.get_coordinate("stokes").get_stokes()
        cr_slices = [slice(i, i + 1) for i in range(4)]

    cycle = 0
    diverged = False
    absmax = options.threshold
    previous_absmax = 1e30

    while absmax >= options.threshold and numpy.max(iterations) \
        < options.iterations and (cycle == 0 or any(updated)):

        util.notice(">> starting major cycle: %d <<" % cycle)

        # Comment from CASA source code:
        #
        # Make the residual images. We do an incremental update for cycles after
        # the first one. If we have only one model then we use convolutions to
        # speed the processing
        util.notice("computing residuals...")

        # TODO: If n_models > 1, need to compute residuals from the sum of
        # the degridded visibilities (see LofarCubeSkyEquation.cc).
        assert(n_model == 1)
        if cycle == 0:
            # Assuming the initial models are zero, the residual visibilities
            # equal the observed visibilities and therefore we only need to
            # grid them.
            for i in range(n_model):
                residual[i], weight[i] = processor.grid(image_coordinates,
                    image_shape, processors.Normalization.FLAT_NOISE)
        else:
            for i in range(n_model):
                if updated[i]:
                    residual[i], weight[i] = \
                        processor.residual(image_coordinates, model[i],
                            processors.Normalization.FLAT_NOISE,
                            processors.Normalization.FLAT_NOISE)
                updated[i] = False

        # Compute residual statistics.
        (absmax, resmin, resmax) = max_field(residual, weight)

        # Print some statistics.
        for i in range(n_model):
            util.notice("model %d/%d: min residual: %f, max residual: %f"
                % (i, n_model - 1, resmin[i], resmax[i]))
        util.notice("peak residual: %f" % absmax)

        # Comment from CASA source code:
        #
        # Check if absmax is 5% above its previous value.
        #
        # TODO: Value used does not look like 5%?
        if absmax >= 1.000005 * previous_absmax:
            diverged = True
            break

        # Store absmax of this major cycle for later reference.
        previous_absmax = absmax

        # Check stop criterion.
        if absmax < options.threshold:
            break

        # TODO: What is this really used for? And does the max weight indeed
        # correspond to sensitivity in Jy/beam?
        if cycle == 0:
            max_weight = 0.0
            for i in range(n_model):
                max_weight = max(max_weight, numpy.max(weight[i]))
            util.notice("maximum sensitivity: %f Jy/beam" % (1.0
                / numpy.sqrt(max_weight)))

        # Comment from CASA source code:
        #
        # Calculate the threshold for this cycle. Add a safety factor
        #
        # fractionOfPsf controls how deep the cleaning should go.
        # There are two user-controls.
        # cycleFactor_p : scale factor for the PSF sidelobe level.
        #                        1 : clean down to the psf sidelobe level
        #                        <1 : go deeper
        #                        >1 : shallower : stop sooner.
        #                        Default : 1.5
        # cycleMaxPsfFraction_p : scale factor as a fraction of the PSF peak
        #                                    must be 0.0 < xx < 1.0 (obviously)
        #                                    Default : 0.8
        fraction_of_psf = min(options.cycle_max_psf_fraction,
            options.cycle_factor * max_sidelobe)

        if fraction_of_psf > 0.8:
            util.warning("PSF fraction for threshold computation is too"
                " high: %f. Forcing to 0.8 to ensure that the threshold is"
                " smaller than the peak residual!" % fraction_of_psf)
            fraction_of_psf = 0.8   # painfully slow!

        # Update cycle threshold.
        cycle_threshold = max(0.95 * options.threshold, fraction_of_psf
            * absmax)
        clark_options["cycle_threshold"] = cycle_threshold

        util.notice("minor cycle threshold max(0.95 * %f, peak residual * %f):"
            " %f" % (options.threshold, fraction_of_psf, cycle_threshold))

        # Execute the minor cycle (Clark clean) for each channel of each model.
        util.notice("starting minor cycle...")
        for i in range(n_model):
            if max(abs(resmin[i]), abs(resmax[i])) < cycle_threshold:
                util.notice("model %d/%d: peak residual below threshold"
                    % (i, n_model - 1))
                continue

            if max_psf[i] <= 0.0:
                util.warning("model %d/%d: point spread function negative or"
                    " zero" % (i, n_model - 1))
                continue

            # Zero the delta image for this model.
            delta[i].fill(0.0)

            for (cr, cr_slice) in enumerate(cr_slices):
                for ch in range(len(residual[i])):
                    # TODO: The value of max_weight is only updated during
                    # cycle 0. Is this correct?
                    #
                    assert(len(weight[i].shape) == 2
                        and weight[i].shape[:2] == residual[i].shape[:2])

                    plane_weight = numpy.sqrt(weight[i][ch, cr_slice]
                        / max_weight)
                    if numpy.any(plane_weight > 0.01):
                        weight_mask = numpy.ones((residual[i].shape[2:]))
                    else:
                        weight_mask = numpy.zeros((residual[i].shape[2:]))

                    # Call CASA Clark clean implementation (minor cycle).
                    # TODO: When cleaning each Stokes parameter separately,
                    # the PSF of Stokes I is used for all others as well?
                    #
                    # Comment from CASA source code:
                    #
                    # We only want the PSF for the first polarization so we
                    # iterate over polarization LAST.
                    #
                    result = casaimwrap.clark_clean(psf[i][ch,0,:,:],
                        residual[i][ch,cr_slice,:,:], weight_mask,
                        iterations[i,cr,ch], clark_options)

                    if result["iterations"] > iterations[i,cr,ch]:
                        updated[i] = True
                        delta[i][ch,cr_slice,:,:] = result["delta"]
                        iterations[i,cr,ch] = result["iterations"]
                    else:
                        assert(numpy.all(result["delta"] == 0.0))

                util.notice("model %d/%d: stokes: %s, cleaned: %f Jy, "
                    "iterations per channel: %s" % (i, n_model - 1,
                    stokes[cr], numpy.sum(delta[i][ch,cr_slice,:,:]),
                    str(iterations[i,cr,:])))

        # Update model images if required.
        for i in range(n_model):
            if updated[i]:
                model[i] += delta[i]

        # Update major cycle counter.
        cycle += 1

    if any(updated):
        util.notice("finalizing residual images for all fields...")
        for i in range(n_model):
            if updated[i]:
                residual[i], weight[i] = processor.residual(image_coordinates,
                    model[i], processors.Normalization.FLAT_NOISE,
                    processors.Normalization.FLAT_NOISE)
        (absmax, resmin, resmax) = max_field(residual, weight)

        # Print some statistics.
        for i in range(n_model):
            util.notice("model %d/%d: min residual: %f, max residual: %f"
                % (i, n_model - 1, resmin[i], resmax[i]))
        util.notice("peak residual: %f" % absmax)
    else:
        util.notice("residual images for all fields are up-to-date...")

    # Store output images.
    util.notice("storing average response...")
    util.store_image(options.image + ".response", image_coordinates,
        processor.response(image_coordinates, image_shape))

    util.notice("storing model images...")
    for i in range(n_model):
        util.store_image(options.image + ".model.flat_noise",
            image_coordinates, model[i])
        util.store_image(options.image + ".model", image_coordinates,
            processor.normalize(image_coordinates, model[i],
            processors.Normalization.FLAT_NOISE,
            processors.Normalization.FLAT_GAIN))

    util.notice("storing residual images...")
    for i in range(n_model):
        util.store_image(options.image + ".residual.flat_noise",
            image_coordinates, residual[i])
        util.store_image(options.image + ".residual", image_coordinates,
            processor.normalize(image_coordinates, residual[i],
            processors.Normalization.FLAT_NOISE,
            processors.Normalization.FLAT_GAIN))

    util.notice("storing restored images...")
    for i in range(n_model):
        restored = restore_image(image_coordinates.dict(), model[i],
            residual[i], beam[i])

        util.store_image(options.image + ".restored.flat_noise",
            image_coordinates, restored)
        util.store_image(options.image + ".restored", image_coordinates,
            processor.normalize(image_coordinates, restored,
            processors.Normalization.FLAT_NOISE,
            processors.Normalization.FLAT_GAIN))

    # Print some statistics.
    for i in range(n_model):
        util.notice("model %d/%d: clean flux: %f, residual rms: %f" % (i,
            n_model - 1, numpy.sum(model[i]), numpy.std(residual[i])))

    if diverged:
        util.error("clean diverged.")
    elif absmax < options.threshold:
        util.notice("clean converged.")
    else:
        util.warning("clean did not reach threshold: %f Jy."
            % options.threshold)
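
A worked instance of the minor-cycle threshold rule in mfclean() above, with illustrative numbers: threshold 0.01 Jy, cycle_factor 1.5, max PSF sidelobe 0.3, peak residual 2.0 Jy.

cycle_max_psf_fraction = 0.8
fraction_of_psf = min(cycle_max_psf_fraction, 1.5 * 0.3)   # -> 0.45
cycle_threshold = max(0.95 * 0.01, fraction_of_psf * 2.0)  # -> 0.9 Jy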
Example #8
enabled = ['codfw', 'ulsfo', 'pmtpa', 'ops-requests', 'network', 'esams', 'eqiad', 'core-ops',]

import util
try:
    from rtppl import ppl as users
except ImportError:
    util.notice("rtppl not found!")
    users = {}
import re
from datetime import datetime

prepend = 'rt'

def rt_cc_defaults(queue):
    #'operations-codfw': '(rhalsell,mbergsma,cmjohnson)
    #'operations-eqiad': (mbergsma,rhalsell,'ashburn dc engineers')
    #'esams': 'operations-esams',,none,(mbergsma,rhalsell,'haarlem dc engineers')
    #'ops-pmtpa: ',none,(mbergsma,rhalsell,'tampa dc engineers')
    #       'procurement': ,none,(mbergsma,rhalsell)
    #       'ulsfo': ,none,(rhalsell,'san francisco dc engineers')

    ccs = {'access-requests': 'operations',
           'core-ops': 'operations',
           'codfw': 'operations-codfw',
           'domains': 'operations',
           'eqiad': 'operations-eqiad',
           'esams': 'operations-esams',
           'legal': 'operations',
           'maint-announce': 'operations',
           'network': 'operations',
           'ops-requests': 'operations',
Example #9
def degridder(options):
    max_baseline = options.max_baseline if options.max_baseline > 0.0 else \
        10000.0

    processor_options = {}
    processor_options["threads"] = options.threads
    processor_options["processor"] = options.processor
    processor_options["w_max"] = max_baseline
    processor_options["padding"] = 1.0
    processor_options["ms"] = options.ms
    processor_options["image"] = options.image
    processor_options["weighttype"] = options.weighttype
    processor_options["rmode"] = options.rmode
    processor_options["noise"] = options.noise
    processor_options["robustness"] = options.robustness
    processor_options["profile"] = options.profile
    processor_options["chunksize"] = options.chunksize
    processor_options["outcol"] = options.outcol
    processor_options["beamname"] = options.beamname

    processor_options["gridding.ATerm.name"] = "ATermPython"
    processor_options["ATermPython.module"] = "lofar.imager.myaterm"
    processor_options["ATermPython.class"] = "MyATerm"

    processor = processors.create_data_processor(options.ms, processor_options)
    '''channel_freq = processor.channel_frequency()
    channel_width = processor.channel_width()

    max_freq = numpy.max(channel_freq)
    image_size = 2.0 * util.full_width_half_max(70.0, max_freq)

    # TODO: Cyril mentioned the image size estimate above is too conservative.
    # Need to check this and find a better estimate if necessary. For now, will
    # just multiply estimated FOV by 2.0.
    image_size *= 2.0

    (n_px, delta_px) = util.image_configuration(image_size, max_freq,
        max_baseline)

    util.notice("image configuration:")
    util.notice("    size: %d x %d pixel" % (n_px, n_px))
    util.notice("    angular size: %.2f deg" % (image_size * 180.0 / numpy.pi))
    util.notice("    angular resolution @ 3 pixel/beam: %.2f arcsec/pixel"
        % (3600.0 * delta_px * 180.0 / numpy.pi))

    image_shape = (1, 4, n_px, n_px)
    image_coordinates = pyrap.images.coordinates.coordinatesystem(
        lofar.casaimwrap.make_coordinate_system(image_shape[2:], [delta_px,
        delta_px], processor.phase_reference(), channel_freq, channel_width))'''

    # Read input model image
    modelim = pyrap.images.image(processor_options['image'])

    util.notice("Model name: %s" % processor_options['image'])
    print type(modelim)
    util.notice('model shape: %s' % modelim.shape())

    # Get image coordinates
    model_coordinates = modelim.coordinates()
    model = modelim.getdata()

    print type(model_coordinates)
    print type(model)
    print 'model shape after getdata: ', model.shape

    # Degrid
    util.notice("Predicting visibilities...")
    tab = pyrap.tables.table(options.ms)
    nrows = tab.nrows()
    tab.close()
    print "There are ", nrows, " rows in the MS..."
    if options.chunksize > 0 and options.chunksize <= nrows:
        print 'calling degrid_chunk...'
        processor.degrid_chunk(model_coordinates, model,
                               processors.Normalization.FLAT_GAIN,
                               options.chunksize)
    else:
        print 'calling degrid...'
        processor.degrid(model_coordinates, model,
                         processors.Normalization.FLAT_GAIN)
Example #10
def dirty(options):
    # Create the data processor. The data processor is an abstraction over
    # different gridding / degridding algorithms. The idea is that the data
    # processor transforms from image to visibilities and vice versa. The rest
    # of the code only works on images and does not (need to) access visibility
    # data.
    #
    # Several implementations of the data processor interface (see
    # processors/data_processor_base.py) are available. The idea is to have
    # optimized implementations for specific cases, as well as (possibly slower)
    # generic implementations.
    #
    # TODO: Need to create a smaller set of options that are required when the
    # data processor is instantiated. For example, to create an empty image,
    # details about the weighting scheme are not important.
    #
    max_baseline = options.max_baseline if options.max_baseline > 0.0 else \
        10000.0
    processor_options = {}
    processor_options["processor"] = options.processor
    processor_options["w_max"] = max_baseline
    processor_options["padding"] = 1.0
    processor_options["ms"] = options.ms
    processor_options["image"] = options.image
    processor_options["threads"] = options.threads
    processor_options["weighttype"] = options.weighttype
    processor_options["rmode"] = options.rmode
    processor_options["noise"] = options.noise
    processor_options["robustness"] = options.robustness
    processor_options["profile"] = options.profile
    processor_options["chunksize"] = options.chunksize
    processor_options["outcol"] = options.outcol
    processor_options["beamname"] = options.beamname

    #processor_options["gridding.ATerm.name"] = "ATermLofar"
    processor_options["gridding.ATerm.name"] = "ATermPython"
    processor_options["ATermPython.module"] = "lofar.imager.myaterm"
    processor_options["ATermPython.class"] = "MyATerm"

    processor = processors.create_data_processor(options.ms, processor_options)

    channel_freq = processor.channel_frequency()
    channel_width = processor.channel_width()

    # Estimate the size of the image in radians, based on an estimate of the
    # FWHM of the station beam, assuming a station diameter of 70 meters.
    max_freq = numpy.max(channel_freq)
    image_size = 2.0 * util.full_width_half_max(70.0, max_freq)

    # TODO: Cyril mentioned the image size estimate above is too conservative.
    # Need to check this and find a better estimate if necessary. For now, will
    # just multiply estimated FOV by 2.0.
    image_size *= 2.0

    # Estimate the number of pixels and the pixel size in radians such that
    # the image is sampled at approximately 3 pixels per beam.
    (n_px, delta_px) = util.image_configuration(image_size, max_freq,
                                                max_baseline)

    util.notice("image configuration:")
    util.notice("    size: %d x %d pixel" % (n_px, n_px))
    util.notice("    angular size: %.2f deg" % (image_size * 180.0 / numpy.pi))
    util.notice("    angular resolution @ 3 pixel/beam: %.2f arcsec/pixel" %
                (3600.0 * delta_px * 180.0 / numpy.pi))

    # Create an empty image. For the moment, the implementation is limited to
    # single channel images.
    image_shape = (1, 4, n_px, n_px)
    image_coordinates = pyrap.images.coordinates.coordinatesystem(
        lofar.casaimwrap.make_coordinate_system(image_shape[2:],
                                                [delta_px, delta_px],
                                                processor.phase_reference(),
                                                channel_freq, channel_width))

    # Call the data processor to grid the visibility data (i.e. compute the
    # dirty image).
    util.notice("creating dirty image...")
    tab = pyrap.tables.table(options.ms)
    nrows = tab.nrows()
    tab.close()
    print "There are ", nrows, " rows in the MS..."
    if options.chunksize > 0 and options.chunksize <= nrows:
        print 'calling grid_chunk...'
        dirty_image, _ = processor.grid_chunk(
            image_coordinates, image_shape,
            processors.Normalization.FLAT_NOISE, options.chunksize)
    else:
        print 'calling grid...'
        dirty_image, _ = processor.grid(image_coordinates, image_shape,
                                        processors.Normalization.FLAT_NOISE)

    # Store output images. Store both a flat noise and a flat gain image.
    util.notice("storing dirty images...")
    util.store_image(options.image + ".dirty.flat_noise", image_coordinates,
                     dirty_image)
Example #11
def mfclean(options):
    clark_options = {}
    clark_options["gain"] = options.gain
    clark_options["iterations"] = options.iterations
    clark_options["cycle_speedup"] = options.cycle_speedup

    max_baseline = options.max_baseline if options.max_baseline > 0.0 else \
        10000.0

    processor_options = {}
    processor_options["processor"] = options.processor
    processor_options["w_max"] = max_baseline
    processor_options["padding"] = 1.0
    processor_options["image"] = options.image
    processor_options["threads"] = options.threads
    processor_options["weighttype"] = options.weighttype
    processor_options["rmode"] = options.rmode
    processor_options["noise"] = options.noise
    processor_options["robustness"] = options.robustness
    processor_options["profile"] = options.profile
    processor = processors.create_data_processor(options.ms, processor_options)

    channel_freq = processor.channel_frequency()
    channel_width = processor.channel_width()

    max_freq = numpy.max(channel_freq)
    image_size = 2.0 * util.full_width_half_max(70.0, max_freq)

    # TODO: Cyril mentioned the image size estimate above is too conservative.
    # Need to check this and find a better estimate if necessary. For now, will
    # just multiply estimated FOV by 2.0.
    image_size *= 2.0

    (n_px, delta_px) = util.image_configuration(image_size, max_freq,
                                                max_baseline)

    util.notice("image configuration:")
    util.notice("    size: %d x %d pixel" % (n_px, n_px))
    util.notice("    angular size: %.2f deg" % (image_size * 180.0 / numpy.pi))
    util.notice("    angular resolution @ 3 pixel/beam: %.2f arcsec/pixel" %
                (3600.0 * delta_px * 180.0 / numpy.pi))

    # TODO: Need to implement support for multiple channel images. Currently,
    # all data channels are combined into a single MFS image per correlation.
    image_shape = (1, 4, n_px, n_px)
    image_coordinates = pyrap.images.coordinates.coordinatesystem(
        casaimwrap.make_coordinate_system(image_shape[2:],
                                          [delta_px, delta_px],
                                          processor.phase_reference(),
                                          channel_freq, channel_width))

    n_model = 1
    # TODO: Check code for n_model > 1!
    assert (n_model == 1)

    # Comment from CASA source code:
    #
    # Set to search for peak in I^2+Q^2+U^2+V^2 domain or each stokes plane
    # separately. Ignored for hogbom and msclean for now.
    #    join_stokes = False
    join_stokes = True

    # Compute approximate PSFs.
    util.notice("computing approximate point spread functions...")
    psf = [None for i in range(n_model)]
    beam = [None for i in range(n_model)]
    for i in range(n_model):
        psf[i] = processor.point_spread_function(image_coordinates,
                                                 image_shape)
        fit = casaimwrap.fit_gaussian_psf(image_coordinates.dict(), psf[i])
        assert (fit["ok"])

        beam[i] = BeamParameters((fit["major"] * numpy.pi) / (3600.0 * 180.0),
                                 (fit["minor"] * numpy.pi) / (3600.0 * 180.0),
                                 (fit["angle"] * numpy.pi) / 180.0)

        util.notice(
            "model %d/%d: major axis: %f arcsec, minor axis: %f arcsec,"
            " position angle: %f deg" % (i, n_model - 1, abs(
                fit["major"]), abs(fit["minor"]), fit["angle"]))

    # Validate PSFs.
    (min_psf, max_psf, max_psf_outer, psf_patch_size, max_sidelobe) = \
        validate_psf(image_coordinates, psf, beam)
    clark_options["psf_patch_size"] = psf_patch_size

    updated = [False for i in range(n_model)]
    weight = [None for i in range(n_model)]
    model = [numpy.zeros(image_shape) for i in range(n_model)]
    delta = [numpy.zeros(image_shape) for i in range(n_model)]
    residual = [numpy.zeros(image_shape) for i in range(n_model)]

    if join_stokes:
        iterations = numpy.zeros((n_model, 1, image_shape[0]))
        stokes = ["JOINT"]
        cr_slices = [slice(None)]
    else:
        iterations = numpy.zeros((n_model, image_shape[1], image_shape[0]))
        stokes = image_coordinates.get_coordinate("stokes").get_stokes()
        cr_slices = [slice(i, i + 1) for i in range(4)]

    cycle = 0
    diverged = False
    absmax = options.threshold
    previous_absmax = 1e30

    while absmax >= options.threshold and numpy.max(iterations) \
        < options.iterations and (cycle == 0 or any(updated)):

        util.notice(">> starting major cycle: %d <<" % cycle)

        # Comment from CASA source code:
        #
        # Make the residual images. We do an incremental update for cycles after
        # the first one. If we have only one model then we use convolutions to
        # speed the processing
        util.notice("computing residuals...")

        # TODO: If n_models > 1, need to compute residuals from the sum of
        # the degridded visibilities (see LofarCubeSkyEquation.cc).
        assert (n_model == 1)
        if cycle == 0:
            # Assuming the initial models are zero, the residual visibilities
            # equal the observed visibilities and therefore we only need to
            # grid them.
            for i in range(n_model):
                residual[i], weight[i] = processor.grid(
                    image_coordinates, image_shape,
                    processors.Normalization.FLAT_NOISE)
        else:
            for i in range(n_model):
                if updated[i]:
                    residual[i], weight[i] = \
                        processor.residual(image_coordinates, model[i],
                            processors.Normalization.FLAT_NOISE,
                            processors.Normalization.FLAT_NOISE)
                updated[i] = False

        # Compute residual statistics.
        (absmax, resmin, resmax) = max_field(residual, weight)

        # Print some statistics.
        for i in range(n_model):
            util.notice("model %d/%d: min residual: %f, max residual: %f" %
                        (i, n_model - 1, resmin[i], resmax[i]))
        util.notice("peak residual: %f" % absmax)

        # Comment from CASA source code:
        #
        # Check if absmax is 5% above its previous value.
        #
        # TODO: Value used does not look like 5%?
        if absmax >= 1.000005 * previous_absmax:
            diverged = True
            break

        # Store absmax of this major cycle for later reference.
        previous_absmax = absmax

        # Check stop criterion.
        if absmax < options.threshold:
            break

        # TODO: What is this really used for? And does the max weight indeed
        # correspond to sensitivity in Jy/beam?
        if cycle == 0:
            max_weight = 0.0
            for i in range(n_model):
                max_weight = max(max_weight, numpy.max(weight[i]))
            util.notice("maximum sensitivity: %f Jy/beam" %
                        (1.0 / numpy.sqrt(max_weight)))

        # Comment from CASA source code:
        #
        # Calculate the threshold for this cycle. Add a safety factor
        #
        # fractionOfPsf controls how deep the cleaning should go.
        # There are two user-controls.
        # cycleFactor_p : scale factor for the PSF sidelobe level.
        #                        1 : clean down to the psf sidelobe level
        #                        <1 : go deeper
        #                        >1 : shallower : stop sooner.
        #                        Default : 1.5
        # cycleMaxPsfFraction_p : scale factor as a fraction of the PSF peak
        #                                    must be 0.0 < xx < 1.0 (obviously)
        #                                    Default : 0.8
        fraction_of_psf = min(options.cycle_max_psf_fraction,
                              options.cycle_factor * max_sidelobe)

        if fraction_of_psf > 0.8:
            util.warning(
                "PSF fraction for threshold computation is too"
                " high: %f. Forcing to 0.8 to ensure that the threshold is"
                " smaller than the peak residual!" % fraction_of_psf)
            fraction_of_psf = 0.8  # painfully slow!

        # Update cycle threshold.
        cycle_threshold = max(0.95 * options.threshold,
                              fraction_of_psf * absmax)
        clark_options["cycle_threshold"] = cycle_threshold

        util.notice("minor cycle threshold max(0.95 * %f, peak residual * %f):"
                    " %f" %
                    (options.threshold, fraction_of_psf, cycle_threshold))

        # Execute the minor cycle (Clark clean) for each channel of each model.
        util.notice("starting minor cycle...")
        for i in range(n_model):
            if max(abs(resmin[i]), abs(resmax[i])) < cycle_threshold:
                util.notice("model %d/%d: peak residual below threshold" %
                            (i, n_model - 1))
                continue

            if max_psf[i] <= 0.0:
                util.warning("model %d/%d: point spread function negative or"
                             " zero" % (i, n_model - 1))
                continue

            # Zero the delta image for this model.
            delta[i].fill(0.0)

            for (cr, cr_slice) in enumerate(cr_slices):
                for ch in range(len(residual[i])):
                    # TODO: The value of max_weight is only updated during
                    # cycle 0. Is this correct?
                    #
                    assert (len(weight[i].shape) == 2
                            and weight[i].shape[:2] == residual[i].shape[:2])

                    plane_weight = numpy.sqrt(weight[i][ch, cr_slice] /
                                              max_weight)
                    if numpy.any(plane_weight > 0.01):
                        weight_mask = numpy.ones((residual[i].shape[2:]))
                    else:
                        weight_mask = numpy.zeros((residual[i].shape[2:]))

                    # Call CASA Clark clean implementation (minor cycle).
                    # TODO: When cleaning each Stokes parameter separately,
                    # the PSF of Stokes I is used for all others as well?
                    #
                    # Comment from CASA source code:
                    #
                    # We only want the PSF for the first polarization so we
                    # iterate over polarization LAST.
                    #
                    result = casaimwrap.clark_clean(
                        psf[i][ch, 0, :, :], residual[i][ch, cr_slice, :, :],
                        weight_mask, iterations[i, cr, ch], clark_options)

                    if result["iterations"] > iterations[i, cr, ch]:
                        updated[i] = True
                        delta[i][ch, cr_slice, :, :] = result["delta"]
                        iterations[i, cr, ch] = result["iterations"]
                    else:
                        assert (numpy.all(result["delta"] == 0.0))

                util.notice("model %d/%d: stokes: %s, cleaned: %f Jy, "
                            "iterations per channel: %s" %
                            (i, n_model - 1, stokes[cr],
                             numpy.sum(delta[i][ch, cr_slice, :, :]),
                             str(iterations[i, cr, :])))

        # Update model images if required.
        for i in range(n_model):
            if updated[i]:
                model[i] += delta[i]

        # Update major cycle counter.
        cycle += 1

    if any(updated):
        util.notice("finalizing residual images for all fields...")
        for i in range(n_model):
            if updated[i]:
                residual[i], weight[i] = processor.residual(
                    image_coordinates, model[i],
                    processors.Normalization.FLAT_NOISE,
                    processors.Normalization.FLAT_NOISE)
        (absmax, resmin, resmax) = max_field(residual, weight)

        # Print some statistics.
        for i in range(n_model):
            util.notice("model %d/%d: min residual: %f, max residual: %f" %
                        (i, n_model - 1, resmin[i], resmax[i]))
        util.notice("peak residual: %f" % absmax)
    else:
        util.notice("residual images for all fields are up-to-date...")

    # Store output images.
    util.notice("storing average response...")
    util.store_image(options.image + ".response", image_coordinates,
                     processor.response(image_coordinates, image_shape))

    util.notice("storing model images...")
    for i in range(n_model):
        util.store_image(options.image + ".model.flat_noise",
                         image_coordinates, model[i])
        util.store_image(
            options.image + ".model", image_coordinates,
            processor.normalize(image_coordinates, model[i],
                                processors.Normalization.FLAT_NOISE,
                                processors.Normalization.FLAT_GAIN))

    util.notice("storing residual images...")
    for i in range(n_model):
        util.store_image(options.image + ".residual.flat_noise",
                         image_coordinates, residual[i])
        util.store_image(
            options.image + ".residual", image_coordinates,
            processor.normalize(image_coordinates, residual[i],
                                processors.Normalization.FLAT_NOISE,
                                processors.Normalization.FLAT_GAIN))

    util.notice("storing restored images...")
    for i in range(n_model):
        restored = restore_image(image_coordinates.dict(), model[i],
                                 residual[i], beam[i])

        util.store_image(options.image + ".restored.flat_noise",
                         image_coordinates, restored)
        util.store_image(
            options.image + ".restored", image_coordinates,
            processor.normalize(image_coordinates, restored,
                                processors.Normalization.FLAT_NOISE,
                                processors.Normalization.FLAT_GAIN))

    # Print some statistics.
    for i in range(n_model):
        util.notice(
            "model %d/%d: clean flux: %f, residual rms: %f" %
            (i, n_model - 1, numpy.sum(model[i]), numpy.std(residual[i])))

    if diverged:
        util.error("clean diverged.")
    elif absmax < options.threshold:
        util.notice("clean converged.")
    else:
        util.warning("clean did not reach threshold: %f Jy." %
                     options.threshold)
Example #12
# coding: utf-8
from clipboard import get_paste_img_file
from upload import upload_local_file
import util
import os
import subprocess
import sys
import time

if not os.path.exists(util.CONFIG_FILE):
    util.generate_config_file()

config = util.read_config()
if not config:
    util.notice('请先新增s3配置信息')
    util.open_with_editor(util.CONFIG_FILE)
    sys.exit(0)

url = 'https://%s/%s/%s' % (config['endpoint'], config['tenantId'],
                            config['bucket'])

img_file, need_format, format = get_paste_img_file()
if img_file:
    # There is an image on the clipboard.
    # Use the timestamp to generate a unique upload name; we cannot reuse
    # the tmp file name.
    upload_name = "%s/%s.%s" % (config['prefix'], int(
        time.time() * 1000), format)
    if need_format:
        size_str = subprocess.check_output(
            'sips -g pixelWidth %s | tail -n1 | cut -d" " -f4' % img_file.name,
            shell=True)
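
For reference, sips -g pixelWidth <file> prints the file path followed by a line like "pixelWidth: 1280", so the tail/cut pipeline extracts the width as text. An equivalent without the shell pipeline (illustrative, not from the original):

out = subprocess.check_output(['sips', '-g', 'pixelWidth', img_file.name])
width = int(out.splitlines()[-1].split()[-1])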
Example #13
# coding: utf-8
from clipboard import get_paste_img_file
from upload import upload_img
import util
import os
import subprocess
import sys
import time

if not os.path.exists(util.CONFIG_FILE):
    util.generate_config_file()

config = util.read_config()
if not config:
    name = util.picbed_name()
    util.notice('请先设置你的%s信息' % name)
    util.open_with_editor(util.CONFIG_FILE)
    sys.exit(0)

url = '%s/%s' % (config['url'], config['prefix'])

img_file, need_format, format = get_paste_img_file()
if img_file:
    # There is an image on the clipboard.

    # Use the timestamp to generate a unique upload name; we cannot reuse
    # the tmp file name.
    upload_name = "%s.%s" % (int(time.time() * 1000), format)
    if need_format:
        size_str = subprocess.check_output(
            'sips -g pixelWidth %s | tail -n1 | cut -d" " -f4' % img_file.name,
            shell=True)