Example #1
def generic_gradient_magnitude(
    input, derivative, output=None, mode="reflect", cval=0.0, extra_arguments=(), extra_keywords={}
):
    """Calculate a gradient magnitude using the provdide function for
    the gradient.

    The derivative parameter must be a callable with the following 
    signature:
                    
    derivative(input, axis, output, mode, cval,
               *extra_arguments, **extra_keywords)

    The extra_arguments and extra_keywords arguments can be used to pass
    extra arguments and keywords that are passed to derivative at each
    call.
    """
    input = numarray.asarray(input)
    output, return_value = _ni_support._get_output(output, input)
    axes = range(input.rank)
    if len(axes) > 0:
        derivative(input, axes[0], output, mode, cval, *extra_arguments, **extra_keywords)
        numarray.multiply(output, output, output)
        for ii in range(1, len(axes)):
            tmp = derivative(input, axes[ii], output.type(), mode, cval, *extra_arguments, **extra_keywords)
            numarray.multiply(tmp, tmp, tmp)
            output += tmp
        numarray.sqrt(output, output)
    else:
        output[...] = input[...]
    return return_value
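
For orientation, not part of the example above: a minimal sketch of calling the same routine through the modern scipy.ndimage API, which keeps the documented derivative(input, axis, output, mode, cval) contract; the random test image is purely illustrative.

import numpy as np
from scipy import ndimage

img = np.random.rand(64, 64)                     # illustrative test image
# ndimage.sobel matches the derivative(input, axis, output, mode, cval) signature
grad_mag = ndimage.generic_gradient_magnitude(img, ndimage.sobel, mode="reflect")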
Example #2
def generic_gradient_magnitude(input,
                               derivative,
                               output=None,
                               mode="reflect",
                               cval=0.0,
                               extra_arguments=(),
                               extra_keywords={}):
    """Calculate a gradient magnitude using the provdide function for
    the gradient.

    The derivative parameter must be a callable with the following 
    signature:
                    
    derivative(input, axis, output, mode, cval,
               *extra_arguments, **extra_keywords)

    The extra_arguments and extra_keywords arguments can be used to pass
    extra arguments and keywords that are passed to derivative at each
    call.
    """
    input = numarray.asarray(input)
    output, return_value = _ni_support._get_output(output, input)
    axes = range(input.rank)
    if len(axes) > 0:
        derivative(input, axes[0], output, mode, cval, *extra_arguments,
                   **extra_keywords)
        numarray.multiply(output, output, output)
        for ii in range(1, len(axes)):
            tmp = derivative(input, axes[ii], output.type(), mode, cval,
                             *extra_arguments, **extra_keywords)
            numarray.multiply(tmp, tmp, tmp)
            output += tmp
        numarray.sqrt(output, output)
    else:
        output[...] = input[...]
    return return_value
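
A sketch of the extra_arguments/extra_keywords mechanism described in the docstring, using a hypothetical central-difference derivative built on the modern scipy.ndimage.correlate1d; the helper name and weights are illustrative, not part of the example.

import numpy as np
from scipy import ndimage

def central_diff(input, axis, output, mode, cval, weights=(-0.5, 0.0, 0.5)):
    # derivative callable with the documented signature plus one extra keyword
    return ndimage.correlate1d(input, list(weights), axis=axis, output=output,
                               mode=mode, cval=cval)

img = np.random.rand(32, 32)
grad_mag = ndimage.generic_gradient_magnitude(
    img, central_diff, extra_keywords={"weights": (-0.5, 0.0, 0.5)})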
Example #3
def RMS(b):
    a = b.astype(Int32)
    multiply(a, a, a)
    ms = add.reduce(a)/len(a)
    rms = sqrt(ms)
    return rms
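
Two caveats with this snippet: the numarray names (Int32, multiply, add, sqrt) are assumed to come from a star import, and squaring in place in Int32 can overflow for large pixel values. A minimal modern equivalent, sketched with numpy and a float64 accumulator:

import numpy as np

def rms(b):
    a = np.asarray(b, dtype=np.float64)   # promote before squaring to avoid integer overflow
    return np.sqrt(np.mean(a * a))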
Example #4
def mad_combine(infileglob, outfilebase):
    # get list of files to operate on
    files = glob.glob(infileglob)
    # check more than one image exists
    if len(files) < 2:
        print 'Less than two input files found!'
        return
    print 'Operating on images',infileglob
    # get images
    images = _get_images(files)
    lenimages = len(images)
    print 'Found', lenimages, 'images'
    # get header of first image
    hdr = images[0][0].header
    # get ascard of first image
    hdrascard = hdr.ascard
    # get data from images into num (untidy tuple concatenation)
    data = num.zeros((lenimages,) + images[0][0].data.shape, num.Float32)
    for i in range(lenimages):
        data[i,:,:] = images[i][0].data
    # delete array
    del images
    # sort data
    print 'Sorting... (this may take a while, i.e. an hour or so!)'
    data_sorted = num.sort(data, axis=0)
    # find standard deviation of lowest n - n_discard pixels
    print 'Calculating mad_low...'
    mad_low = _mad3(data_sorted[:-n_discard,:,:])
    # correct for bias to stddev
    # (n_discard, corr and sigma_limit are module-level settings; corr is a
    # lookup table of correction factors keyed by the string '<kept>,<total>')
    num.multiply(mad_low, corr[str(lenimages - n_discard) + ',' + str(lenimages)], mad_low)
    # make median image
    print 'Calculating median...'
    if lenimages%2 != 0:  # then odd number of images
        m = (lenimages - 1) / 2
        median = data_sorted[m,:,:]
    else:                   # even number of images
        m = lenimages / 2
        median = (data_sorted[m,:,:] + data_sorted[m-1,:,:]) / 2
    # delete array
    del data_sorted
    # get ccd properties from header
    # these keywords are for FORS2
    # - they may need altering for other instruments

    gain = hdr['OUT1GAIN'] # N_{ADU} = gain * N_{e-}
    invgain = 1.0 / gain   # N_{e-} = invgain * N_{ADU}
    ron  = hdr['OUT1RON']  # read out noise in e- 
    # take only +ve values in median
    median_pos = num.choose(median < 0.0, (median, 0.0))
    # calculate sigma due to ccd noise for each pixel
    print 'Calculating noise_med...'
    noise_med = num.sqrt(median_pos * invgain + ron*ron) * gain
    # delete array
    del median_pos
    # find maximum of noise and mad_low
    # -> sigma to test pixels against to identify cosmics
    print 'Calculating sigma_test...'
    sigma_test = num.choose(noise_med < mad_low, (noise_med, mad_low))
    # delete arrays
    del mad_low, noise_med
    # calculate 'relative residual' for each pixel
    print 'Calculating rel_res...'
    rel_res = num.zeros(data.shape, num.Float32)
    res = num.zeros(data[0].shape, num.Float32)
    for i in range(lenimages):
        num.subtract(data[i,:,:], median, res)
        num.divide(res, sigma_test, rel_res[i,:,:])
    # delete arrays
    del sigma_test, res
    # now average over all pixels for which rel_res < sigma_limit
    # first count number included for each pixel
    # by testing to produce a boolean array, then summing over.
    print 'Calculating included...'
    included = num.zeros(rel_res[0].shape, num.Int16)
    included[:,:] = num.sum(rel_res <= sigma_limit)
    # put all discarded pixels to zero
    print 'Calculating combined...'
    pre_combine = num.choose(rel_res <= sigma_limit, (0.0,data))
    # delete array
    del rel_res
    # sum all pixels and divide by included to give mean
    combined = num.sum(pre_combine)
    # delete array
    del pre_combine
    num.divide(combined, included, combined)
    # Work out errors on this combined image
    # take only +ve values in combined
    mean_pos = num.choose(combined < 0.0, (combined, 0.0))
    # calculate sigma due to ccd noise for each pixel
    print 'Calculating noise_mean...'
    noise_mean = num.sqrt(mean_pos * invgain + ron*ron) * gain
    # delete array
    del mean_pos
    # create standard error image
    print 'Calculating error...'
    error = noise_mean / num.sqrt(included)
    # delete array
    del noise_mean
    # write all images to disk
    print 'Writing images to disk...'
    _write_images(combined, error, included,
                  hdrascard, outfilebase)
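
The heart of the combine above is a one-sided sigma clip against the median followed by a mean over the surviving pixels. A compact numpy sketch of just that step, with illustrative array names and sigma_limit assumed to be a scalar threshold:

import numpy as np

def clipped_mean(data, sigma_test, sigma_limit):
    # data: (n_images, ny, nx) stack; sigma_test: (ny, nx) per-pixel sigma
    median = np.median(data, axis=0)
    rel_res = (data - median) / sigma_test        # residual in units of sigma
    keep = rel_res <= sigma_limit                 # one-sided clip: reject high (cosmic-ray) pixels
    included = keep.sum(axis=0)                   # images kept per pixel
    # np.maximum guards the division where no image survives the clip
    combined = np.where(keep, data, 0.0).sum(axis=0) / np.maximum(included, 1)
    return combined, included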
Example #5
def distance_transform_edt(input, sampling = None, 
                        return_distances = True, return_indices = False,
                        distances = None, indices = None):
    """Exact euclidean distance transform.

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element is returned along the first axis of the result.

    The return_distances and return_indices flags can be used to
    indicate whether the distance transform, the feature transform, or
    both must be returned.

    Optionally the sampling along each axis can be given by the
    sampling parameter, which should be a sequence of length equal to
    the input rank, or a single number, in which case the sampling is
    assumed to be equal along all axes.

    The distances and indices arguments can be used to supply optional
    output arrays, which must be of the correct size and type (Float64
    and Int32 respectively).
    """
    if (not return_distances) and (not return_indices):
        msg = 'at least one of distances/indices must be specified'
        raise RuntimeError, msg
    ft_inplace = isinstance(indices, numarray.NumArray)
    dt_inplace = isinstance(distances, numarray.NumArray)
    # calculate the feature transform
    input = numarray.where(input, 1, 0).astype(numarray.Int8)
    if sampling is not None:
        sampling = _ni_support._normalize_sequence(sampling, input.rank)
        sampling = numarray.asarray(sampling, type = numarray.Float64)
        if not sampling.iscontiguous():
            sampling = sampling.copy()
    if ft_inplace:
        ft = indices
        if ft.shape != (input.rank,) + input.shape:
            raise RuntimeError, 'indices has wrong shape'
        if ft.type() != numarray.Int32:
            raise RuntimeError, 'indices must be of Int32 type'
    else:
        ft = numarray.zeros((input.rank,) + input.shape,
                            type = numarray.Int32) 
    _nd_image.euclidean_feature_transform(input, sampling, ft)
    # if requested, calculate the distance transform
    if return_distances:
        dt = ft - numarray.indices(input.shape, type = ft.type())
        dt = dt.astype(numarray.Float64)
        if sampling is not None:
            for ii in range(len(sampling)):
                dt[ii, ...] *= sampling[ii]
        numarray.multiply(dt, dt, dt)
        if dt_inplace:
            dt = numarray.add.reduce(dt, axis = 0)
            if distances.shape != dt.shape:
                raise RuntimeError, 'distances has wrong shape'
            if distances.type() != numarray.Float64:
                raise RuntimeError, 'distances must be of Float64 type'
            numarray.sqrt(dt, distances)
            del dt
        else:
            dt = numarray.add.reduce(dt, axis = 0)
            dt = numarray.sqrt(dt)
    # construct and return the result
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)
    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
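
For comparison, the same call through the modern scipy.ndimage API; the small test array is illustrative.

import numpy as np
from scipy import ndimage

a = np.array([[0, 1, 1, 1],
              [0, 0, 1, 1],
              [0, 1, 1, 1]])
# distance to the nearest zero element, plus the indices of that element (one array per axis)
dist, inds = ndimage.distance_transform_edt(a, return_indices=True)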
Example #6
File: MOPfits.py Project: OSSOS/MOP
def _open_fix(file):
    """Takes in a fits file name, open the file in binary mode and creates an HDU.

    Will attempt to fix some of the header keywords to match the standard FITS format.
    """
    import pyfits, re, string
    temp = pyfits.HDUList()
    hdu = pyfits.PrimaryHDU()

    hdu._file=open(file,'rb')

    _number_RE = re.compile(
                r'(?P<sign>[+-])?0*(?P<digt>(\.\d+|\d+(\.\d*)?)([deDE][+-]?\d+)?)')

    ### here's the real difference between pyFits and cfh12kFits.
    ### I'm more flexible on the format of the header file, which allows me
    ### to read more files.
    card_RE=re.compile(r"""
    (?P<KEY>[-A-Z0-9_a-za ]{8})   ### keyword is the first 8 bytes... i'll allow small letters
    (
     (
      (?P<VALUE>=\s)             ### =\s indicates a value coming.
      (\s*
       (   
        (?P<STRING>\'[^\']*[\'/])   ### a string 
        |
        (?P<FLOAT>([+-]?(\.\d+|\d+\.\d*)([dDEe][+-]?\d+)?))  ### a floating point number
        |
        (?P<INT>[+-]?\d+)      ### an integer
        |
        (?P<BOOL>[TFtf])       ### perhaps value is boolean
        )
       \s*
       (( / )?(?P<COMMENT>.*))?    ### value related comment.
      )
     )
     |
     (?P<C2>.*)     ### strictly a comment field
    )
    """,re.VERBOSE)

    
    done=0
    while ( not done):

        ### read a line of 80 characters up to a new line from the file.
        block=hdu._file.readline(80)

        string_end=79
        if len(block)== 0:
            done=1
            continue
        if block[-1]=='\n':
            string_end=len(block)-2

        line = re.match(r'[ -~]{0,'+str(string_end)+'}',block)

        line = string.ljust(line.group(0),80)[0:79]

        if line[0:8] == 'END     ':
            done=1
            break

        card=card_RE.match(line)
        if not card or not card.group('KEY'):
            raise SyntaxError("Failed to get keyword from FITS Card %s" % line)
            
        key=card.group('KEY')
        value=None
        if card.group('INT'):
            try:
                value=int(card.group('INT'))
            except:
                value=card.group('INT')
        elif card.group('FLOAT'):
            try:
                value=float(card.group('FLOAT'))
            except:
                # fall back to the raw string (e.g. Fortran-style 'D' exponents)
                value=card.group('FLOAT')
        elif card.group('BOOL'):
            value=pyfits.Boolean(card.group('BOOL'))
        elif card.group('STRING'):
            value=card.group('STRING')[1:-1]
            
        if card.group('COMMENT'):
            _comment=card.group('COMMENT')
        elif card.group('C2'):
            _comment=card.group('C2')
        else:
            _comment=None

        try:
            if key =='COMMENT ':
                hdu.header.add_comment(_comment)
            elif key =='HISTORY ':
                hdu.header.add_history(_comment)
            elif key =='        ':
                hdu.header.add_blank(_comment)
            elif key:
                if key =='DATE-OBS' and value:
                    value=string.replace(value,'/','-')
                hdu.header.update(key,value,comment=_comment)
        except:
            raise SyntaxError("Failed to convert line to FITS Card %s" % line)

    ### set some internal variables to decide on data flow.
    hdu._bzero=hdu.header.get('BZERO',0)
    hdu._bscale=hdu.header.get('BSCALE',1)
    hdu._bitpix=hdu.header.get('BITPIX',-16)

    if hdu.header.get('NAXIS',0)>0:
        naxis1=hdu.header.get('NAXIS1',1)
        naxis2=hdu.header.get('NAXIS2',1)
    ### now read the data... this is a HACK from pyfits.py
        import numarray as num
    
        code = pyfits._ImageBaseHDU.NumCode[hdu._bitpix]
        dims = tuple([naxis2,naxis1])
        raw_data = num.fromfile(hdu._file,type=code,shape=dims)
        raw_data._byteorder='big'

        if ( hdu._bzero != 0
             or hdu._bscale!=1 ):
            if  hdu._bitpix > 0 :
                hdu.data=num.array(raw_data,type=num.Float32)
            else:
                hdu.data=raw_data
            if hdu._bscale != 1:
                num.multiply(hdu.data,hdu._bscale,hdu.data)
            if hdu._bzero!=0:
                hdu.data=hdu.data + hdu._bzero

            del hdu.header['BSCALE']
            del hdu.header['BZERO']
            hdu.header['BITPIX']=pyfits._ImageBaseHDU.ImgCode[hdu.data.type()]
            
    temp.append(hdu)
    return temp
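
As an aside, the modern route to the same goal (reading a FITS file with slightly non-standard header cards) is usually to let astropy.io.fits apply its own fixes. A minimal sketch, assuming astropy is installed and 'image.fits' is a placeholder path:

from astropy.io import fits

hdul = fits.open("image.fits", ignore_missing_end=True)   # tolerate a missing END card
hdul.verify("fix")            # repair fixable header-card violations in place
data = hdul[0].data           # BSCALE/BZERO scaling is applied on read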
Example #7
def _open_fix(file):
    """Takes in a fits file name, open the file in binary mode and creates an HDU.

    Will attempt to fix some of the header keywords to match the standard FITS format.
    """
    import pyfits, re, string
    temp = pyfits.HDUList()
    hdu = pyfits.PrimaryHDU()

    hdu._file = open(file, 'rb')

    _number_RE = re.compile(
        r'(?P<sign>[+-])?0*(?P<digt>(\.\d+|\d+(\.\d*)?)([deDE][+-]?\d+)?)')

    ### here's the real difference between pyFits and cfh12kFits.
    ### I'm more flexible on the format of the header file, which allows me
    ### to read more files.
    card_RE = re.compile(
        r"""
    (?P<KEY>[-A-Z0-9_a-za ]{8})   ### keyword is the first 8 bytes... i'll allow small letters
    (
     (
      (?P<VALUE>=\s)             ### =\s indicates a value coming.
      (\s*
       (   
        (?P<STRING>\'[^\']*[\'/])   ### a string 
        |
        (?P<FLOAT>([+-]?(\.\d+|\d+\.\d*)([dDEe][+-]?\d+)?))  ### a floating point number
        |
        (?P<INT>[+-]?\d+)      ### an integer
        |
        (?P<BOOL>[TFtf])       ### perhaps value is boolean
        )
       \s*
       (( / )?(?P<COMMENT>.*))?    ### value related comment.
      )
     )
     |
     (?P<C2>.*)     ### strictly a comment field
    )
    """, re.VERBOSE)

    done = 0
    while (not done):

        ### read a line of 80 characters up to a new line from the file.
        block = hdu._file.readline(80)

        string_end = 79
        if len(block) == 0:
            done = 1
            continue
        if block[-1] == '\n':
            string_end = len(block) - 2

        line = re.match(r'[ -~]{0,' + str(string_end) + '}', block)

        line = string.ljust(line.group(0), 80)[0:79]

        if line[0:8] == 'END     ':
            done = 1
            break

        card = card_RE.match(line)
        if not card or not card.group('KEY'):
            raise SyntaxError("Failed to get keyword from FITS Card %s" % line)

        key = card.group('KEY')
        value = None
        if card.group('INT'):
            try:
                value = int(card.group('INT'))
            except:
                value = card.group('INT')
        elif card.group('FLOAT'):
            try:
                value = float(card.group('FLOAT'))
            except:
                # fall back to the raw string (e.g. Fortran-style 'D' exponents)
                value = card.group('FLOAT')
        elif card.group('BOOL'):
            value = pyfits.Boolean(card.group('BOOL'))
        elif card.group('STRING'):
            value = card.group('STRING')[1:-1]

        if card.group('COMMENT'):
            _comment = card.group('COMMENT')
        elif card.group('C2'):
            _comment = card.group('C2')
        else:
            _comment = None

        try:
            if key == 'COMMENT ':
                hdu.header.add_comment(_comment)
            elif key == 'HISTORY ':
                hdu.header.add_history(_comment)
            elif key == '        ':
                hdu.header.add_blank(_comment)
            elif key:
                if key == 'DATE-OBS' and value:
                    value = string.replace(value, '/', '-')
                hdu.header.update(key, value, comment=_comment)
        except:
            raise SyntaxError("Failed to convert line to FITS Card %s" % line)

    ### set some internal variables to decide on data flow.
    hdu._bzero = hdu.header.get('BZERO', 0)
    hdu._bscale = hdu.header.get('BSCALE', 1)
    hdu._bitpix = hdu.header.get('BITPIX', -16)

    if hdu.header.get('NAXIS', 0) > 0:
        naxis1 = hdu.header.get('NAXIS1', 1)
        naxis2 = hdu.header.get('NAXIS2', 1)
        ### now read the data... this is a HACK from pyfits.py
        import numarray as num

        code = pyfits._ImageBaseHDU.NumCode[hdu._bitpix]
        dims = tuple([naxis2, naxis1])
        raw_data = num.fromfile(hdu._file, type=code, shape=dims)
        raw_data._byteorder = 'big'

        if (hdu._bzero != 0 or hdu._bscale != 1):
            if hdu._bitpix > 0:
                hdu.data = num.array(raw_data, type=num.Float32)
            else:
                hdu.data = raw_data
            if hdu._bscale != 1:
                num.multiply(hdu.data, hdu._bscale, hdu.data)
            if hdu._bzero != 0:
                hdu.data = hdu.data + hdu._bzero

            del hdu.header['BSCALE']
            del hdu.header['BZERO']
            hdu.header['BITPIX'] = pyfits._ImageBaseHDU.ImgCode[
                hdu.data.type()]

    temp.append(hdu)
    return temp
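
The scaling block near the end of this example implements the standard FITS relation physical = BSCALE * raw + BZERO. A stand-alone numpy sketch of that step, with illustrative names (this is not the pyfits internals used above):

import numpy as np

def apply_fits_scaling(raw, bscale=1.0, bzero=0.0):
    # promote integer pixel data to float before scaling, as the example does
    data = raw.astype(np.float32) if raw.dtype.kind in "iu" else raw
    if bscale != 1:
        data = data * bscale
    if bzero != 0:
        data = data + bzero
    return data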
Example #8
def distance_transform_edt(input,
                           sampling=None,
                           return_distances=True,
                           return_indices=False,
                           distances=None,
                           indices=None):
    """Exact euclidean distance transform.

    In addition to the distance transform, the feature transform can
    be calculated. In this case the index of the closest background
    element is returned along the first axis of the result.

    The return_distances and return_indices flags can be used to
    indicate whether the distance transform, the feature transform, or
    both must be returned.

    Optionally the sampling along each axis can be given by the
    sampling parameter, which should be a sequence of length equal to
    the input rank, or a single number, in which case the sampling is
    assumed to be equal along all axes.

    The distances and indices arguments can be used to supply optional
    output arrays, which must be of the correct size and type (Float64
    and Int32 respectively).
    """
    if (not return_distances) and (not return_indices):
        msg = 'at least one of distances/indices must be specified'
        raise RuntimeError, msg
    ft_inplace = isinstance(indices, numarray.NumArray)
    dt_inplace = isinstance(distances, numarray.NumArray)
    # calculate the feature transform
    input = numarray.where(input, 1, 0).astype(numarray.Int8)
    if sampling is not None:
        sampling = _ni_support._normalize_sequence(sampling, input.rank)
        sampling = numarray.asarray(sampling, type=numarray.Float64)
        if not sampling.iscontiguous():
            sampling = sampling.copy()
    if ft_inplace:
        ft = indices
        if ft.shape != (input.rank, ) + input.shape:
            raise RuntimeError, 'indices has wrong shape'
        if ft.type() != numarray.Int32:
            raise RuntimeError, 'indices must be of Int32 type'
    else:
        ft = numarray.zeros((input.rank, ) + input.shape, type=numarray.Int32)
    _nd_image.euclidean_feature_transform(input, sampling, ft)
    # if requested, calculate the distance transform
    if return_distances:
        dt = ft - numarray.indices(input.shape, type=ft.type())
        dt = dt.astype(numarray.Float64)
        if sampling is not None:
            for ii in range(len(sampling)):
                dt[ii, ...] *= sampling[ii]
        numarray.multiply(dt, dt, dt)
        if dt_inplace:
            dt = numarray.add.reduce(dt, axis=0)
            if distances.shape != dt.shape:
                raise RuntimeError, 'distances has wrong shape'
            if distances.type() != numarray.Float64:
                raise RuntimeError, 'distances must be of Float64 type'
            numarray.sqrt(dt, distances)
            del dt
        else:
            dt = numarray.add.reduce(dt, axis=0)
            dt = numarray.sqrt(dt)
    # construct and return the result
    result = []
    if return_distances and not dt_inplace:
        result.append(dt)
    if return_indices and not ft_inplace:
        result.append(ft)
    if len(result) == 2:
        return tuple(result)
    elif len(result) == 1:
        return result[0]
    else:
        return None
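
The distance computation in the second half of this function reduces to dt = sqrt(sum_i (sampling_i * (ft_i - index_i))**2) over the axes. A compact numpy sketch of that reduction, assuming ft has shape (rank,) + shape as built above:

import numpy as np

def dt_from_ft(ft, shape, sampling=None):
    # per-axis displacement from each pixel to its nearest background element
    disp = (ft - np.indices(shape)).astype(np.float64)
    if sampling is not None:
        disp *= np.asarray(sampling, dtype=np.float64).reshape((-1,) + (1,) * len(shape))
    return np.sqrt((disp * disp).sum(axis=0))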