Example #1
    def getpic(self, img_name, channel_num):
        self.img_name = img_name
        # runs e.g.: streamer -c /dev/video0 -f jpeg -o <img_name>.jpeg
        os.system("streamer -c /dev/video" + str(channel_num) + " -f jpeg -o " + self.img_name + ".jpeg")
        img = mpimg.imread(self.img_name + ".jpeg", "jpeg")
        # median_filter returns a new array; the original discarded this result
        img = ndi.median_filter(img, 2)
        return img
Example #2
def randomSurface():
    ### create a random array
    print("Creating a random array")
    import random

    shape = []
    for i in range(3):
        dim = int(random.random() * 50 + 1) * 4
        shape.append(dim)
        # print "Shape of array: ", shape
    rand = numpy.random.random(shape)
    ### zero the edges
    rand[0, :, :] = 0.0
    rand[:, 0, :] = 0.0
    rand[:, :, 0] = 0.0
    rand[shape[0] - 1, :, :] = 0.0
    rand[:, shape[1] - 1, :] = 0.0
    rand[:, :, shape[2] - 1] = 0.0
    ### reduce the randomness
    rand = ndimage.median_filter(rand, size=2)
    rand = ndimage.median_filter(rand, size=3)
    rand = ndimage.median_filter(rand, size=4)
    ### make it boolean
    array = numpy.where(rand > 0.2, 1.0, 0.0)
    return array
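A hedged usage sketch for randomSurface (it expects `numpy` and `ndimage`
to be imported at module level, which the snippet assumes):

import numpy
from scipy import ndimage

surface = randomSurface()
print("shape:", surface.shape)           # each dimension is a multiple of 4
print("values:", numpy.unique(surface))  # boolean-like: only 0.0 and/or 1.0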
Example #3
def depth_disconts2(di, tol=0.2):
    # The original was an unfinished stub: a pydb.set_trace() debugger call,
    # filter results that were discarded, a call to the nonexistent
    # ndimage.gaussian_derivative(), and an undefined return value (di_diff).
    # A minimal completion, inferred from the name: report depth
    # discontinuities as deviations from a local median that exceed tol.
    import numpy as np
    import scipy.ndimage as ndimage

    smoothed = ndimage.gaussian_filter(di, sigma=(5, 5))
    di_diff = np.abs(di - ndimage.median_filter(smoothed, size=5))
    di_diff[di_diff <= tol] = 0.0
    return di_diff
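A quick check of the completed stub above (hedged: the behavior is inferred
from the function name, not from a working original):

import numpy as np

depth = np.ones((64, 64))
depth[:, 32:] = 2.0  # a step discontinuity
edges = depth_disconts2(depth, tol=0.2)
print(edges[:, 32].max() > 0)  # the step column stands out
print(edges[:, 0].max() == 0)  # smooth regions are suppressed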
Example #4
    def medianClicked(self):
        global _img, red, green, blue

        red = ndimage.median_filter(red, size=15)
        green = ndimage.median_filter(green, size=15)
        blue = ndimage.median_filter(blue, size=15)

        # copy the filtered channels back into the image (vectorized;
        # equivalent to the original per-pixel loops)
        _img[:, :, 0] = red
        _img[:, :, 1] = green
        _img[:, :, 2] = blue
        updateImage()
Example #5
def local_median_val(u, v, u_threshold, v_threshold, size=1):
    """Eliminate spurious vectors with a local median threshold.
    
    This validation method tests for the spatial consistency of the data.
    Vectors are classified as outliers and replaced with NaN (Not a Number) if
    the absolute difference with the local median is greater than a user 
    specified threshold. The median is computed for both velocity components.
    
    Parameters
    ----------
    u : 2d np.ndarray
        a two dimensional array containing the u velocity component.
        
    v : 2d np.ndarray
        a two dimensional array containing the v velocity component.
        
    u_threshold : float
        the threshold value for component u
        
    v_threshold : float
        the threshold value for component v

    size : int
        the half-width of the local median window; the filter spans
        2 * size + 1 points in each direction.

    Returns
    -------
    u : 2d np.ndarray
        a two dimensional array containing the u velocity component, 
        where spurious vectors have been replaced by NaN.
        
    v : 2d np.ndarray
        a two dimensional array containing the v velocity component, 
        where spurious vectors have been replaced by NaN.
        
    mask : boolean 2d np.ndarray 
        a boolean array. True elements correspond to outliers.
        
    """

    um = median_filter(u, size=2 * size + 1)
    vm = median_filter(v, size=2 * size + 1)

    ind = (np.abs((u - um)) > u_threshold) | (np.abs((v - vm)) > v_threshold)

    u[ind] = np.nan
    v[ind] = np.nan

    mask = np.zeros(u.shape, dtype=bool)
    mask[ind] = True

    return u, v, mask
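A minimal usage sketch for local_median_val (hedged: assumes `np` is numpy
and `median_filter` is scipy.ndimage.median_filter, as the function expects):

import numpy as np
from scipy.ndimage import median_filter

u = np.ones((8, 8))
v = np.ones((8, 8))
u[4, 4] = 10.0  # plant one spurious vector
u, v, mask = local_median_val(u, v, u_threshold=2.0, v_threshold=2.0)
assert np.isnan(u[4, 4]) and mask[4, 4]  # the outlier is replaced by NaN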
Example #6
def make_gif2():
    t = sp.arange(-5.0, 5.0, 0.01)
    for sigma in range(1, 10):
        pylab.clf()
        pylab.figtext(0, 0, "sigma={0}".format(2 ** sigma))
        pylab.axis([-6.0, 6.0, -1.1, 1.1])
        # median_filter's size argument must be an integer
        A = ndi.median_filter(shallow(t), int(2 ** sigma))
        B = ndi.median_filter(steep(t), int(2 ** sigma))
        pylab.plot(t, A, "b")
        pylab.plot(t, B, "r")
        pylab.plot(t, -1.0 * (A > 0), "b")
        pylab.plot(t, (B > 0), "r")
        pylab.plot(t, zero(t), "k")
        pylab.draw()
        pylab.savefig("frame{0}".format(sigma))
Example #7
    def find_errors(self):
        """Detect spikes in d, which likely indicate genotype errors. A spike in a
        single child means an error in that child; a spike in all children but the
        template means an error in the template. The remaining, indecisive cases
        are marked as errors."""

        # Use median filter to detect spikes in d
        filtered_diff = ndimage.median_filter(self.d, (self.error_filter_length, 1))
        # filtered_diff = ndimage.median_filter(self.d, (self.error_filter_length,1), mode='constant', cval=0)
        # ndimage.median_filter(self.d, (3,1), mode='constant', cval=0)
        # filtered_diff = util.max_except_center_filter1d(self.d, 5)
        # print self.d
        # print filtered_diff
        # print filtered_diff2
        # filtered_diff = util.max_except_center_filter(d_snps, error_filter_length)
        difference = (self.d != filtered_diff).astype("int8")
        num_diff = np.sum(difference, axis=1)
        errors = np.nonzero(num_diff)[0]
        error_dict = {}
        if errors.size:
            num_errors = num_diff[errors]  # errors.size is nonzero in this branch, so no fallback is needed
            for error_type in FamilyIbdComputer.ERROR_TYPES:
                (snp_index, child_index) = self.__error_index(difference, errors, num_errors, error_type)
                error_dict[error_type] = (snp_index, child_index)

        # Remove errors from data array
        self.dorig = self.d.copy()
        self.d = filtered_diff
        return error_dict
Example #8
def Highpass(image):
    """
    Returns: image after passing it through a highpass filter
    (the original minus a median-smoothed background).
    ----------------------------------------------------------------------
    Parameters: image
    """
    return image - median_filter(image, size=9)
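A small sanity check (hedged: assumes `median_filter` is
scipy.ndimage.median_filter, as elsewhere on this page):

import numpy as np
from scipy.ndimage import median_filter

flat = np.full((20, 20), 5.0)
flat[10, 10] = 50.0           # one hot pixel
hp = Highpass(flat)
assert hp[10, 10] > 0         # the spike survives the highpass
assert abs(hp[0, 0]) < 1e-12  # the flat background cancels out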
Example #9
def baseline_and_deglitch(orig_spec, ww=300, sigma_cut=4.0, poly_n=2, filt_width=7):
    # (poly_n and filt_width must be integers: filt_width is used both as a
    # filter size and as a slice stride below)
    """
    (1) Calculate a rolling standard deviation (s) in a window
        of 2*ww pixels
    (2) Mask out portions of the spectrum where s is more than
        sigma_cut times the median value for s.
    (3) Fit and subtract-out a polynomial of order poly_n
    (4) Median filter (with a filter width of filt_width)
        to remove the single-channel spikes seen.
    """

    ya = rolling_window(orig_spec, ww * 2)
    # Calculate standard dev and pad the output
    stds = my_pad.pad(np.std(ya, -1), (ww - 1, ww), mode="edge")
    # Figure out which bits of the spectrum have signal/glitches
    med_std = np.median(stds)
    std_std = np.std(stds)
    sigma_x_bar = med_std / np.sqrt(ww)
    sigma_s = (1.0 / np.sqrt(2.0)) * sigma_x_bar
    # Mask out signal for baseline
    masked = ma.masked_where(stds > med_std + sigma_cut * sigma_s, orig_spec)
    xx = np.arange(masked.size)
    coeffs = ma.polyfit(xx, masked, poly_n)  # honor poly_n rather than hard-coding order 2
    baseline = np.polyval(coeffs, xx)
    sub = orig_spec - baseline
    # Filter out glitches in baseline-subtracted version
    final = im.median_filter(sub, filt_width)[::filt_width]
    return final
Example #10
def find_feature_mask_simple(s_msk, sigma=1, ax=None, x_values=None):
    """
    The noise level (std) is estimated globally, with iterative sigma
    clipping; `sigma` sets the detection threshold in units of that noise.
    """
    # find emission features from observed spec.

    filtered_spec = s_msk - ni.median_filter(s_msk, 15)
    # filtered_spec = ni.gaussian_filter1d(filtered_spec, 0.5)
    # smoothed_std = get_smoothed_std(filtered_spec,
    #                                rad=3, smooth_length=3)
    std = filtered_spec.std()
    for i in [0, 1]:
        std = filtered_spec[np.abs(filtered_spec) < 3 * std].std()

    emission_feature_msk_ = filtered_spec > sigma * std
    # emission_feature_msk_ = ni.binary_closing(emission_feature_msk_)
    emission_feature_msk = ni.binary_opening(emission_feature_msk_, iterations=1)

    if ax is not None:
        if x_values is None:
            x_values = np.arange(len(s_msk))

        # ax.plot(x_values, s_msk)
        ax.plot(x_values, filtered_spec)
        # ax.plot(x_values, smoothed_std)
        ax.axhline(sigma * std)
        ax.plot(x_values[emission_feature_msk], emission_feature_msk[emission_feature_msk], "ys", mec="none")

    return emission_feature_msk
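A quick usage sketch (hedged: assumes `ni` is scipy.ndimage and `np` is
numpy, as in the function body):

import numpy as np

spec = np.random.normal(0.0, 1.0, 500)
spec[200:205] += 30.0  # inject an emission feature
msk = find_feature_mask_simple(spec, sigma=5)
print(msk[200:205].any())  # the feature is flagged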
Example #11
def segmentlocal(img, filter_size=5, level=0.2, selem_size=3, rescale_low=(0, 50), rescale_high=(50, 100)):
    """
    Segment image using gradients calculated locally. Apply Wiener and median 
    filters to remove salt-and-pepper noise, then calculate gradients over local 
    neighborhoods using the specified structuring element. Threshold the image 
    constructed by multiplying the rescaled original image with its rescaled 
    derivative (this helps define the edges).

    args:
        img (ndarray): input image

    kwargs:
        filter_size (int): neighborhood size of Wiener filter
        selem_size (int): size of the disk structuring element
        level (float): threshold level, specified as a fraction of the 
            calculated Otsu threshold; must be in the range [0, 1]
        rescale_low (tuple): (low, high) values used to rescale original image
        rescale_high (tuple): (low, high) values used to rescale edges image

    returns:
        ndarray: thresholded binary image (dtype = bool)

    """
    img2 = wiener(img, (filter_size, filter_size))
    img3 = median_filter(img2, filter_size)

    img4 = complement(rescale(img3, rescale_low))

    img5 = bottomhat(img_as_uint12(img3), disk(selem_size))
    img6 = fillholes(img5)
    img7 = rescale(img6, rescale_high)

    img8 = img4 * img7
    return threshold(img8, level=level)
Example #12
def threshold_image(np_image, n=[]):
    # the mutable default argument deliberately persists across calls and
    # stores the first few reference frames
    if len(n) < 5:
        # First capture a few images - give the camera time to adjust...
        n.append(np_image[:])
        return np_image

    original = n[4]
    img = np_image[:]

    # Take the difference between the original frame and the current frame
    differenceImage = abs(np_image.astype(int) - original.astype(int)).astype(uint8)

    # amount of "Change" required before something will show up
    thresholdValue = 30

    # Take the N-Dimensional difference (3 channels of binary)
    differenceImage = differenceImage >= thresholdValue

    # Convert this to one channel binary
    differenceImage = differenceImage.mean(2).astype(bool)

    # Remove Salt & Pepper Noise
    differenceImage = ndimage.median_filter(differenceImage, size=5)

    # Create a black image of same type and shape as input
    output = zeros(img.shape).astype(img.dtype)

    # Take the original pixel at every point where the image is "different"
    output[differenceImage] = img[differenceImage]
    return output
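A hedged driving sketch (threshold_image relies on star imports from numpy
and on `ndimage`, as in its original module; the first five calls only fill
the reference-frame buffer held in the mutable default argument):

import numpy as np

frames = [np.random.randint(0, 255, (48, 64, 3)).astype(np.uint8) for _ in range(8)]
for frame in frames:
    out = threshold_image(frame)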
Example #13
def process(
    parsed,
    target,
    temp_metatile,
    temp_processed,
    save_offsetx,
    save_offsety,
    save_xsize,
    save_ysize,
    nodata,
    ot,
    *args,
    **kwargs
):

    scale = parsed.s
    zfactor = parsed.z
    altitude = parsed.alt
    process_hillshade = "gdaldem hillshade -s %s -z %s -alt %s %s -of GTiff %s > /dev/null" % (
        scale,
        zfactor,
        altitude,
        temp_metatile,
        temp_processed,
    )
    os.system(process_hillshade)
    # note: these assignments shadow the nodata/ot parameters passed in;
    # nodata is rebound again from numpy_read just below
    nodata = 0
    ot = "-ot Byte"
    # open in numpy
    _, gt, _, nodata, array_numpy = numpy_read(temp_processed)
    processed_numpy = ndimage.median_filter(array_numpy, size=3)
    numpy_save(processed_numpy, target, save_offsetx, save_offsety, save_xsize, save_ysize, gt, nodata, ot)
Example #14
    def find(self):
        """
        Find all pixels above the median pixel after smoothing with a Gaussian filter.

        :Note: maybe one should use mode instead of median
        """
        # smooth the image
        img = ndimage.gaussian_filter(self.image, sigma=self.settings["sigma"])

        # find pixels above the median
        msk = self.image > np.median(img)
        # get background image and calculate statistics
        backgrd = self.image[~msk]
        std = np.std(backgrd).item()  # .item() required if the image was memmap'ed by pyfits
        mean = np.mean(backgrd[backgrd > 0.0]).item()  # .item() required if the image was memmap'ed by pyfits
        rms = np.sqrt(std ** 2 + mean ** 2)

        print("Background: average={0:.4f} and rms={1:.4f}".format(mean, rms))

        # find objects above the background
        self.mask = (
            ndimage.median_filter(self.image, self.settings["sigma"]) > rms * self.settings["above_background"] + mean
        )
        # mask_pix = im > rms * above_background + mean
        # mask = (mask + mask_pix) >= 1

        # get labels
        self.label_im, self.nb_labels = ndimage.label(self.mask)

        print("Finished the initial run and found {0:d} objects...".format(self.nb_labels))

        return self.mask, self.label_im, self.nb_labels
Example #15
def get_variance_map2(a_plus_b, a_minus_b, bias_mask2, pix_mask, gain):
    # variance0 = a_minus_b
    # a_minus_b = a-b
    msk = bias_mask2 | pix_mask | ~np.isfinite(a_minus_b)

    from destriper import destriper

    variance0 = destriper.get_destriped(a_minus_b, msk, pattern=64, remove_vertical=False, hori=False)
    # variance0 = a_minus_b

    # stsci_median cannot be used here (it fails with a "too many arrays" error).
    # ss = stsci_median([m1 for m1 in variance0],)
    dd1 = np.ma.array(variance0, mask=msk)
    ss = np.ma.median(dd1, axis=0)

    variance_ = variance0.copy()
    variance_[msk] = np.nan

    st = np.nanstd(variance_)
    st = np.nanstd(variance_[np.abs(variance_) < 3 * st])

    variance_[np.abs(variance_ - ss) > 3 * st] = np.nan

    import scipy.ndimage as ni

    x_std = ni.median_filter(np.nanstd(variance_, axis=0), 11)

    variance_map0 = np.zeros_like(variance_) + x_std ** 2

    variance_map = variance_map0 + np.abs(a_plus_b) / gain  # add Poisson noise in ADU
    return variance_map
Example #16
def temporal_median(img=None, weight=1.0, window_shape=None):
    """
    Subtracts the median calculated in time, for each pixel.
    Median filter works well for sparse images.

    Parameters
    ----------
    img : array_like
        Series of images as a 3D numpy array (time along the first axis).
    weight : scalar
        Fraction of median to be subtracted from each pixel.
        Value of `weight` should be in the interval (0.0,1.0).
    window_shape : tuple of integers
        Specifies the shape of the window as follows (dt, dy, dx)

    """
    time_axis = 0
    nb_imgs = img.shape[time_axis]
    if img.ndim <= 2 or nb_imgs <= 1:
        raise ValueError("Need more than one image to apply temporal filtering.")

    if window_shape is None:
        window_shape = (nb_imgs, 1, 1)
    elif not isinstance(window_shape, tuple):
        raise ValueError("window_shape must be a tuple.")
    elif window_shape[0] <= 1:
        raise ValueError("Cannot perform temporal filtering, try spatial filtering.")

    img_out = img - weight * nd.median_filter(img, size=window_shape)
    return img_out
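A quick sanity check (hedged: assumes `nd` is scipy.ndimage, as in the
function body):

import numpy as np
import scipy.ndimage as nd

stack = np.zeros((5, 4, 4))
stack[2, 1, 1] = 7.0  # a transient particle in a single frame
out = temporal_median(stack, weight=1.0)
assert out[2, 1, 1] == 7.0  # the static (all-zero) background is removed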
Example #17
def bfixpix(image_file, mask_file, outsuffix="_f", msksuffix="_s"):
    """
    Inputs
    ---------
    image_file : string
        input image file to fix bad pixels on

    mask_file : string
        mask file (0 == good pixels, >0 == bad pixels)

    outsuffix : string
        suffix for fixed image. default = '_f'

    msksuffix : string
        suffix for bad pixels significance mask. default = '_s'
    """
    outf = image_file.replace(".fits", outsuffix + ".fits")
    outm = image_file.replace(".fits", msksuffix + ".fits")

    util.rmall([outf, outm])
    print("bfixpix: {0} -> {1}".format(image_file, outf))

    # fetch the image, fetch the mask
    img, hdr = pyfits.getdata(image_file, header=True)
    msk = pyfits.getdata(mask_file)

    # median the image
    medimg = ndimage.median_filter(img, 3, mode="nearest")

    # generate the pixel files
    outf_img = np.where(msk == 0, img, medimg)
    outm_img = np.where(msk == 1, (img - medimg), 0)

    pyfits.writeto(outf, outf_img, hdr)
    pyfits.writeto(outm, outm_img, hdr)
Example #18
    def split_traces(self, ds, threshold=30000):
        self._prepare_find_jumps()
        ds = self._hf[ds]
        # first we remove a bit of noise, size is the number of averages
        # flt = gaussian_filter1d(ds,10)
        flt = median_filter(ds, size=3)
        # flt = ds
        # the sobel filter finds the "jumps"
        sb = sobel(flt)
        for i in sb:
            self.qps_jpn_hight.append(float(i))

        # for i in flt: self.qps_jpn_spec.append(float(i))
        offset = ds[0]
        tr_num = 0
        tr_name = "qps_tr_" + str(tr_num)
        tr_obj = self._hf.add_value_vector(tr_name, folder="analysis", x=self._x_co, unit="Hz")
        keepout = 4
        for i, tr in enumerate(flt):
            keepout += 1
            if abs(sb[i]) > threshold and keepout > 3:
                keepout = 0
                # new trace
                tr_num += 1
                tr_name = "qps_tr_" + str(tr_num)
                tr_obj = self._hf.add_value_vector(tr_name, folder="analysis", x=self._x_co, unit="Hz")
                print(tr, i)
                # tr_obj.append(float(tr))
            else:
                if keepout > 2:
                    tr_obj.append(float(tr - offset))
Example #19
def load_mrlc(f):
    # pale green background by default
    sfcinfo, surfacelayers = load_raster(f)
    surface = surfacelayers[0]

    # nearest-neighbor upsampling creates chunky pixels - smooth
    #  the edges with a median filter since we're in index space.
    # doing this in the index space keeps sharp edges on the
    # landcover areas, but rounded rather than blocky.
    # compare image resolution to 30m.
    # mrlc is actually 1 arcsecond but 30m is close enough
    # for choosing a filter parameter.
    scale = 30.0 / sfcinfo.grid.meters_per_grid()
    do_median = 1
    if do_median and scale > 2.0:
        # kernel size, an odd number that is near the diameter of an mrlc cell in the image grid
        ksize = 2 * int(scale / 2) + 1
        if ksize > 21:
            # arbitrary limit so we can scale up without blowing up
            ksize = 21
        surface = ndimage.median_filter(surface, size=(ksize, ksize))

    # build the colormap
    cmap = N.zeros((256, 3), N.float32)
    for code, info in landcover_info.items():
        cmap[int(code), :] = [int(cc, 16) for cc in re.match("#(..)(..)(..)", info["natural"]).groups()]

    # apply the colormap and transform to 0..1
    surface = cmap[surface] / 255.0

    return sfcinfo, surface
Example #20
    def testAcquire(self):
        im = self.acquireImage()
        if im is None:
            return

        # filter
        im = numpy.asarray(im, dtype=numpy.float32)
        medfilt = int(self.settings["medfilt"])
        lowfilt = float(self.settings["lowfilt"])
        if medfilt > 1:
            im = ndimage.median_filter(im, size=medfilt)
        if lowfilt > 0:
            im = ndimage.gaussian_filter(im, lowfilt)
        self.setImage(im)

        # find regions
        minsize = self.settings["minsize"]
        maxsize = self.settings["maxsize"]
        timeout = 300
        # regions, image  = libCVwrapper.FindRegions(im, minsize, maxsize)
        self.logger.info("running libCV.FindRegions, timeout = %d" % (timeout,))
        try:
            regions, image = pyami.timedproc.call(
                "leginon.libCVwrapper", "FindRegions", args=(im, minsize, maxsize), timeout=timeout
            )
        except Exception:
            self.logger.error("libCV.FindRegions failed")
            regions = []
            image = None

        # this is copied from targetfinder:
        # regions,image = libCVwrapper.FindRegions(self.mosaicimage, minsize, maxsize)
        n = len(regions)
        self.logger.info("Regions found: %s" % (n,))
        self.displayRegions(regions)
Example #21
    def testAcquire(self):
        im = self.acquireImage()
        if im is None:
            return

        # filter
        im = numpy.asarray(im, dtype=numpy.float32)
        medfilt = int(self.settings["medfilt"])
        lowfilt = float(self.settings["lowfilt"])
        if medfilt > 1:
            im = ndimage.median_filter(im, size=medfilt)
        if lowfilt > 0:
            im = ndimage.gaussian_filter(im, lowfilt)
        self.setImage(im)

        # find regions
        minsize = self.settings["minsize"]
        maxsize = self.settings["maxsize"]
        regions, image = libCVwrapper.FindRegions(im, minsize, maxsize)

        # this is copied from targetfinder:
        # regions,image = libCVwrapper.FindRegions(self.mosaicimage, minsize, maxsize)
        n = len(regions)
        self.logger.info("Regions found: %s" % (n,))
        self.displayRegions(regions)
Example #22
def grey_processing(inputImg):
    #    fp = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
    fp = np.ones((3, 3))
    data = nd.median_filter(inputImg, size=7)
    data = nd.grey_closing(data, footprint=fp)
    data = nd.grey_opening(data, footprint=fp)
    return data
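A brief usage sketch (hedged: assumes `nd` is scipy.ndimage and `np` is
numpy, as in the function body):

import numpy as np
import scipy.ndimage as nd

noisy = np.random.rand(64, 64)
noisy[::9, ::9] = 1.0  # sprinkle salt noise
clean = grey_processing(noisy)
print(clean.shape)  # shape preserved; isolated spikes smoothed away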
Example #23
def prep_data(dataset):
    df = dataset.copy()

    latlon = list(zip(df.lat, df.lon))
    dist = np.array([distance(latlon[i + 1], latlon[i]) for i in range(len(latlon) - 1)])

    df["dist"] = np.concatenate(([0], np.cumsum(dist)))

    slope = np.abs(100 * np.diff(df.alt) / (1000 * dist))
    slope[np.where(slope < 4)] = 0  # "green"
    slope[np.where((slope >= 4) & (slope < 6))] = 1  # "yellow"
    slope[np.where((slope >= 6) & (slope < 10))] = 2  # "pink"
    slope[np.where((slope >= 10) & (slope < 15))] = 3  # "orange"
    slope[np.where(slope >= 15)] = 4  # "red"
    slope = im.median_filter(slope, 6)

    colors = np.empty_like(slope, dtype=object)
    colors[np.where(slope == 0)] = "green"
    colors[np.where(slope == 1)] = "yellow"
    colors[np.where(slope == 2)] = "pink"
    colors[np.where(slope == 3)] = "orange"
    colors[np.where(slope == 4)] = "red"
    df["colors"] = list(colors) + [None]  # NOTE: [None] pads colors (length N-1) to match the dataframe

    return df
Example #24
def denoise(Xs, ys, alpha=0.4, sigma1=2, sigma2=3, ratio=0.6, t=None):
    print("denoising images...")

    if ratio > 1.0:
        print("Do you really want a ratio of %f for denoise?" % ratio)
        print("Every image produced will always be similar")

    rand_list = randomindex(Xs.shape[0] * ratio, Xs.shape[0])
    Xs = Xs[rand_list]
    ys = ys[rand_list]

    tx = []
    ty = []

    for X, y in zip(Xs, ys):
        noisy = X + alpha * X.std() * np.random.random(X.shape)

        gauss_denoised = ndimage.gaussian_filter(noisy, sigma1)  # computed but unused; only the median-filtered image is kept

        med_denoised = ndimage.median_filter(noisy, sigma2)

        tx.append(med_denoised)
        ty.append(y)

        if t:
            t.print_update(1)

    return np.array(tx), np.array(ty)
Example #25
def main(root, fname_base, outfname):
    searchterm = root + "/" + fname_base + "*.fits"
    print(searchterm)
    fitsfiles = glob.glob(searchterm)
    fitsfiles.sort()
    datatype = determine_data_type(fitsfiles)
    settings = TypeSettings(datatype)

    FFMpegWriter = manimation.writers["ffmpeg"]
    metadata = dict(title="IRTF " + outfname, artist="Matplotlib", comment="IRTF" + str(settings.zeroshape))
    writer = FFMpegWriter(fps=5, metadata=metadata)

    fig = plt.figure(figsize=settings.figsize)
    handle = plt.imshow(
        np.zeros(settings.zeroshape), cmap=settings.cmap, interpolation="nearest", aspect=settings.aspect, vmax=2000
    )

    tomovie = fitsfiles
    with writer.saving(fig, outfname + ".mp4", 100):
        for fitsfile in tomovie:
            print(fitsfile)
            no = fitsfile.split("-")[1][:5]
            time = pyfits.getheader(fitsfile)["TIME_OBS"]
            handle.axes.set_title(no + " " + time)  # get_axes() was removed in newer matplotlib
            data = read_scan1(fitsfile)
            filtered = median_filter(data, 2)
            handle.set_data(filtered)
            writer.grab_frame()
Example #26
def segmentglobal(img, filter_size=5, level=0.7):
    """
    Segment image using gradients calculated globally. Apply a Wiener filter to
    remove salt-and-pepper noise and a median filter to smooth edges, then 
    calculate gradients across the entire image (between adjacent pixels in 
    both directions). Threshold the image constructed by multiplying the 
    original image with its derivative (this helps define the edges).

    args:
        img (ndarray): input image

    kwargs:
        filter_size (int): neighborhood size of Wiener and median filters
        level (float): threshold level, specified as a fraction of the 
            calculated Otsu threshold; must be in the range [0, 1]

    returns:
        ndarray: thresholded binary image (dtype = bool)

    """
    img2 = complement(img, 1.0)
    img3 = wiener(img2, (filter_size, filter_size))
    img4 = median_filter(img3, filter_size)
    img5 = energy(img4)
    img6 = (1 + img5) * img4
    return threshold(img6, level=level).astype(bool)
Example #27
    def processAndSaveFFT(self, imgdata, fftpath):
        if os.path.isfile(fftpath):
            print("FFT file found")
            if fftpath in self.freqdict.keys():
                print("Freq found")
                return False
            print("Freq not found")
        print("creating FFT file: ", fftpath)

        ### downsize and filter leginon image
        if self.params["uncorrected"]:
            # the original passed a bare `params` here, which is undefined in
            # this scope; self.params is presumably what was meant
            imgarray = imagefilter.correctImage(imgdata, self.params)
        else:
            imgarray = imgdata["image"]

        ### calculate power spectra
        apix = apDatabase.getPixelSize(imgdata)
        fftarray, freq = ctfpower.power(imgarray, apix, mask_radius=0.5, fieldsize=self.params["fieldsize"])
        # fftarray = imagefun.power(fftarray, mask_radius=1)

        fftarray = ndimage.median_filter(fftarray, 2)

        ## perform a rotational average and remove peaks
        rotfftarray = ctftools.rotationalAverage2D(fftarray)
        stdev = rotfftarray.std()
        rotplus = rotfftarray + stdev * 4
        fftarray = numpy.where(fftarray > rotplus, rotfftarray, fftarray)

        ### save to jpeg
        self.freqdict[fftpath] = freq
        mrc.write(fftarray, fftpath)

        self.saveFreqFile()

        return True
Example #28
def run(f):
    patient_id = os.path.basename(f)[: -len("_seg.nii.gz")]

    # if patient_id != "1585":
    #     return

    print("PATIENT_ID", patient_id)

    f_img = img_folder + "/" + patient_id + ".nii"
    if not os.path.exists(f_img):
        f_img += ".gz"

    seg = irtk.imread(f, dtype="float32", force_neurological=True)
    img = irtk.imread(f_img, dtype="float32", force_neurological=True)

    img = irtk.Image(nd.median_filter(img.view(np.ndarray), (3, 5, 5)), img.get_header())

    ga = all_ga[patient_id]

    scale = get_CRL(ga) / get_CRL(30.0)

    # if all_iugr[patient_id][0] == 1:
    #     scale = (get_weight(ga,0.02) / get_weight(30,0.5)) ** (1.0/3.0)
    # else:
    #     scale = (get_weight(ga,0.5) / get_weight(30,0.5)) ** (1.0/3.0)

    seg = seg.resample(1.0 * scale, interpolation="nearest")
    img = img.resample(1.0 * scale, interpolation="bspline")

    irtk.imwrite(output_folder + "/data_resampled_weight/" + patient_id + "_img.nii.gz", img)
    irtk.imwrite(output_folder + "/data_resampled_weight/" + patient_id + "_seg.nii.gz", seg)

    return
Example #29
	def stripNonSushiColors(self, img, img_hsv, img_middle):
		#TODO custom defined bound, may want to get from training
		MIN_HUE = np.array([30,40,40])
		MAX_HUE = np.array([150,256,256])
		
		#regions with non-sushi colors
		hue_regions = cv2.inRange(img_hsv, MIN_HUE, MAX_HUE)
		hue_regions = ndimage.median_filter(hue_regions, 3)
		
		#keep largest region
		labeled_img, num_labels = ndimage.label(~hue_regions)
		# count pixels per label (the original passed np.ones_like(num_labels), a scalar)
		component_sizes = ndimage.sum(np.ones_like(labeled_img), labeled_img, range(num_labels + 1))

		mask_sizes = component_sizes != component_sizes.max()
		pixels_to_remove = mask_sizes[labeled_img]
		labeled_img[pixels_to_remove] = 0
		mask = labeled_img > 0
		
		# custom color of non-sushi region (bright green, since sushi uses seaweed, which is a very dark green, almost black)
		screen = np.ones(img_hsv.shape, dtype=np.uint8)
		screen[:,:] = [50, 200, 200]
		screen = cv2.cvtColor(screen, cv2.COLOR_HSV2BGR)
		
		result = cv2.bitwise_and(img, img, mask=mask.astype("uint8"))
		screen_mask = cv2.bitwise_and(screen, screen, mask=(~mask).astype("uint8"))
		result = cv2.add(result, screen_mask)

		return result
Example #30
def checkArrayMinMax(self, a1, a2):
    """
    Tests whether an image has a valid range for libcv
    """
    a1b = ndimage.median_filter(a1, size=3)
    min1 = ndimage.minimum(a1b)
    max1 = ndimage.maximum(a1b)
    if max1 - min1 < 10:
        self.logger.error("Old Image Range Error %d" % int(max1 - min1))
        return False
    a2b = ndimage.median_filter(a2, size=3)
    min2 = ndimage.minimum(a2b)
    max2 = ndimage.maximum(a2b)
    if max2 - min2 < 10:
        self.logger.error("New Image Range Error %d" % int(max2 - min2))
        return False
    return True