Example no. 1
def dem_ditch_detection(arr):
    """
    DEM ditch enhancement.
    """
    newArr = arr.copy()
    maxArr = gf(arr, np.amax, footprint=create_circular_mask(30))
    minArr = gf(arr, np.amin, footprint=create_circular_mask(10))
    meanArr = gf(arr, np.median, footprint=create_circular_mask(10))
    minMaxDiff = arr.copy()
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if minArr[i][j] < maxArr[i][j] - 3:
                minMaxDiff[i][j] = 1
            else:
                minMaxDiff[i][j] = 0
    closing = morph.binary_closing(minMaxDiff,
                                   structure=create_circular_mask(10))
    closing2 = morph.binary_closing(closing,
                                    structure=create_circular_mask(10))
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if arr[i][j] < meanArr[i][j] - 0.1:
                newArr[i][j] = meanArr[i][j] - arr[i][j]
            else:
                newArr[i][j] = 0
            if closing2[i][j] == 1:
                newArr[i][j] = 0
    return newArr
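Most of these snippets pass the result of create_circular_mask to gf as a footprint, but neither helper is shown. In the footprint-based examples gf is presumably scipy.ndimage.generic_filter (in others it is an alias for gaussian_filter), and the mask is presumably a boolean disc, like the kernel built in circular_filter further down. A minimal sketch under those assumptions:

import numpy as np
from scipy.ndimage import generic_filter as gf  # assumed alias in the footprint-based examples

def create_circular_mask(radius):
    """Hypothetical helper: boolean disc footprint of the given radius."""
    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    return x ** 2 + y ** 2 <= radius ** 2

# e.g. the 30-pixel local maximum used in dem_ditch_detection above:
# maxArr = gf(arr, np.amax, footprint=create_circular_mask(30))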
Example no. 2
def slopeNonDitchAmplification(arr):
    newArr = arr.copy()
    arr = gf(arr, np.nanmedian, footprint=create_circular_mask(35))
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if arr[i][j] < 8:
                newArr[i][j] = 0
            elif arr[i][j] < 9:
                newArr[i][j] = 20
            elif arr[i][j] < 10:
                newArr[i][j] = 25
            elif arr[i][j] < 11:
                newArr[i][j] = 30
            elif arr[i][j] < 13:
                newArr[i][j] = 34
            elif arr[i][j] < 15:
                newArr[i][j] = 38
            elif arr[i][j] < 17:
                newArr[i][j] = 42
            elif arr[i][j] < 19:
                newArr[i][j] = 46
            elif arr[i][j] < 21:
                newArr[i][j] = 50
            else:
                newArr[i][j] = 55
    return gf(newArr, np.nanmean, footprint=create_circular_mask(15))
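The elif ladder above is a fixed reclassification of slope values into bins; the same lookup can be applied without the per-pixel loop, e.g. with np.digitize. A sketch assuming the same bin edges and output values:

import numpy as np

def reclassify_slope(arr):
    # vectorized equivalent of the elif ladder in slopeNonDitchAmplification
    edges = np.array([8, 9, 10, 11, 13, 15, 17, 19, 21])
    values = np.array([0, 20, 25, 30, 34, 38, 42, 46, 50, 55])
    return values[np.digitize(arr, edges)]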
Example no. 3
def skyViewNonDitchAmplification(arr):
    arr = gf(arr, np.nanmedian, footprint=create_circular_mask(25))
    newArr = arr.copy()
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if arr[i][j] < 0.92:
                newArr[i][j] = 46
            elif arr[i][j] < 0.93:
                newArr[i][j] = 37
            elif arr[i][j] < 0.94:
                newArr[i][j] = 29
            elif arr[i][j] < 0.95:
                newArr[i][j] = 22
            elif arr[i][j] < 0.96:
                newArr[i][j] = 16
            elif arr[i][j] < 0.97:
                newArr[i][j] = 11
            elif arr[i][j] < 0.98:
                newArr[i][j] = 7
            elif arr[i][j] < 0.985:
                newArr[i][j] = 4
            elif arr[i][j] < 0.99:
                newArr[i][j] = 2
            else:
                newArr[i][j] = 1
    return gf(newArr, np.nanmean, footprint=create_circular_mask(10))
Example no. 4
def hpmfFilter(arr):
    binary = arr.copy()
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if arr[i][j] < 1000000 and arr[i][j] > -1000000:
                binary[i][j] = 1000000000
            else:
                binary[i][j] = 0
    mean = gf(gf(gf(gf(binary, np.amax, footprint=create_circular_mask(1)), np.amax, footprint=create_circular_mask(1)), np.median, footprint=create_circular_mask(2)), np.nanmean, footprint=create_circular_mask(5))
    reclassify = mean.copy()
    for i in range(len(mean)):
        for j in range(len(mean[i])):
            if mean[i][j] < 1:
                reclassify[i][j] = 0
            elif mean[i][j] < 30000000:
                reclassify[i][j] = 1
            elif mean[i][j] < 70000000:
                reclassify[i][j] = 2
            elif mean[i][j] < 100000000:
                reclassify[i][j] = 50
            elif mean[i][j] < 200000000:
                reclassify[i][j] = 75
            elif mean[i][j] < 500000000:
                reclassify[i][j] = 100
            elif mean[i][j] < 800000000:
                reclassify[i][j] = 300
            elif mean[i][j] < 1000000000:
                reclassify[i][j] = 600
            else:
                reclassify[i][j] = 1000
    return gf(reclassify, np.nanmean, footprint=create_circular_mask(7))
Example no. 5
    def estimate_sensitivity(self):
        print("opening mosaic...")
        exposure = self.raw_exposure_ext().data

        variance = self.raw_variance_ext().data
        significance = self.raw_significance_ext().data
        intensity = self.raw_intensity_ext().data
        nce = self.raw_significance_ext().data

        mask = exposure > exposure.max() / self.exposure_fraction_cut

        from scipy.ndimage.filters import gaussian_filter as gf

        rms = (gf(intensity ** 2, 30) - gf(intensity, 30) ** 2) ** 0.5

        fits.PrimaryHDU(rms).writeto(self.hostdir+self.out_prefix+"rms_%i.fits" % self.i_band, overwrite=True)

        total_rms = std(significance[mask])

        def avg_nonan(a):
            return average(a[where(~isnan(a))])

        #e1, e2 = self.erange(self.i_band)

        statdict = dict(
            rms=total_rms,
            rms_min=rms.min(),
            variance_min=variance[variance > 0].min() ** 0.5,
            exposure_max=exposure.max(),
            significance_max=significance.max(),
        )

        self.save_statistics(statdict)
Example no. 6
 def plotCenters(self):
     self.p1.clear()
     self.p2.clear()
     for r in self.rois:
         centers = np.copy(r['centers'])
         #self.p1.plot(y=centers[:, 0] / np.average(centers[:, 0]), pen=r['roi'].pen)
         #self.p2.plot(y=centers[:, 1] / np.average(centers[:, 1]), pen=r['roi'].pen)
         self.p1.plot(y=gf(centers[:, 0], self.slider.value()), pen=r['roi'].pen)
         self.p2.plot(y=gf(centers[:, 1], self.slider.value()), pen=r['roi'].pen)
Example no. 7
 def plotCenters(self):
     self.p1.clear()
     self.p2.clear()
     for r in self.rois:
         centers = np.copy(r['centers'])
         #self.p1.plot(y=centers[:, 0] / np.average(centers[:, 0]), pen=r['roi'].pen)
         #self.p2.plot(y=centers[:, 1] / np.average(centers[:, 1]), pen=r['roi'].pen)
         self.p1.plot(y=gf(centers[:, 0], self.slider.value()),
                      pen=r['roi'].pen)
         self.p2.plot(y=gf(centers[:, 1], self.slider.value()),
                      pen=r['roi'].pen)
Example no. 8
    def filterf(self):
        """Gaussian filtering of velocity """

        self._obj["u"] = xr.DataArray(
            gf(self._obj["u"].values, [1, 1, 0]), dims=("x", "y", "t")
        )
        self._obj["v"] = xr.DataArray(
            gf(self._obj["v"].values, [1, 1, 0]), dims=("x", "y", "t")
        )

        return self._obj
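Passing a per-axis sigma to gaussian_filter smooths each axis by its own amount, so [1, 1, 0] blurs the two spatial axes and leaves the last ("t") axis untouched; each time slice is filtered independently. A small standalone check of that behaviour:

import numpy as np
from scipy.ndimage import gaussian_filter as gf

u = np.random.rand(32, 32, 10)                 # hypothetical (x, y, t) velocity stack
u_s = gf(u, sigma=[1, 1, 0])                   # smooth x and y, leave t alone
print(np.allclose(u_s[..., 3], gf(u[..., 3], sigma=1)))   # True: frames are filtered independently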
Example no. 9
def auto_bright_nonlin(img,
                       epochs,
                       transform_factor=0.5,
                       sigma=0.8,
                       mean_thresh=2,
                       mean_reduction=0.9):
    """
    TODO: Transform multiple images simultaneously (e.g. Before and After) per Roy's request

    Try sliding window approach and maximize entropy for each window. Windows can't be too small, or too large.
    :param img: numpy array/obj of the image you want to transform
    :param epochs: hyperparameter for number of transformations
    :param transform_factor: hyperparameter for rate of exponential transformation
    :param sigma: gaussian filter hyperparameter
    :param mean_thresh: hyperparameter controlling sensitivity of intensity cutoff
    :param mean_reduction: hyperparameter for reducing the lowest intensity pixels
    :return best_img: maximum entropy image
    """
    # normalize pixels between 0 and 1
    img = np.array(img).astype(float)
    img *= 1 / np.max(img)

    # calculate initial entropy of the image
    counts, bins = np.histogram(img)
    count_frac = [count / np.sum(counts) for count in counts]
    d = dit.Distribution(list(map(str, range(len(counts)))), count_frac)
    entropy_loss = [entropy(d)]
    d_entropy = 1  # arbitrary
    imgs = [
        img
    ]  # holds all images so that we can choose the one with the best entropy
    for i in range(epochs):
        # remove low intensity pixels
        img[img <= mean_thresh * np.mean(img)] *= mean_reduction
        img = gf(img, sigma=sigma)
        img = img**(1 - (transform_factor * d_entropy))
        img[img == np.inf] = 1  # clip infinities at 1
        imgs.append(img)
        counts, bins = np.histogram(img)
        count_frac = [count / np.sum(counts) for count in counts]
        d = dit.Distribution(list(map(str, range(len(counts)))), count_frac)
        entropy_loss.append(entropy(d))
        d_entropy = entropy_loss[-1] - entropy_loss[-2]
        if i % 10 == 0:
            print('Finished: ', 100 * i / epochs, '%')

    print('Best entropy: ', max(entropy_loss), 'at ix ',
          entropy_loss.index(max(entropy_loss)))
    best_img = imgs[entropy_loss.index(max(entropy_loss))]
    best_img = gf(best_img, sigma=sigma)
    return best_img, entropy_loss
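The entropy bookkeeping above builds a dit.Distribution from the normalized histogram counts; the same quantity can be computed with scipy alone. A sketch, assuming the intent is the Shannon entropy (in bits) of the default 10-bin histogram:

import numpy as np
from scipy.stats import entropy as shannon_entropy

def histogram_entropy(img, bins=10):
    # Shannon entropy of the image's normalized histogram
    counts, _ = np.histogram(img, bins=bins)
    p = counts / counts.sum()
    return shannon_entropy(p, base=2)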
Example no. 10
 def findminmax(self, data, rng=50, start=0, ignore=10, dtype='height'):
     """ find local min & max.
     """
     foo = argrelextrema(gf(data, 5), np.less_equal, order=rng)[0]
     vall = foo[np.where((np.roll(foo, 1) - foo) != -1)[0]]
     vall = vall[vall >= ignore][start:]
     foo = argrelextrema(gf(data, 5), np.greater_equal, order=rng)[0]
     peak = foo[np.where((np.roll(foo, 1) - foo) != -1)[0]]
     peak = peak[peak >= ignore][start:]
     if dtype == 'height':
         return [peak, vall]
     elif dtype == 'depth':
         peakvalue = gf(data, 5)[peak]
         vallvalue = gf(data, 5)[vall]
         return [peak, vall, peakvalue, vallvalue]
Example no. 11
def circular_filter(image_data, radius):
    kernel = np.zeros((2 * radius + 1, 2 * radius + 1))
    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    mask = x**2 + y**2 <= radius**2
    kernel[mask] = 1
    filtered_image = gf(image_data, np.median, footprint=kernel)
    return filtered_image
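circular_filter is a plain circular median filter. A minimal usage sketch, assuming gf is bound to scipy.ndimage.generic_filter as in the other footprint-based examples here:

import numpy as np
from scipy.ndimage import generic_filter as gf

noisy = np.random.rand(64, 64)                  # hypothetical noisy image
smoothed = circular_filter(noisy, radius=3)     # median over a disc of radius 3
print(smoothed.shape)                           # (64, 64)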
Example no. 12
def gau_smooth(arr0,typ='single',krn=75.0/4.0,verbose=False):
	import numpy as np
	from scipy.ndimage.filters import gaussian_filter1d as gf1d
	from scipy.ndimage.filters import gaussian_filter as gf
	N = arr0.ndim
	ktyp = type(krn)
	modes = ['reflect','wrap','wrap']
	if ktyp==float:
		krn = [krn,krn,krn]
	if typ=='single':
		ret = np.zeros(arr0.shape)
		for i in range(3):
			if verbose:
				print('Smoothing type: wrap, component: {}'.format(i))
			ret[i] = gf(arr0[i],sigma=krn,mode='wrap')
		return ret
	elif typ=='piecewise':
		for i in range(3):
			if N==3:
				cax = i
			if N==4:
				cax = i+1
			if verbose:
				print('Axis = {}; k = {}, mode = {}'.format(cax,krn[i],modes[i]))
			clab = 'arr{}'.format(i)
			nlab = 'arr{}'.format(i+1)
			if verbose:
				print(clab,nlab)
			locals()[nlab] = gf1d(locals()[clab],axis=cax,sigma=krn[i],mode=modes[i])
		if verbose:
			print('Returning {}'.format(nlab))
		return	locals()[nlab]
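The 'piecewise' branch above threads its intermediate arrays through locals(), which CPython does not guarantee to persist inside a function; a plain variable does the same job. A sketch of that branch only, keeping the same axes, kernels and modes:

from scipy.ndimage import gaussian_filter1d as gf1d

def gau_smooth_piecewise(arr0, krn, verbose=False):
    # smooth one axis at a time, without the locals() indirection
    modes = ['reflect', 'wrap', 'wrap']
    if isinstance(krn, float):
        krn = [krn, krn, krn]
    out = arr0
    for i in range(3):
        cax = i if arr0.ndim == 3 else i + 1
        if verbose:
            print('Axis = {}; k = {}, mode = {}'.format(cax, krn[i], modes[i]))
        out = gf1d(out, axis=cax, sigma=krn[i], mode=modes[i])
    return out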
Example no. 13
def read_hyper_clean(fpath, fname=None):
    """ Read a clean hyperspectral scan. """

    # -- read in the scan
    print("reading and converting to float...")
    t0 = time.time()
    cube = read_hyper(fpath, fname)
    cube.data = cube.data.astype(float)
    elapsed_time(t0)

    # -- read in the offset
    oname = os.path.split(cube.filename)[-1].replace(".raw", "_off.npy")
    opath = os.path.join("..", "output", "scan_offsets", oname)
    try:
        off = np.load(opath)
    except:
        print("offset file {0} not found!!!\n  Generating...".format(opath))
        t0 = time.time()
        off = np.median(gf(cube.data, (0, 1, 1)), 2, keepdims=True)
        np.save(opath, off)
        elapsed_time(t0)

    # -- remove offset
    print("removing offset...")
    t0 = time.time()
    cube.data -= off
    elapsed_time(t0)

    return cube
Example no. 14
    def evaluation(self, exeno, err=[]):
        """ exercise performance evaluation
        """
        fig = plt.figure(1)
        ax = fig.add_subplot(111)
        if exeno == 1:       
            ax.plot(gf(self.breath_list, 10), color='g')
            if len(self.ngframe) != 0:
                for i in self.ngframe:
                    y1 = self.breath_list[i]
                    if y1 < 15000:
                        y2 = y1+10000
                    else:
                        y2 = y1-10000    
                    ax.annotate('Not deep breath', xy=(i, y1+10), xytext=(i, y2),arrowprops=dict(facecolor='red', shrink=0.05),)
            plt.title('Breath in and out')
            fig.savefig('output/bio.jpg')
        elif exeno == 2:
            ax.plot(self.hstate[:,0]*20000, color='b')
            ax.plot(self.hstate[:,1]*20000-20000, color='r')
            # ax.plot(gf(self.breath_list, 10)/self.breath_list[0]*2, color='g')
            ax.plot(gf(self.breath_list, 10), color='g')
            if len(self.ngframe) != 0:
                for i in self.ngframe:
                    y1 = self.breath_list[i]#/self.breath_list[0]*2
                    y2 = 1.5*10000
                    ax.annotate('breath not deep enough', xy=(i, y1), xytext=(i, y2),arrowprops=dict(facecolor='red', shrink=0.05),)
            if len(self.missingbreath) != 0:
                for i in self.missingbreath:
                    x = sum(i)/2
                    y1 = self.breath_list[x]#/self.breath_list[0]*2 
                    y2 = 1*10000
                    ax.annotate('missing breath', xy=(x, y1), xytext=(x, y2),arrowprops=dict(facecolor='green', shrink=0.05),)

            plt.title('Breath in and out & hands open and close')
            fig.savefig('output/biohoc.jpg')
            plt.show()
            # pdb.set_trace()
        plt.close(fig)

        print('\nevaluation:')
        if len(self.error) != 0:
            for i in self.error:
                print(i)
            print('\n')
        else:
            print('perfect !!\n')
Example no. 15
	def handle(self, *args, **options):
		# vars
		experiment_name = options['expt']
		series_name = options['series']
		t = options['t']

		R = 1
		delta_z = -8
		# sigma = 5

		if experiment_name!='' and series_name!='':
			experiment = Experiment.objects.get(name=experiment_name)
			series = experiment.series.get(name=series_name)

			# select composite
			composite = series.composites.get()

			# load gfp
			gfp_gon = composite.gons.get(t=t, channel__name='0')
			gfp_start = exposure.rescale_intensity(gfp_gon.load() * 1.0)
			print('loaded gfp...')

			# load bf
			bf_gon = composite.gons.get(t=t, channel__name='1')
			bf = exposure.rescale_intensity(bf_gon.load() * 1.0)
			print('loaded bf...')

			for sigma in [0, 5, 10, 20]:
				gfp = gf(gfp_start, sigma=sigma) # <<< SMOOTHING
				for level in range(gfp.shape[2]):
					print('level {} {}...'.format(R, level))
					gfp[:,:,level] = convolve(gfp[:,:,level], np.ones((R,R)))

				# initialise images
				Z = np.zeros(composite.series.shape(d=2), dtype=int)
				Zmean = np.zeros(composite.series.shape(d=2))
				Zbf = np.zeros(composite.series.shape(d=2))

				Z = np.argmax(gfp, axis=2) + delta_z

				# outliers
				Z[Z<0] = 0
				Z[Z>composite.series.zs-1] = composite.series.zs-1

				for level in range(bf.shape[2]):
					print('level {}...'.format(level))
					bf_level = bf[:,:,level]
					Zbf[Z==level] = bf_level[Z==level]

				Zmean = 1 - np.mean(gfp, axis=2) / np.max(gfp, axis=2)

				imsave('zbf_R-{}_sigma-{}_delta_z{}.png'.format(R, sigma, delta_z), Zbf)

			# plt.imshow(Zbf, cmap='Greys_r')
			# plt.show()

		else:
			print('Please enter an experiment')
Example no. 16
def convol_spec(lam, spec, fwhm):
    '''Convolves a spectrum with a Gaussian ILS, with conversion from a FWHM.
    '''
    inter = (lam[1] - lam[0] + lam[-1] - lam[-2]) / 2.
    std = fwhm / 2. / np.sqrt(2. * np.log(2.))
    pix_std = std / inter
    G = gf(spec, pix_std)

    return G
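The conversion above relies on the standard Gaussian relation FWHM = 2*sqrt(2*ln 2)*sigma (about 2.3548*sigma), with sigma then expressed in pixels by dividing by the wavelength step. A quick check with hypothetical numbers:

import numpy as np

lam = np.linspace(300.0, 400.0, 1001)           # hypothetical wavelength grid, 0.1 per pixel
fwhm = 0.5                                      # hypothetical FWHM, same units as lam
sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))
pix_std = sigma / np.mean(np.diff(lam))
print(sigma, pix_std)                           # ~0.2123, ~2.123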
Example no. 17
def pre_smooth(Kbody, shape, sigma=3):
    for i in Kbody.keys():

        Mean = np.tile(
            np.mean(Kbody[i], axis=1).reshape((3, -1)), (1, shape[1]))
        Std = np.tile(np.std(Kbody[i], axis=1).reshape((3, -1)), (1, shape[1]))
        Kbody[i] = gf((Kbody[i] - Mean) / Std, sigma, axis=1) * Std + Mean

    return Kbody
Example no. 18
def merge_mice(mice, datadir, sigma=1.5, norm=True, match_baseline_test_sizes=True):
    # Given a list of already analyzed mice, combine their data into one image and heatmap.
    # Return array: [ [[BASELINE IMG],[BASELINE HEATMAP]], [[TEST IMG],[TEST HEATMAP]] ]
    results = []

    for mode in [BASELINE, TEST]:
        heats = []
        pics = []

        for mouse in mice:
            a = Analysis(mouse, mode, data_directory=datadir)
            bg = a.get_background()['image']
            tr = a.get_tracking()
            heat = tr['heat']
            
            t = np.array(a.get_time())
            tdiff = t[1:] - t[:-1]
            assert np.std(tdiff) < 0.01
            Ts = np.mean(tdiff)
            resamp = tr['params'][np.where(tr['params_key']=='resample')]
            spf = Ts*resamp
            
            heat *= spf
            if norm:
                heat = heat/np.sum(heat)
            
            bg = a.crop(bg)
            heat = a.crop(heat)
            
            heats.append(heat)
            pics.append(bg)

        minheight, minwidth = min([np.shape(h)[0] for h in heats]), min([np.shape(h)[1] for h in heats])
        for idx,h in enumerate(heats):
            heats[idx] = resize(h, (minwidth,minheight))
            pics[idx] = resize(pics[idx], (minwidth,minheight))

        heat = np.dstack(heats)
        img = np.dstack(pics)
        img = np.mean(img, axis=2)
        avg = np.mean(heat, axis=2)
        heat = gf(avg,sigma)
        heat = heat/np.max(heat)
        results.append([img,heat])
        
    if match_baseline_test_sizes:
        heats = [results[BASELINE][HEAT], results[TEST][HEAT]]
        pics = [results[BASELINE][IMG], results[TEST][IMG]]
        minheight, minwidth = min([np.shape(h)[0] for h in heats]), min([np.shape(h)[1] for h in heats])
        for idx,h in enumerate(heats):
            heats[idx] = resize(h, (minwidth,minheight))
            pics[idx] = resize(pics[idx], (minwidth,minheight))

        results[BASELINE][HEAT], results[TEST][HEAT] = heats
        results[BASELINE][IMG], results[TEST][IMG] = pics
    return results
Example no. 19
def customRemoveNoise(arr, radius, threshold, selfThreshold):
    newArr = arr.copy()
    print("creating maxArr")
    maxArr = gf(arr, np.nanmax, footprint=create_circular_mask(radius))
    print("maxArr created")
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if maxArr[i][j] < threshold and arr[i][j] < selfThreshold:
                newArr[i][j] *= 0.25
    return newArr
Example no. 20
def gaussian_action(seq_len, action_bounds, first_zero):
    zeros = np.zeros((first_zero, 2))
    actions = np.random.normal(0.0, 0.1, size=(seq_len - first_zero, 2))
    b = gf(np.array(actions), 10.0, order=0, axis=0)
    bias = -b[0]
    b += bias
    ret = np.concatenate([zeros, b], axis=0)
    seq = np.arange(100)
    #plt.plot(seq, ret)
    #plt.show()
    return ret
Example no. 21
def _gaussian_blur(feature, width, size, **kwargs):
    from scipy.ndimage.filters import gaussian_filter as gf

    y = []
    for img in np.split(feature, feature.shape[0]):
        c = []
        for channel in np.split(img, img.shape[-1]):
            channel = np.squeeze(channel).astype('float')
            c.append(gf(channel, width, mode='constant', truncate=(size // 2) / width))
        y.append(np.stack(c, axis=-1))
    return np.stack(y)
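Here width is the Gaussian sigma and truncate=(size // 2) / width cuts the kernel off at roughly size // 2 pixels, so the blur has a size x size support regardless of sigma. A minimal usage sketch with a hypothetical batch:

import numpy as np

batch = np.random.rand(2, 8, 8, 3).astype('float32')    # hypothetical NHWC batch
blurred = _gaussian_blur(batch, width=1.5, size=5)       # 5x5 support, sigma 1.5
print(blurred.shape)                                     # (2, 8, 8, 3)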
Example no. 22
def conicProbaPostProcessing(arr, maskRadius, threshold):
    masks = []
    print("creating max mask")
    maxArr = gf(arr, np.nanmax, footprint=create_circular_mask(5))
    print("max mask created")
    for i in range(0, 8):
        masks.append(create_conic_mask(maskRadius, i))
    newArr = arr.copy()
    amountOfUpdated = 0
    examinedPoints = 0
    for i in range(len(arr)):
        print(i)
        for j in range(len(arr[i])):
            if arr[i][j] < 0.5 and maxArr[i][j] > 0.6:
                examinedPoints += 1
                trueProba = probaMeanFromMasks(arr, (i, j), masks)
                updatePixel = 0
                if trueProba[0] > threshold and trueProba[4] > threshold:
                    updatePixel = trueProba[0] if trueProba[0] > trueProba[4] else trueProba[4]
                if trueProba[1] > threshold and trueProba[5] > threshold:
                    updatePixelAgain = trueProba[1] if trueProba[1] > trueProba[5] else trueProba[5]
                    if updatePixelAgain > updatePixel:
                        updatePixel = updatePixelAgain
                if trueProba[2] > threshold and trueProba[6] > threshold:
                    updatePixelAgain = trueProba[2] if trueProba[6] > trueProba[2] else trueProba[6]
                    if updatePixelAgain > updatePixel:
                        updatePixel = updatePixelAgain
                if trueProba[3] > threshold and trueProba[7] > threshold:
                    updatePixelAgain = trueProba[3] if trueProba[3] > trueProba[7] else trueProba[7]
                    if updatePixelAgain > updatePixel:
                        updatePixel = updatePixelAgain
                if updatePixel != 0:
                    amountOfUpdated += 1
                    if updatePixel < 0.5:
                        updatePixel *= 1.4
                    elif updatePixel < 0.55:
                        updatePixel *= 1.35
                    elif updatePixel < 0.6:
                        updatePixel *= 1.3
                    elif updatePixel < 0.65:
                        updatePixel *= 1.25
                    elif updatePixel < 0.7:
                        updatePixel *= 1.2
                    elif updatePixel < 0.75:
                        updatePixel *= 1.15
                    elif updatePixel < 0.85:
                        updatePixel *= 1.1
                    elif updatePixel < 0.9:
                        updatePixel *= 1.05
                    newArr[i][j] = updatePixel
    print(examinedPoints)
    print(amountOfUpdated)
    return newArr
Example no. 23
def harris_detector(im, threshold):

    #
    # usage: python harris.py 'image.png'
    #
    #    threshold = 1e-4

    g1 = fspecial_gaussian([9, 9], 1)  # Gaussian with sigma_d
    g2 = fspecial_gaussian([11, 11], 1.5)  # Gaussian with sigma_i

    img1 = conv2(im, g1, 'same')  # blur image with sigma_d
    Ix = conv2(img1, np.array([[-1, 0, 1]]), 'same')  # take x derivative
    Iy = conv2(img1, np.transpose(np.array([[-1, 0, 1]])),
               'same')  # take y derivative

    # Compute elements of the Harris matrix H
    # we can use blur instead of the summing window
    Ix2 = conv2(np.multiply(Ix, Ix), g2, 'same')
    Iy2 = conv2(np.multiply(Iy, Iy), g2, 'same')
    IxIy = conv2(np.multiply(Ix, Iy), g2, 'same')
    eps = 2.2204e-16
    R = np.divide(
        np.multiply(Ix2, Iy2) - np.multiply(IxIy, IxIy), (Ix2 + Iy2 + eps))

    # don't want corners close to image border
    #    R[0:20] = 0  # all columns from the first 15 lines
    #    R[-21:] = 0  # all columns from the last 15 lines
    #    R[:, 0:20] = 0  # all lines from the first 15 columns
    #    R[:, -21:] = 0  # all lines from the last 15 columns
    R[0:29] = 0  # all columns from the first 15 lines
    R[-30:] = 0  # all columns from the last 15 lines
    R[:, 0:29] = 0  # all lines from the first 15 columns
    R[:, -30:] = 0  # all lines from the last 15 columns

    # non-maxima suppression within 3x3 windows
    Rmax = gf(R, np.max, footprint=np.ones((3, 3)))
    Rmax[Rmax != R] = 0  # suppress non-max
    v = Rmax[Rmax != 0]
    Rmax[Rmax < threshold] = 0
    y, x = np.nonzero(Rmax)

    #    # show 'em
    #    for xp, yp in zip(x, y):
    #        rr, cc = draw.circle_perimeter(yp, xp, radius=6, shape=im.shape)
    #        im[rr, cc] = 1
    #    plt.imshow(im)
    #    plt.show()

    pointlist = []
    for vi, xi, yi in zip(v, x, y):
        pointlist.append(CorresPoint(vi, xi, yi))
    return np.array(pointlist)
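The 3x3 non-maxima suppression above runs gf (presumably scipy.ndimage.generic_filter) with np.max; scipy's dedicated maximum_filter computes the same local maximum and is considerably faster. An equivalent sketch:

import numpy as np
from scipy.ndimage import maximum_filter

def nonmax_suppress(R, threshold):
    # keep only pixels that are the maximum of their 3x3 neighbourhood
    Rmax = maximum_filter(R, size=3)
    out = np.where(Rmax == R, R, 0.0)
    out[out < threshold] = 0
    return np.nonzero(out)   # (rows, cols) of surviving corners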
Example no. 24
 def local_minmax(self, seq1, seq2, th, minmax_str, rng=15, scale=3):
     """ finding local min or max depending on the argument minmax
     """
     breath_list = gf(self.breath_list, scale)
     if minmax_str == 'min':
         minmax = np.less
     elif minmax_str == 'max':
         minmax = np.greater
     pts = argrelextrema(breath_list, minmax, order=rng)[0]
     if len(pts) != 0:
         if (pts[-1] - seq1[-1][0] >= rng and minmax(breath_list[pts[-1]], th) and
             pts[-1] > seq2[-1, 0]):
             seq1 = np.vstack((seq1, np.array([pts[-1], breath_list[pts[-1]]])))
     return np.atleast_2d(seq1)
Example no. 25
def impoundmentAmplification(arr):
    newArr = arr.copy()
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if arr[i][j] == 0:
                newArr[i][j] = 0
            elif arr[i][j] < 1000000000:
                newArr[i][j] = 5
            elif arr[i][j] < 1010000000:
                newArr[i][j] = 50
            elif arr[i][j] < 1020000000:
                newArr[i][j] = 100
            elif arr[i][j] < 1030000000:
                newArr[i][j] = 1000
            elif arr[i][j] < 1040000000:
                newArr[i][j] = 10000
            elif arr[i][j] < 1050000000:
                newArr[i][j] = 100000
            else:
                newArr[i][j] = 1000000

    mask = create_circular_mask(10)
    return gf(gf(gf(newArr, np.nanmean, footprint=mask), np.nanmean, footprint=mask), np.nanmedian, footprint=mask)
Example no. 26
 def breath_plot(self, ana, exeno):
     fig = plt.figure(1)
     ax = fig.add_subplot(111)
     if len(ana.hs.hstate) == 0:  # only did breathe test (i.e. exer 1)
         ax.plot(gf(ana.brth.breath_list, 5), color='g')
         if len(ana.brth.ngframe) != 0:
             for i in ana.brth.ngframe:
                 y1 = ana.brth.breath_list[i]
                 y2 = y1 - 20
                 ax.annotate('Not deep breathing', xy=(i, y1-2), xytext=(i, y2),\
                             arrowprops=dict(facecolor='red', shrink=0.05),)
         plt.title('Breathe in and out')
         fig.savefig('output/Exer%s_bio_1.jpg' % repr(exeno))
         plt.close(fig)
Example no. 27
 def local_minmax(self, seq, th, minmax, rng=15):
     """ finding local min or max depending on the argument minmax
     """
     angle_bending = gf(self.angle_mean, 3)
     pts = argrelextrema(angle_bending, minmax, order=rng)[0]
     if len(pts) != 0:
         if pts[-1] - seq[-1][0] >= rng and minmax(angle_bending[pts[-1]],
                                                   th):
             seq = np.vstack(
                 (seq, np.array([pts[-1], angle_bending[pts[-1]]])))
         elif 0 < pts[-1] - seq[-1][0] < rng and minmax(
                 angle_bending[pts[-1]], seq[-1][1]):
             seq[-1] = np.array([pts[-1], angle_bending[pts[-1]]])
     return np.atleast_2d(seq)
Example no. 28
def create_filter_with_mask(postfix, arr_with_filenames, function, mask):
    """
    Create a filter over an array of filenames.npy files.
    Existing files with correct naming schemes will NOT be updated if they exist.
    _raw files will be skipped.
    Returns an iterator that can be used to show/save filtered arrays. A name is also yielded.
    """
    for filename in arr_with_filenames:
        if filename[-4:] != "_raw":
            continue
        elif os.path.isfile(f"./{filename[:-4]}_{postfix}.npy"):
            continue
        arr = np.load(f"{filename}.npy")
        holder = gf(arr, function, footprint=mask)
        yield (f"{filename[:-4]}_{postfix}", holder)
Example no. 29
def dmask(delta, thresh=0.5, morph=True, struct=3, smooth=False, sigma=1):
    mask = np.where(
        delta > thresh, 0,
        1)  # set values where delta > thresh to 0 and otherwise to 1
    mask[np.isnan(delta)] = 0  # make sure NaNs are masked
    if morph:  # removes holes and island pixels
        mask = nd.binary_closing(nd.binary_opening(mask,
                                                   structure=np.ones(
                                                       (struct, struct))),
                                 structure=np.ones((struct, struct)))
    if smooth:
        mask = gf(
            mask.astype(float), sigma
        )  # reduces pixelation. Not applicable with mixed mapping from Tyo et al
        # only usable if P is multiplied by D prior to visualization
    return mask
Example no. 30
    def score(self, im_diff):
        a, b = self.pos.x, self.pos.y
        r = np.round(self.diameter / 2).astype('int')
        n = 2 * r + 1
        data = im_diff[b - r:b + r + 1, a - r:a + r + 1, :]

        kernel = np.zeros((n, n, 3))
        y, x = np.ogrid[-r:r + 1, -r:r + 1]
        mask_circle = x**2 + y**2 <= r**2
        kernel[mask_circle] = 1
        # I expect to see RuntimeWarnings in this block
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            fitness = np.mean(
                np.mean(np.mean(gf(data, np.mean, footprint=kernel))))

        self.fitness = fitness
Example no. 31
def get_batch():

    out = buffer[0:1 + 2]
    out_hr = buffer_hr[0:3]
    buffer.pop(0)
    buffer_hr.pop(0)
    add_image = next(reader)
    buffer_hr.append(add_image)

    # downscale by factor 4 with gaussian smoothing
    if downscale:
        s = 1.5
        add_image = gf(add_image, sigma=[s, s, 0])[0::4, 0::4, :]
        add_image = np.rint(np.clip(add_image, 0, 255)).astype(np.uint8)

    buffer.append(add_image)

    return np.array(out), np.array(out_hr)
Example no. 32
 def clip(self, seqlist, weight):
     """ try find the subsequence from current sequence
     """
     tgrad = 0
     for ii in [3, 4, 5, 6, 7, 8, 12, 13, 14, 15, 16, 17]:
         tgrad += (np.gradient(gf(seqlist[:, ii], 1))**2) * weight[ii]
     tgrad = tgrad**0.5
     lcalminm = argrelextrema(tgrad, np.less, order=5)[0]
     foo = np.where(((tgrad < 1) * 1) == 0)[0]
     if (len(foo) == 0) | (len(lcalminm) == 0):
         return []
     else:
         lb = max(foo[0], 50)
         minm = []
         for ii in lcalminm[lcalminm > lb]:
             if tgrad[ii] < 1:
                 minm.append(ii)
         return minm
Example no. 33
def binarize(data, sigma=None, smooth=None):
    """
    Convert spectra to boolean values at each wavelength.

    The procedure estimates the noise by taking the standard
    deviation of the derivative spectrum and dividing by sqrt(2).
    The zero-point offset for each spectrum is estimated as the
    mean of the first 10 wavelengths (empirically seen to be
    "flat" for most spectra) and is removed.  Resultant points
    >5sigma [default] are given a value of True.

    Parameters
    ----------
    sigma : float, optional
        Sets the threshold, above which the wavelength is considered
        to have flux.
    """

    # -- smooth if desired
    dat = data.T if not smooth else gf(data.T,[0,smooth])

    if sigma:
        # -- estimate the noise and zero point for each spectrum
        print("BINARIZE: estimating noise level and zero-point...")
        sig = (dat[1:]-dat[:-1])[-100:].std(0)/np.sqrt(2.0)
        zer = dat[:10].mean(0)

        # -- converting to binary
        print("BINARIZE: converting spectra to boolean...")
        bdata = (dat-zer)>(sigma*sig)
    else:
        # -- careful about diffraction spikes which look like
        # -- absorption
#        mn_tot = dat.mean(0)
#        mn_end = dat[-100:].mean(0)
#        index  = mn_tot > mn_end
#        mn     = mn_tot*index + mn_end*~index

        # -- binarize by comparison with mean
        bdata = dat>dat.mean(0)

    return bdata.T
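The sqrt(2) in the docstring comes from differencing: if adjacent samples carry independent noise of standard deviation sigma, their difference has standard deviation sigma*sqrt(2), so std(diff)/sqrt(2) recovers the per-sample noise level. A quick numerical check:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(0.0, 1.0, 100000)                # i.i.d. noise with sigma = 1
print(np.std(np.diff(x)) / np.sqrt(2.0))        # ~1.0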
Example no. 34
File: noaa.py Project: gdobler/hyss
    def binarize(self, sigma=None, interpolated=False, smooth=False):
        """
        Convert spectra to boolean values at each wavelength.

        The procedure estimates the noise by taking the standard
        deviation of the derivative spectrum and dividing by sqrt(2).
        The zero-point offset for each spectrum is estimated as the
        mean of the first 10 wavelengths (empirically seen to be
        "flat" for most spectra) and is removed.  Resultant points
        >5sigma [default] are given a value of True.

        Parameters
        ----------
        sigma : float, optional
            Sets the threshold, above which the wavelength is considered to 
            have flux.

        interpolated: bool, optional
            If True, binarize the interpolated spectra.
        """

        dat = self.rows.T if not interpolated else self.irows.T

        if smooth:
            dat[:] = gf(dat,[smooth,0])

        if sigma:
            # -- estimate the noise and zero point for each spectrum
            print("BINARIZE: estimating noise level and zero-point...")
            sig = (dat[1:]-dat[:-1]).std(0)/np.sqrt(2.0)
            zer = dat[:10].mean(0)

            # -- converting to binary
            print("BINARIZE: converting spectra to boolean...")
            self.brows = ((dat-zer)>(sigma*sig)).T.copy()
        else:
            # -- binarize by comparison with mean
            self.brows = (dat > dat.mean(0)).T

        return
Example no. 35
    def find_centers(self):
        win = g.m.currentWindow
        im = win.image
        mx,my=win.imageDimensions()
        self.rois = []
        g.centers = []
        for roi in g.m.currentWindow.rois:
            mask = roi.mask
            mask=mask[(mask[:,0]>=0)*(mask[:,0]<mx)*(mask[:,1]>=0)*(mask[:,1]<my)]

            xx=mask[:,0]; yy=mask[:,1]
            centers = []

            for frame in im:
                gframe = gf(frame, 1)
                x0, y0 = np.unravel_index(gframe.argmax(), gframe.shape)
                #centers.append([x0, y0])
                #vals = fitGaussian(frame, (x0, y0, 1, 3))
                #x1, y1, a, b = vals[0]
                centers.append([x0, y0])
            self.rois.append({'roi': roi, 'centers': centers})
        self.plotCenters()
Example no. 36
def mod_zdiff(composite, mod_id, algorithm, **kwargs):

    zdiff_channel, zdiff_channel_created = composite.channels.get_or_create(name="-zdiff")

    for t in range(composite.series.ts):
        print("step02 | processing mod_zdiff t{}/{}...".format(t + 1, composite.series.ts), end="\r")

        # get zmod
        zmod_gon = composite.gons.get(channel__name="-zmod", t=t)
        zmod = (exposure.rescale_intensity(zmod_gon.load() * 1.0) * composite.series.zs).astype(int)

        zbf = exposure.rescale_intensity(composite.gons.get(channel__name="-zbf", t=t).load() * 1.0)
        zmean = exposure.rescale_intensity(composite.gons.get(channel__name="-zmean", t=t).load() * 1.0)

        # get markers
        markers = composite.markers.filter(track_instance__t=t)

        zdiff = np.zeros(zmod.shape)

        for marker in markers:
            marker_z = zmod[marker.r, marker.c]

            diff = np.abs(zmod - marker_z)
            diff_thresh = diff.copy()
            diff_thresh = gf(diff_thresh, sigma=5)
            diff_thresh[diff > 1] = diff.max()
            marker_diff = 1.0 - exposure.rescale_intensity(diff_thresh * 1.0)
            zdiff = np.max(np.dstack([zdiff, marker_diff]), axis=2)

        zdiff_gon, zdiff_gon_created = composite.gons.get_or_create(
            experiment=composite.experiment, series=composite.series, channel=zdiff_channel, t=t
        )
        zdiff_gon.set_origin(0, 0, 0, t)
        zdiff_gon.set_extent(composite.series.rs, composite.series.cs, 1)

        zdiff_gon.array = (zdiff.copy() + zmean.copy()) * zmean.copy()
        zdiff_gon.save_array(composite.series.experiment.composite_path, composite.templates.get(name="source"))
        zdiff_gon.save()
Example no. 37
def task(filename, task_params):
    ''' Simple task calculating average value in a region. Boundary assumes the expected format being sent in.
    @author Wei Ren
    @param filename - file name of the FITS image
    @param task_params - Region to calculate mean value on. Currently support `rect` and `circle` regions.
    @return Mean value of the results or Error info.
    '''

    hdulist = fits.open(filename)
    region = hdulist[0].data
    region_type, value = task_params['type'], task_params['value']

    if (region_type=='rect'):
        region_slice = parseRegion_rect(value)
        ROI = region[region_slice]
        avg = str(np.mean(ROI))
    elif (region_type=='circle'):
        mask = circle_mask(region, value)
        avg = str(gf(region, np.mean, footprint=mask))
    else:
        avg = "Region type is not recognized."

    hdulist.close()
    return {"result":avg},None
Example no. 38
	def create_max_gfp(self):
		# template
		template = self.templates.get(name='source') # SOURCE TEMPLATE

		# channels
		max_gfp_channel, max_gfp_channel_created = self.channels.get_or_create(name='-mgfp')

		# iterate over frames
		for t in range(self.series.ts):
			print('step01 | creating max_gfp t{}/{}...'.format(t+1, self.series.ts), end='\r')

			# load gfp
			gfp_gon = self.gons.get(t=t, channel__name='0')
			gfp = exposure.rescale_intensity(gfp_gon.load() * 1.0)
			gfp = gf(gfp, sigma=2) # <<< SMOOTHING

			# images to channel gons
			max_gfp_gon, max_gfp_gon_created = self.gons.get_or_create(experiment=self.experiment, series=self.series, channel=max_gfp_channel, t=t)
			max_gfp_gon.set_origin(0,0,0,t)
			max_gfp_gon.set_extent(self.series.rs, self.series.cs, 1)

			max_gfp_gon.array = np.max(gfp, axis=2)
			max_gfp_gon.save_array(self.series.experiment.composite_path, template)
			max_gfp_gon.save()
Example no. 39
	def create_zunique(self):

		zunique_channel, zunique_channel_created = self.channels.get_or_create(name='-zunique')

		for t in range(self.series.ts):
			print('creating zunique t{}/{}'.format(t+1, self.series.ts), end='\r' if t<self.series.ts-1 else '\n')
			zmean = exposure.rescale_intensity(self.gons.get(channel__name='-zmean', t=t).load() * 1.0)
			zmod = exposure.rescale_intensity(self.gons.get(channel__name='-zmod', t=t).load() * 1.0)

			zunique = np.zeros(zmean.shape)
			for unique in np.unique(zmod):
				zunique[zmod==unique] = np.max(zmean[zmod==unique]) / np.sum(zmean)

			zunique = gf(zunique, sigma=3)

			zunique_gon, zunique_gon_created = self.gons.get_or_create(experiment=self.experiment, series=self.series, channel=zunique_channel, t=t)
			zunique_gon.set_origin(0,0,0,t)
			zunique_gon.set_extent(self.series.rs, self.series.cs, 1)

			zunique_gon.array = zunique.copy()
			zunique_gon.save_array(self.experiment.composite_path, self.templates.get(name='source'))
			zunique_gon.save()

		return zunique_channel
Example no. 40
	def create_zmod(self, R=5, delta_z=-8, sigma=5):
		# template
		template = self.templates.get(name='source') # SOURCE TEMPLATE

		# channels
		zmod_channel, zmod_channel_created = self.channels.get_or_create(name='-zmod')
		zmean_channel, zmean_channel_created = self.channels.get_or_create(name='-zmean')
		zbf_channel, zbf_channel_created = self.channels.get_or_create(name='-zbf')
		zcomp_channel, zcomp_channel_created = self.channels.get_or_create(name='-zcomp')

		# iterate over frames
		for t in range(self.series.ts):
			print('step01 | creating zmod, zmean, zbf, zcomp t{}/{}...'.format(t+1, self.series.ts), end='\r')

			# load gfp
			gfp_gon = self.gons.get(t=t, channel__name='0')
			gfp = exposure.rescale_intensity(gfp_gon.load() * 1.0)
			gfp = gf(gfp, sigma=sigma) # <<< SMOOTHING

			# load bf
			bf_gon = self.gons.get(t=t, channel__name='1')
			bf = exposure.rescale_intensity(bf_gon.load() * 1.0)

			# initialise images
			Z = np.zeros(self.series.shape(d=2), dtype=int)
			Zmean = np.zeros(self.series.shape(d=2))
			Zbf = np.zeros(self.series.shape(d=2))

			# loop over image
			Z = np.argmax(gfp, axis=2) + delta_z

			# outliers
			Z[Z<0] = 0
			Z[Z>self.series.zs-1] = self.series.zs-1

			# loop over levels
			for level in range(bf.shape[2]):
				bf[:,:,level] = convolve(bf[:,:,level], np.ones((R,R)))
				Zbf[Z==level] = bf[Z==level,level]

			Zmean = 1 - np.mean(gfp, axis=2) / np.max(gfp, axis=2)

			# images to channel gons
			zmod_gon, zmod_gon_created = self.gons.get_or_create(experiment=self.experiment, series=self.series, channel=zmod_channel, t=t)
			zmod_gon.set_origin(0,0,0,t)
			zmod_gon.set_extent(self.series.rs, self.series.cs, 1)

			zmod_gon.array = Z
			zmod_gon.save_array(self.series.experiment.composite_path, template)
			zmod_gon.save()

			zmean_gon, zmean_gon_created = self.gons.get_or_create(experiment=self.experiment, series=self.series, channel=zmean_channel, t=t)
			zmean_gon.set_origin(0,0,0,t)
			zmean_gon.set_extent(self.series.rs, self.series.cs, 1)

			zmean_gon.array = exposure.rescale_intensity(Zmean * 1.0)
			zmean_gon.save_array(self.series.experiment.composite_path, template)
			zmean_gon.save()

			zbf_gon, zbf_gon_created = self.gons.get_or_create(experiment=self.experiment, series=self.series, channel=zbf_channel, t=t)
			zbf_gon.set_origin(0,0,0,t)
			zbf_gon.set_extent(self.series.rs, self.series.cs, 1)

			zbf_gon.array = Zbf
			zbf_gon.save_array(self.series.experiment.composite_path, template)
			zbf_gon.save()
Example no. 41
def zloc(x):
    return int((x-zl)/dz)# + 1 

def yloc(x):
    return int((x-yl)/dy)# + 1 

for i in xrange(phi_z.size):
    if phi_z[i] > zu or phi_z[i] < zl: continue
    if phi_y[i] > yu or phi_y[i] < yl: continue 
    zpos = zloc(phi_z[i]) 
    ypos = yloc(phi_y[i])
    a[ypos, zpos] += abs(intensity[i])

fig = plt.figure(figsize=(7,7), dpi=100)
ax = fig.add_subplot(1, 1, 1)
a2 = gf(a, 10.0)

ylabels = range(int(yl),int(yu))
ylocs = [yloc(x) for x in ylabels]
ylabels = ['$'+str(x).strip()+'$' for x in ylabels]

zlabels = range(int(zl),int(zu),3)
zlocs = [zloc(x) for x in zlabels]
zlabels = ['$'+str(x).strip()+'$' for x in zlabels]

a2 /= a2.max() 
s = plt.imshow(a2, cmap=cm.jet)
plt.yticks(ylocs, ylabels)
plt.xticks(zlocs, zlabels)
plt.xlabel('mas')
plt.ylabel('mas')
Example no. 42
	def find_protrusions(self):
		# load mask image and find edge
		mask_img = self.load()
		edge_img = edge_image(mask_img)

		# get list of points that lie on the edge: points_rc
		points_r, points_c = np.where(edge_img)
		points_rc = [list(lm) for lm in list(zip(points_r, points_c))]

		# sort points using a fixed radius
		count, max_count, sorted_points = roll_edge_v1(points_rc)

		if count<max_count:
			# get cell centre and calculate distances of edge points from this point
			cell_centre = np.array([self.r, self.c])
			distances = np.array([np.linalg.norm(cell_centre - np.array(p)) for p in sorted_points])

			# smooth to aid peak finding and shift the points to leave the smallest distance at zero
			argmin = np.argmin(distances)
			distances = np.roll(distances, -argmin)
			distances = gf(distances, sigma=2)

			# find peaks
			peaks = find_peaks(distances, np.array([9]))

			# shift peaks back to their original positions
			true_peaks = np.array(peaks) + argmin
			true_peaks[true_peaks>=len(sorted_points)] -= len(sorted_points) # rotate

			# find end points
			protrusion_end_points = [sorted_points[peak] for peak in true_peaks]

			# create new protrusion for each end point
			for protrusion_end_point in protrusion_end_points:
				relative_end_point = cell_centre - np.array(protrusion_end_point)

				# parameters
				r = relative_end_point[0]
				c = relative_end_point[1]
				length_from_centre = np.linalg.norm(relative_end_point * self.series.scaling()) # in microns
				length_from_mean = length_from_centre - np.mean(distances)
				orientation_from_horizontal = math.atan2(relative_end_point[0], relative_end_point[1])

				# print(self.cell_instance.pk, self.pk, r, c, length_from_centre, orientation_from_horizontal)

				protrusion, protrusion_created = self.protrusions.get_or_create(experiment=self.experiment,
																																				series=self.series,
																																				cell=self.cell,
																																				cell_instance=self.cell_instance,
																																				channel=self.channel,
																																				region=self.region,
																																				region_instance=self.region_instance,
																																				r=r,
																																				c=c)
				if protrusion_created:
					protrusion.length = length_from_centre
					protrusion.length_from_mean = length_from_mean
					protrusion.orientation = orientation_from_horizontal
					protrusion.save()

			return 'success', len(protrusion_end_points)
		else:
			return 'success', 0
Example no. 43
from __future__ import division
from SSTV import *
from scipy.ndimage.filters import gaussian_filter as gf
import numpy as np
from scipy import misc

# Set up
fraction_coeffs = 0.04
image = misc.imread('imgs/lena.bmp') # load an image: {dude, cal, lake,...}
original = np.copy(image)
_delta = 0.1
or_shape = image.shape

# Gaussian Filter [IMPORTANT] -- LPF
image = gf(image, 0.2) 
image = join_imgs([ map2multiple(im, 64) for im in split_img(image)]) # for weird shapes, map2multiple
yccimgs = split_img(rgb2ycc(image))
wavelet='bior4.4'

print('Original shape:',or_shape)
print('New shape:',image.shape)

dwts = []
ds_ycc = []
compressed_imgs = []
us_ycc = []

sample_facts = [2,4,4]

print('Downsampling YCC channels...')
for i in range(len(yccimgs)):
Example no. 44
def BlobExtract(vidname,ith=30,pth=0,skip = 0,fnum = 500 ,nbuff = 241,fac=8):

    #ith : intensity differece 
    #pth : threshold for number of pixels in certain label
    #skip: skip first N frame
    #maskpath : 0 :no mask , others : mask path 
    #fnum : how many frame you want to process
    #nbuff : buffer size
    #fac : downsampling scale

    import os
    import sys
    import cv2
    import pickle
    import numpy as np
    from scipy.ndimage.filters import gaussian_filter as gf
    from scipy.ndimage.measurements import label
    
    
    if os.path.isfile(vidname):
        cap = cv2.VideoCapture(vidname)
    else:
        print("Error!! Can not find the file...")
        sys.exit()

    nFrame = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    nrow  = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
    ncol  = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)

    tind  = nbuff//2
    frame = np.zeros([nrow,ncol,3])
    img   = np.zeros([nrow,ncol,3])
    bkg   = np.zeros([nrow,ncol,3],dtype=float)
    buff  = np.zeros([nbuff,nrow,ncol,3])
    diff  = np.zeros([nrow,ncol,3])
    diffb = np.zeros([nrow/fac,ncol/fac])
    rows  = np.arange(nrow*ncol/(fac*fac)).reshape(nrow/fac,ncol/fac) // (ncol/fac)
    cols  = np.arange(nrow*ncol/(fac*fac)).reshape(nrow/fac,ncol/fac) % (ncol/fac)

    timestamp = {}

    maskpath = Choosemask()

    if not maskpath:
        mask = np.ones([nrow,ncol])
    else:
        import pickle
        mask = pickle.load(open(maskpath,"rb"))

    # -- initialize tblob dictionary
    Blobdict = {}
    
     
    # -- initialize the display                                                                                                           
    fig, ax = plt.subplots(2,2,figsize=[10,7.5])
    fig.subplots_adjust(0,0,1,1,0,0)
    [j.axis('off') for i in ax for j in i]
    im = [ax[0][0].imshow(frame,), ax[0][1].imshow(frame),
          ax[1][0].imshow(frame,'gray'), ax[1][1].imshow(diffb,'gray')]
    im[2].set_clim([20,128])
    im[3].set_clim([15,25])
    fig.canvas.draw()


    cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,skip)


# -- loop through frames                                                                                                              
    if fnum == -1:
        pfnum = nFrame-skip #process frame number
    else:
        pfnum = min(nFrame-skip,fnum)


    for ii in range(pfnum):    
        print("frame {0}\r".format(ii+skip)),
        sys.stdout.flush()
        # read the frame                                                                                                                  
        chk, frame[:,:,:] = cap.read()
        # load buffer and update background                                                                                               
        if ii<nbuff:
            buff[ii,:,:,:] = frame[:,:,:]
            continue
        elif ii==nbuff:
            bkg[:,:,:] = buff.mean(0)
            cnt = 0
            continue
        else:
            bind = cnt % nbuff
            img[:,:,:]  = buff[(bind+tind)%nbuff]
            bkg[:,:,:] -= buff[bind]/float(nbuff)
            bkg[:,:,:] += frame/float(nbuff)
            buff[bind]  = frame
            cnt += 1

            # calculate the difference image then rebin and smooth                                                
            diff[:,:,:] = img-bkg   
            diffb[:,:]  = gf((np.abs(diff).mean(2)*mask).reshape(nrow/fac,fac,ncol/fac,fac                            
                                                          ).mean(1).mean(-1),[3,1])                                              
            
            try:
                [i.remove() for i in recs]
            except:
                pass

            labcnt = 0
            blobs = {}
            labs = label(diffb>ith)
            nlab = labs[1]
            if nlab>0:
                rcen = np.zeros(nlab)
                ccen = np.zeros(nlab)
                rmm  = np.zeros([nlab,2])
                cmm  = np.zeros([nlab,2])
                recs = []

                for jj in range(nlab):
                    if sum((labs[0]==(jj+1)).flatten())>pth:
                        tlab = jj+1
                        lind = labs[0]==tlab
                        trow = rows[lind]
                        tcol = cols[lind]
                        rcen[jj] = trow.mean()*fac
                        ccen[jj] = tcol.mean()*fac
                        rmm[jj]  = [trow.min()*fac,trow.max()*fac]
                        cmm[jj]  = [tcol.min()*fac,tcol.max()*fac]
                        recs.append(ax[0][0].add_patch(
                            plt.Rectangle((cmm[jj,0],rmm[jj,0]),
                                          cmm[jj,1]-cmm[jj,0],
                                          rmm[jj,1]-rmm[jj,0],
                                          facecolor='none',
                                          edgecolor='orange',
                                          lw=2)))

                        labcnt += 1
                        blobs[labcnt] = [[cmm[jj,0],rmm[jj,0]],[cmm[jj,1]-cmm[jj,0],rmm[jj,1]-rmm[jj,0]]]

                text = ax[0][0].annotate(str(labcnt),xy=(ncol-40,nrow-50),color = 'red',fontsize = 20)
                im[0].set_data(img.astype(np.uint8)[:,:,::-1])
                im[1].set_data(bkg.astype(np.uint8)[:,:,::-1])
                im[2].set_data(np.abs(diff).mean(2))
                im[3].set_data(diffb)
                fig.canvas.set_window_title('Frame {0}'.format(ii+skip))
                fig.canvas.draw()
                ax[0][0].texts.remove(text)
            blobs[0] = labcnt
Example no. 45
    if r >= 0:
        ridx = row_zero[r]
    if c >= 0:
        cidx = col_zero[c]
    return m[:ridx,:cidx]

# Set up
fraction_coeffs = 1
image_fname = 'imgs/lena.bmp'
image = misc.imread(image_fname) # load an image: {dude, cal, lake,...}
original = np.copy(image)
_delta = 0.1 
or_shape = image.shape

# Gaussian Filter [IMPORTANT] -- LPF
image = gf(image, 0.15) 
image = join_imgs([ map2multiple(im, 64) for im in split_img(image)]) # for weird shapes, map2multiple
yccimgs = split_img(rgb2ycc(image))
wavelet='bior4.4'

print('Original shape:',or_shape)
print('New shape:',image.shape)

dwts = []
ds_ycc = []
compressed_imgs = []
us_ycc = []

sample_facts = [2,4,4]

print('Downsampling YCC channels...')
Example no. 46
def canny1d(lcs, indices=None, width=30, delta=2, see=False, sig_clip_iter=10, 
            sig_clip_amp=2.0, sig_peaks=10.0, xcheck=True, sig_xcheck=2.0):

    # -- defaults
    if indices==None:
        nwin    = lcs.lcs.shape[0]
        indices = range(nwin)
        print("DST_CANNY1D: running edge detector for all " + 
              "{0} windows...".format(nwin))
    else:
        nwin = len(indices)
        print("DST_CANNY1D: running edge detector for " + 
              "{0} windows...".format(nwin))


    # -- utilities
    lcg       = np.zeros(lcs.lcs.shape[1:])
    dlcg      = np.ma.zeros(lcg.shape)
    dlcg.mask = dlcg>314
    ind_onoff = []
    ints      = np.arange(lcs.lcs.shape[0])


    # -- loop through windows
    for ii, index in enumerate(indices):
        if ii%100==0:
            print("DST_CANNY1D:   {0} of {1}".format(ii,nwin))

        # -- smooth each band
        for band in [0,1,2]:
            lcg[:,band] = gf(lcs.lcs[index,:,band],width)


        # -- compute Gaussian difference and set mask edges
        dlcg[:,:]          = np.roll(lcg,-delta,0)-np.roll(lcg,delta,0)
        dlcg.mask[:width]  = True
        dlcg.mask[-width:] = True


        # -- plot
        if see:
            plt.figure(6)
            plt.clf()
            plt.subplot(2,2,2)
            plt.plot(dlcg[:,0], lw=2)
            plt.ylim([1.2*dlcg.min(),1.2*dlcg.max()])
            plt.subplot(2,2,3)
            plt.plot(dlcg[:,1], lw=2)
            plt.ylim([1.2*dlcg.min(),1.2*dlcg.max()])
            plt.subplot(2,2,4)
            plt.plot(dlcg[:,2], lw=2)
            plt.ylim([1.2*dlcg.min(),1.2*dlcg.max()])

        # -- sigma clip
        for _ in range(10):
            avg = dlcg.mean(0)
            sig = dlcg.std(0)
            dlcg.mask = np.abs(dlcg-avg) > sig_clip_amp*sig

            if see:
                plt.subplot(2,2,2)
                plt.plot(dlcg[:,0], lw=2)
                plt.subplot(2,2,3)
                plt.plot(dlcg[:,1], lw=2)
                plt.subplot(2,2,4)
                plt.plot(dlcg[:,2], lw=2)


        # -- set mean and std and reset the mask
        avg                = dlcg.mean(0)
        sig                = dlcg.std(0)
        dlcg.mask[:,:]     = False
        dlcg.mask[:width]  = True
        dlcg.mask[-width:] = True


        # -- find peaks in RGB
        ind_on_rgb, ind_off_rgb = [], []

        tags_on  = (dlcg-avg > sig_peaks*sig) & \
            (dlcg>np.roll(dlcg,1,0)) & \
            (dlcg>np.roll(dlcg,-1,0)) & \
            ~dlcg.mask

        tags_off = (dlcg-avg < -sig_peaks*sig) & \
            (dlcg<np.roll(dlcg,1,0)) & \
            (dlcg<np.roll(dlcg,-1,0)) & \
            ~dlcg.mask

        for band in [0,1,2]:
            ind_on_rgb.append([i for i in ints[tags_on[:,band]]])
            ind_off_rgb.append([ i for i in ints[tags_off[:,band]]])


        # -- collapse RGB indices
        for iind in ind_on_rgb[0]:
            for jind in ind_on_rgb[1]:
                if abs(iind-jind)<=2:
                    ind_on_rgb[1].remove(jind)
            for jind in ind_on_rgb[2]:
                if abs(iind-jind)<=2:
                    ind_on_rgb[2].remove(jind)

        for iind in ind_on_rgb[1]:
            for jind in ind_on_rgb[2]:
                if abs(iind-jind)<=2:
                    ind_on_rgb[2].remove(jind)

        ind_on_list = ind_on_rgb[0] + ind_on_rgb[1] + ind_on_rgb[2]

        for iind in ind_off_rgb[0]:
            for jind in ind_off_rgb[1]:
                if abs(iind-jind)<=2:
                    ind_off_rgb[1].remove(jind)
            for jind in ind_off_rgb[2]:
                if abs(iind-jind)<=2:
                    ind_off_rgb[2].remove(jind)

        for iind in ind_off_rgb[1]:
            for jind in ind_off_rgb[2]:
                if abs(iind-jind)<=2:
                    ind_off_rgb[2].remove(jind)

        ind_off_list = ind_off_rgb[0] + ind_off_rgb[1] + ind_off_rgb[2]


        # -- cross check left/right means for robustness to noise
        if xcheck:
            rtwd = np.sqrt(width)

            for on in [_ for _ in ind_on_list]:
                mn_l  = lcs.lcs[index,on-width:on].mean(1).mean()
                err_l = lcs.lcs[index,on-width:on].mean(1).std()
                mn_r  = lcs.lcs[index,on:on+width].mean(1).mean()
                err_r = lcs.lcs[index,on:on+width].mean(1).std()

                if abs(mn_r-mn_l)<(sig_xcheck*max(err_l,err_r)):
                    ind_on_list.remove(on)

            for off in [_ for _ in ind_off_list]:
                mn_l  = lcs.lcs[index,off-width:off].mean(1).mean()
                err_l = lcs.lcs[index,off-width:off].mean(1).std()
                mn_r  = lcs.lcs[index,off:off+width].mean(1).mean()
                err_r = lcs.lcs[index,off:off+width].mean(1).std()

                if abs(mn_r-mn_l)<(sig_xcheck*max(err_l,err_r)):
                    ind_off_list.remove(off)


        # -- add to on/off list
        tind_onoff = np.array(ind_on_list + [-j for j in ind_off_list])

        ind_onoff.append(tind_onoff[np.argsort(np.abs(tind_onoff))])

#        if see:
#            plt.subplot(2,2,2)
#            plt.plot(np.arange(dlcg.shape[0])[on_ind[:,0]],
#                     dlcg[on_ind[:,0],0], 'go')


    return ind_onoff
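
# A minimal standalone sketch of the iterative sigma clipping used above,
# assuming only NumPy; sig_clip_amp, n_iter and the synthetic series are
# illustrative values, not taken from the snippet.
import numpy as np

def sigma_clip(x, sig_clip_amp=3.0, n_iter=10):
    """Iteratively mask samples farther than sig_clip_amp standard deviations
    from the mean of the currently unmasked samples."""
    xm = np.ma.masked_array(x, mask=np.zeros(x.shape, dtype=bool))
    for _ in range(n_iter):
        avg, sig = xm.mean(), xm.std()
        xm.mask = np.abs(x - avg) > sig_clip_amp * sig
    return xm

# usage: a noisy series with two injected spikes
rng = np.random.default_rng(0)
series = rng.normal(0.0, 1.0, 500)
series[[100, 300]] += 20.0
clipped = sigma_clip(series)
print(clipped.mask.sum(), "samples clipped")  # the two spikes end up masked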
Example n. 47
0
plt.hist(zs["LSSTc"]["resolved"],bins=numpy.linspace(0,5.5,56),density=True,fc="red",alpha=0.6)
plt.xlabel(r"redshift")
plt.ylabel(r"Lenses per bin")
plt.tight_layout()
if save:plt.savefig("/home/ttemp/papers/LensPop/LSSTz.pdf")
#plt.show()
plt.cla()

a,bins=numpy.histogram(sigl["Euclid"]["resolved"],bins=numpy.linspace(100,400,81),density=True)
b,bins1=numpy.histogram(sigl["CFHTa"]["resolved"],bins=numpy.linspace(100,400,81),density=True)
c,bins2=numpy.histogram(sigl["LSSTc"]["resolved"],bins=numpy.linspace(100,400,81),density=True)
#c*=2
d,bins3=numpy.histogram(sigl["DESc"]["resolved"],bins=numpy.linspace(100,400,81),density=True)
from scipy.ndimage.filters import gaussian_filter as gf
idx=2
a[idx:]=gf(a,2)[idx:]
b=gf(b,4)
c=gf(c,4)
d=gf(d,4)

#d*=4
db=bins[1]-bins[0]
db1=bins1[1]-bins1[0]
db2=bins2[1]-bins2[0]
db3=bins3[1]-bins3[0]

plt.plot(bins[:-1]+db/2.,a,lw=3,c='k',label="Euclid")
plt.plot(bins2[:-1]+db2/2.,c,lw=3,c='b',label="LSST")
plt.plot(bins3[:-1]+db3/2.,d,lw=3,c='r',label="DES")
plt.plot(bins1[:-1]+db1/2.,b,lw=3,c='g',label="CFHT")
plt.legend()
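
# A small self-contained sketch of the pattern above: histogram a sample with
# density normalisation, smooth the binned values with a Gaussian, and plot at
# the bin centres; the synthetic sample stands in for the sigl arrays.
import numpy as np
from scipy.ndimage import gaussian_filter as gf
import matplotlib.pyplot as plt

sample = np.random.default_rng(1).normal(250, 40, 10000)
h, edges = np.histogram(sample, bins=np.linspace(100, 400, 81), density=True)
h = gf(h, 4)                                   # same smoothing width as b, c, d above
centres = edges[:-1] + (edges[1] - edges[0]) / 2.0
plt.plot(centres, h, lw=3)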
def gauss_filter(im, sigma = 10):
    from scipy.ndimage.filters import gaussian_filter as gf
    import numpy as np
    return gf(im, sigma = sigma)
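
# A short usage sketch for gauss_filter above, on a synthetic noisy image;
# the array size and sigma are illustrative.
import numpy as np

noisy = np.random.default_rng(2).normal(size=(64, 64))
smoothed = gauss_filter(noisy, sigma=3)
print(noisy.std(), smoothed.std())  # the smoothed image has much lower variance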
Example n. 49
0
        cnt = 0
        continue
    else:
        bind = cnt % nbuff
        img[:,:,:]  = buff[(bind+tind)%nbuff]
        bkg[:,:,:] -= buff[bind]/float(nbuff)
        bkg[:,:,:] += frame/float(nbuff)
        buff[bind]  = frame
        cnt += 1


    # calculate the difference image then rebin and smooth
    diff[:,:,:] = img-bkg
    #diffb[:,:]  = gf((np.abs(diff).mean(2)*mask).reshape(nrow/fac,fac,ncol/fac,fac
    #                                              ).mean(1).mean(-1),[3,1])
    diffb[:,:]  = gf(np.abs(diff).mean(2).reshape(nrow//fac,fac,ncol//fac,fac
                                                  ).mean(1).mean(-1),[3,1])

    
    # update plots by removing labels then updating
    try:
        [i.remove() for i in recs]
    except:
        pass

    labcnt = 0
    labs = label(diffb>30)
    nlab = labs[1]


    if nlab>0:
        rcen = np.zeros(nlab)
Example n. 50
0
    elif ii==nbuff:
        bkg[:,:,:] = buff.mean(0)
        cnt = 0
        continue
    else:
        bind = cnt % nbuff
        img[:,:,:]  = buff[(bind+tind)%nbuff]
        bkg[:,:,:] -= buff[bind]/float(nbuff)
        bkg[:,:,:] += frame/float(nbuff)
        buff[bind]  = frame
        cnt += 1


    # calculate the difference image then rebin and smooth
    diff[:,:,:] = img-bkg
    diffb[:,:]  = gf((np.abs(diff).mean(2)*mask).reshape(nrow//fac,fac,ncol//fac,fac
                                                  ).mean(1).mean(-1),[1,1])
    #diffb[:,:]  = gf(np.abs(diff).mean(2).reshape(nrow/fac,fac,ncol/fac,fac
    #                                              ).mean(1).mean(-1),[1,1]) 

    
    # update plots by removing labels then updating
    try:
        [i.remove() for i in recs]
    except:
        pass

    labcnt = 0
    labs = label(diffb>30)
    nlab = labs[1]
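
# A minimal sketch of the ring-buffer running background used in this snippet
# and the previous one, assuming single-channel frames; nbuff and the synthetic
# frame stream are illustrative.
import numpy as np

nbuff = 8
frames = np.random.default_rng(3).normal(size=(100, 32, 32))  # fake video

buff = frames[:nbuff].copy()          # fill the buffer with the first frames
bkg = buff.mean(0)                    # initial background estimate
cnt = 0
for frame in frames[nbuff:]:
    bind = cnt % nbuff                # buffer slot to recycle
    bkg -= buff[bind] / float(nbuff)  # drop the oldest frame from the mean
    bkg += frame / float(nbuff)       # add the newest frame to the mean
    buff[bind] = frame
    diff = frame - bkg                # difference image for change detection
    cnt += 1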

Example n. 51
0
from scipy import misc
from encoding2 import encode_ar, decode_ar, slice_box, compress_np, decompress_np
import itertools as its
from SSTV.wavcompress import getSize
from SSTV.wavcompress import expand_dwt

# Set up
fraction_coeffs = .01
image_fname = 'imgs/lena.bmp'
image = misc.imread(image_fname)  # load an image: {dude, cal, lake,...}; on SciPy >= 1.2, use imageio.imread instead
original = np.copy(image)
_delta = 0.1 
or_shape = image.shape

# Gaussian Filter [IMPORTANT] -- LPF
image = gf(image, 0.08) 
image = join_imgs([ map2multiple(im, 64) for im in split_img(image)]) # for weird shapes, map2multiple
yccimgs = split_img(rgb2ycc(image))
wavelet='bior4.4'

print('Original shape:',or_shape)
print('New shape:',image.shape)

dwts = []
ds_ycc = []
compressed_imgs = []
us_ycc = []

sample_facts = [0,2,2]

print('Downsampling YCC channels...')
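
# A minimal sketch of low-pass filtering before subsampling (the role of the
# gf call above), assuming a single synthetic channel; the sigma and the
# subsampling factor are illustrative and larger than the 0.08 used above.
import numpy as np
from scipy.ndimage import gaussian_filter

chan = np.random.default_rng(4).normal(size=(128, 128))
sigma = 1.0
fact = 2                                  # keep every second sample per axis

lowpassed = gaussian_filter(chan, sigma)
subsampled = lowpassed[::fact, ::fact]    # attenuating high frequencies first
print(subsampled.shape)                   # reduces aliasing in the result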
Example n. 52
0
 def filterf(self):
     """Gaussian filtering of velocity """
     from scipy.ndimage.filters import gaussian_filter as gf
     self._obj['u'] = xr.DataArray(gf(self._obj['u'],1),dims=('x','y'))
     self._obj['v'] = xr.DataArray(gf(self._obj['v'],1),dims=('x','y'))
     return self._obj
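
# A usage sketch for the filterf method above, assuming the accessor wraps an
# xarray Dataset with 2-D 'u' and 'v' velocity fields; the synthetic grid is
# illustrative.
import numpy as np
import xarray as xr
from scipy.ndimage import gaussian_filter as gf

rng = np.random.default_rng(5)
ds = xr.Dataset({'u': (('x', 'y'), rng.normal(size=(32, 32))),
                 'v': (('x', 'y'), rng.normal(size=(32, 32)))})

ds['u'] = xr.DataArray(gf(ds['u'], 1), dims=('x', 'y'))  # same smoothing as filterf
ds['v'] = xr.DataArray(gf(ds['v'], 1), dims=('x', 'y'))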
Example n. 53
0
def mod_zmod(composite, mod_id, algorithm, **kwargs):
    # template
    template = composite.templates.get(name="source")  # SOURCE TEMPLATE

    # channels
    zmod_channel, zmod_channel_created = composite.channels.get_or_create(name="-zmod")
    zmean_channel, zmean_channel_created = composite.channels.get_or_create(name="-zmean")
    zbf_channel, zbf_channel_created = composite.channels.get_or_create(name="-zbf")
    zcomp_channel, zcomp_channel_created = composite.channels.get_or_create(name="-zcomp")

    # constants
    delta_z = -8
    size = 5
    sigma = 5
    template = composite.templates.get(name="source")

    # iterate over frames
    for t in range(composite.series.ts):
        print("step01 | processing mod_zmod t{}/{}...".format(t + 1, composite.series.ts), end="\r")

        # load gfp
        gfp_gon = composite.gons.get(t=t, channel__name="0")
        gfp = exposure.rescale_intensity(gfp_gon.load() * 1.0)
        gfp = gf(gfp, sigma=sigma)  # <<< SMOOTHING

        # load bf
        bf_gon = composite.gons.get(t=t, channel__name="1")
        bf = exposure.rescale_intensity(bf_gon.load() * 1.0)

        # initialise images
        Z = np.zeros(composite.series.shape(d=2), dtype=int)
        Zmean = np.zeros(composite.series.shape(d=2))
        Zbf = np.zeros(composite.series.shape(d=2))
        Zcomp = np.zeros(composite.series.shape(d=2))

        # loop over image
        for r in range(composite.series.rs):
            for c in range(composite.series.cs):

                # scan
                data = scan_point(gfp, composite.series.rs, composite.series.cs, r, c, size=size)
                normalised_data = np.array(data) / np.max(data)

                # data
                z = int(np.argmax(normalised_data))
                cz = z + delta_z  # corrected z
                mean = 1.0 - np.mean(normalised_data)  # 1 - mean
                bfz = bf[r, c, cz]

                Z[r, c] = cz
                Zmean[r, c] = mean
                Zbf[r, c] = bfz
                Zcomp[r, c] = bfz * mean

        # images to channel gons
        zmod_gon, zmod_gon_created = composite.gons.get_or_create(
            experiment=composite.experiment, series=composite.series, channel=zmod_channel, t=t
        )
        zmod_gon.set_origin(0, 0, 0, t)
        zmod_gon.set_extent(composite.series.rs, composite.series.cs, 1)

        zmod_gon.array = Z
        zmod_gon.save_array(composite.series.experiment.composite_path, template)
        zmod_gon.save()

        zmean_gon, zmean_gon_created = composite.gons.get_or_create(
            experiment=composite.experiment, series=composite.series, channel=zmean_channel, t=t
        )
        zmean_gon.set_origin(0, 0, 0, t)
        zmean_gon.set_extent(composite.series.rs, composite.series.cs, 1)

        zmean_gon.array = exposure.rescale_intensity(Zmean * 1.0)
        zmean_gon.save_array(composite.series.experiment.composite_path, template)
        zmean_gon.save()

        zbf_gon, zbf_gon_created = composite.gons.get_or_create(
            experiment=composite.experiment, series=composite.series, channel=zbf_channel, t=t
        )
        zbf_gon.set_origin(0, 0, 0, t)
        zbf_gon.set_extent(composite.series.rs, composite.series.cs, 1)

        zbf_gon.array = Zbf
        zbf_gon.save_array(composite.series.experiment.composite_path, template)
        zbf_gon.save()

        zcomp_gon, zcomp_gon_created = composite.gons.get_or_create(
            experiment=composite.experiment, series=composite.series, channel=zcomp_channel, t=t
        )
        zcomp_gon.set_origin(0, 0, 0, t)
        zcomp_gon.set_extent(composite.series.rs, composite.series.cs, 1)

        zcomp_gon.array = Zcomp
        zcomp_gon.save_array(composite.series.experiment.composite_path, template)
        zcomp_gon.save()
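
# A minimal sketch of the per-pixel "best z" scan that mod_zmod performs via
# scan_point (not shown here), assuming a (rows, cols, z) GFP stack; the
# synthetic stack and sigma are illustrative, and the neighbourhood scan of
# scan_point is reduced to a single-pixel profile.
import numpy as np
from scipy.ndimage import gaussian_filter as gf

stack = np.random.default_rng(6).random(size=(64, 64, 20))   # (r, c, z)
smoothed = gf(stack, sigma=5)

Z = smoothed.argmax(axis=2)                                  # z of the peak response
Zmean = 1.0 - smoothed.mean(axis=2) / smoothed.max(axis=2)   # 1 - normalised mean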
Example n. 54
0
def res_lc_plot():

    # -- read in the residential data
    lcs = []

    for night in range(22):
        fopen = open(os.path.join(os.environ['DST_WRITE'],'res_lc_night_' + 
                                  str(night).zfill(2) + '_2.pkl'),'rb')
        lcs.append(pkl.load(fopen))
        fopen.close()


    # -- utilities
    tcks, htimes = time_ticks()


    # -- define fri/sat and all other days
#    we = [0,6,7,13,14,20,21]
#    wd = [1,2,3,4,5,8,9,10,11,12,15,16,17,18,19]
    we = [0,6,7,13,14,20,21]
    wd = [1,2,3,4,5,8,9,10,11,12,15,16,17,18,19]


    # -- get mean averaged lc
    tmax = min([len(i.mean(0)) for i in lcs if len(i.mean(0))>3000])
    mnwd = np.zeros(tmax)
    mnwe = np.zeros(tmax)

    cnt = 0.0
    for idy in wd:
        if idy==8: continue
        if len(lcs[idy].mean(0))>3000:
            mnwd += lcs[idy].mean(0)[:tmax]
            cnt += 1.0
    mnwd /= cnt

    cnt = 0.0
    for idy in we:
        if idy==21: continue
        if len(lcs[idy].mean(0))>3000:
            mnwe += lcs[idy].mean(0)[:tmax]
            cnt += 1.0
    mnwe /= cnt

    plt.figure(1,figsize=[5,5])
    plt.xticks(tcks,htimes,rotation=30)
    plt.ylabel('intensity [arb units]',size=15)
    plt.xlim([0,3600])
    plt.grid(True)
    plt.plot(mnwd,'k')
    plt.plot(mnwe,'r')
    plt.savefig('../output/res_lc_mean.png')
    plt.close()


    # -- open the figure
    plt.figure(0,figsize=[15,5])
    plt.subplots_adjust(0.05,0.1,0.95,0.95)

    plt.subplot(131)
    for idy in wd: 
#        plt.plot(mf(lcs[idy][0]/lcs[idy][0,0],6))
#        plt.plot(mf(lcs[idy].mean(0)/lcs[idy].mean(0)[0],6))
#        cdf = np.cumsum(lcs[idy].mean(0))
#        plt.plot(cdf/cdf[360]/(np.arange(cdf.size)/360.))
        lc_sm = gf(lcs[idy].mean(0)/lcs[idy].mean(0)[0],6)
        norm = (lc_sm-lc_sm[-1])
#        plt.plot(lc_sm/lc_sm[0])
        plt.plot(norm/norm[0])
#        plt.plot(gf((lc_sm-np.roll(lc_sm,1))[1:-1],360))
#    plt.ylim([0.8,1.2])
#    plt.ylim([0.6,1.4])
    plt.ylim([-0.1,1.4])
    plt.grid(True)
    plt.xticks(tcks,htimes,rotation=30)
    plt.xlim([0,3600])
    plt.ylabel('intensity [arb units]',size=15)

    plt.subplot(132)
    for idy in we: 
#        plt.plot(mf(lcs[idy][0]/lcs[idy][0,0],6))
#        plt.plot(mf(lcs[idy].mean(0)/lcs[idy].mean(0)[0],6))
#        cdf = np.cumsum(lcs[idy].mean(0))
#        plt.plot(cdf/cdf[360]/(np.arange(cdf.size)/360.))
        lc_sm = gf(lcs[idy].mean(0)/lcs[idy].mean(0)[0],6)
        norm = (lc_sm-lc_sm[-1])
#        plt.plot(lc_sm/lc_sm[0])
        plt.plot(norm/norm[0])
#        plt.plot(gf((lc_sm-np.roll(lc_sm,1))[1:-1],360))
#    plt.ylim([0.6,1.4])
#    plt.ylim([0.8,1.2])
    plt.ylim([-0.1,1.4])
    plt.grid(True)
    plt.xticks(tcks,htimes,rotation=30)
    plt.xlim([0,3600])
    plt.ylabel('intensity [arb units]',size=15)

    plt.subplot(133)
    for idy in wd: 
#        plt.plot(mf(lcs[idy][0]/lcs[idy][0,0],6),'k')
        plt.plot(mf(lcs[idy].mean(0)/lcs[idy].mean(0)[0],6),'k')
    for idy in we: 
#        plt.plot(mf(lcs[idy][0]/lcs[idy][0,0],6),'r')
        plt.plot(mf(lcs[idy].mean(0)/lcs[idy].mean(0)[0],6),'r')
    plt.ylim([0.6,1.4])
    plt.grid(True)
    plt.xticks(tcks,htimes,rotation=30)
    plt.xlim([0,3600])
    plt.ylabel('intensity [arb units]',size=15)

    plt.savefig('../output/res_lc_all.png')
    plt.close()

    return
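
# A minimal sketch of the light-curve normalisation used in the panels above:
# smooth the curve relative to its first sample, subtract the final level,
# then rescale so it starts at 1; the synthetic curve is illustrative.
import numpy as np
from scipy.ndimage import gaussian_filter as gf

t = np.arange(3600)
rng = np.random.default_rng(7)
lc = 1.0 + 0.4 * np.exp(-t / 1200.0) + 0.01 * rng.normal(size=t.size)

lc_sm = gf(lc / lc[0], 6)        # smoothed curve, relative to its first sample
norm = lc_sm - lc_sm[-1]         # remove the late-night baseline
norm = norm / norm[0]            # starts at 1 and decays towards 0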
def test_filter(learning_rate=0.1, n_epochs=1000, nkerns=[3, 512],
            batch_size=200, verbose=True):
    """
    Wrapper function for testing LeNet on SVHN dataset

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer

    :type batch_size: int
    :param batch_size: number of examples in minibatch.

    :type verbose: boolean
    :param verbose: to print out epoch summary or not to.

    """

    rng = numpy.random.RandomState(23455)

    datasets = load_data()

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
    n_train_batches //= batch_size
    n_valid_batches //= batch_size
    n_test_batches //= batch_size

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print('... building the model')

    # Reshape matrix of rasterized images of shape (batch_size, 3 * 32 * 32)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    layer0_input = x.reshape((batch_size, 3, 32, 32))

    # TODO: Construct the first convolutional pooling layer
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        # (batch size, num input feature maps,image height, image width)
        image_shape=(batch_size,3,32,32),
        # number of filters, num input feature maps,filter height, filter width)
        filter_shape=(nkerns[0],3,5,5),
        poolsize=(2,2)
    )

    # TODO: Construct the second convolutional pooling layer
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        # (32-5+1)/2
        image_shape=(batch_size,nkerns[0],14,14),
        filter_shape=(nkerns[1],nkerns[0],5,5),
        poolsize=(2,2)
    )

    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e matrix of rasterized images).
    layer2_input = layer1.output.flatten(2)

    # TODO: construct a fully-connected sigmoidal layer
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        # (14-5+1)/2
        n_in=nkerns[1] * 5 * 5,
        n_out=500,
        activation=T.nnet.sigmoid
    )

    # TODO: classify the values of the fully-connected sigmoidal layer
    layer3 = LogisticRegression(
         input=layer2.output,
         n_in=500,
         n_out=10)

    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)

    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # TODO: create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1.params + layer0.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by
    # SGD Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    ###############
    # TRAIN MODEL #
    ###############
    print('... training')

    train_nn(train_model, validate_model, test_model,
        n_train_batches, n_valid_batches, n_test_batches, n_epochs, verbose)
    
    mean_w_0 = layer0.W.get_value().mean()

    plt.figure()
    for knkerns0 in range(nkerns[0]):
        for kch in range(3):
            plt.subplot(3,3,knkerns0*3+kch+1)
            plt.imshow(layer0.W.get_value()[knkerns0,kch,:,:])
    plt.suptitle('trained filter')
    
    
    ###########################################################################
    ###########################################################################
    ###########################################################################
    
    filter_shape_input = (nkerns[0],3,5,5)

    pt_input = numpy.zeros((filter_shape_input[2],filter_shape_input[3]))
    pt_input[(filter_shape_input[2]-1)//2,(filter_shape_input[3]-1)//2]=1.0
    
    W = numpy.zeros(filter_shape_input)
    
    from scipy.ndimage.filters import gaussian_filter as gf    
    
    for knkerns0 in range(nkerns[0]):
        for kch in range(3):
            W[knkerns0,kch,:,:]=gf(pt_input,(knkerns0+1.0))
            W[knkerns0,kch,:,:] = W[knkerns0,kch,:,:]/W[knkerns0,kch,:,:].mean()*mean_w_0
    
    W = theano.shared(W,borrow=True)
    # TODO: Construct the first convolutional pooling layer
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        # (batch size, num input feature maps,image height, image width)
        image_shape=(batch_size,3,32,32),
        # number of filters, num input feature maps,filter height, filter width)
        filter_shape=filter_shape_input,
        poolsize=(2,2)
    )
    layer0.W = W

    # TODO: Construct the second convolutional pooling layer
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        # (32-5+1)/2
        image_shape=(batch_size,nkerns[0],14,14),
        filter_shape=(nkerns[1],nkerns[0],5,5),
        poolsize=(2,2)
    )

    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e matrix of rasterized images).
    layer2_input = layer1.output.flatten(2)

    # TODO: construct a fully-connected sigmoidal layer
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        # (14-5+1)/2
        n_in=nkerns[1] * 5 * 5,
        n_out=500,
        activation=T.nnet.sigmoid
    )

    # TODO: classify the values of the fully-connected sigmoidal layer
    layer3 = LogisticRegression(
         input=layer2.output,
         n_in=500,
         n_out=10)

    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)

    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # TODO: create a list of all model parameters to be fit by gradient descent
    # the param of layer0 is excluded
    params = layer3.params + layer2.params + layer1.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by
    # SGD Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    ###############
    # TRAIN MODEL #
    ###############
    print('... training')

    train_nn(train_model, validate_model, test_model,
        n_train_batches, n_valid_batches, n_test_batches, n_epochs, verbose)

    plt.figure()
    for knkerns0 in range(nkerns[0]):
        for kch in range(3):
            plt.subplot(3,3,knkerns0*3+kch+1)
            plt.imshow(layer0.W.get_value()[knkerns0,kch,:,:])
    plt.suptitle('pre-defined filter')
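
# A standalone sketch of how the pre-defined filters above are built: Gaussian
# filtering a unit impulse yields a discrete Gaussian kernel; the kernel size
# and sigmas are illustrative.
import numpy as np
from scipy.ndimage import gaussian_filter as gf

ksize = 5
impulse = np.zeros((ksize, ksize))
impulse[ksize // 2, ksize // 2] = 1.0

kernels = np.stack([gf(impulse, sigma) for sigma in (1.0, 2.0, 3.0)])
print(kernels.shape)             # (3, 5, 5)
print(kernels.sum(axis=(1, 2)))  # each kernel sums to ~1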
Example n. 56
0
	def handle(self, *args, **options):
		# vars
		experiment_name = options['expt']
		series_name = options['series']

		if experiment_name!='' and series_name!='':
			experiment = Experiment.objects.get(name=experiment_name)
			series = experiment.series.get(name=series_name)

			# cell_instance = series.cell_instances.get(t=48, cell__pk=4)
			# cell_instance = series.cell_instances.get(t=49, cell__pk=9)
			# cell_instance = series.cell_instances.get(pk=198)

			# load mask image
			# 1. for each cell mask, load mask image
			outlines = {}
			colours = ['red','green','blue','purple']
			for i, cell_mask in enumerate(cell_instance.masks.filter(channel__name__contains='zunique')):
				mask_img = cell_mask.load()

				# get edge
				edge_img = edge_image(mask_img)

				# get list of points
				points_r, points_c = np.where(edge_img)
				points = [list(lm) for lm in list(zip(points_r, points_c))]

				sorted_points = roll_edge_v1(points)

				# plot distances in order
				cell_centre = np.array([cell_mask.r, cell_mask.c])
				distances = np.array([np.linalg.norm(cell_centre - np.array(p)) for p in sorted_points])
				argmin = np.argmin(distances)
				distances = np.roll(distances, -argmin)
				distances = gf(distances, sigma=2)
				# plt.plot(distances)
				# plt.scatter(cell_mask.c, cell_mask.r)

				# find peaks in distance array
				peaks = find_peaks(distances, np.array([9]))
				# plt.scatter(peaks, [distances[peak] for peak in peaks])

				# roll back to find true peak positions
				true_peaks = np.array(peaks) + argmin
				true_peaks[true_peaks>=len(sorted_points)] -= len(sorted_points)

				# find end point of protrusions
				protrusion_end_points = [sorted_points[peak] for peak in true_peaks]

				for protrusion_end_point in protrusion_end_points:
					print('new protrusion for cell mask {} for cell instance {}'.format(cell_mask.pk, cell_instance.pk))
					relative_end_point = cell_centre - np.array(protrusion_end_point)
					print(cell_centre, protrusion_end_point)
					print('length from centre: {} microns'.format(np.linalg.norm(relative_end_point * series.scaling())))
					print('orientation: {} degrees'.format(180 / math.pi * math.atan2(relative_end_point[0], relative_end_point[1])))

				# plt.scatter([sorted_points[peak][1] for peak in true_peaks], [sorted_points[peak][0] for peak in true_peaks])

				# plot outlines to check
				plt.plot([point[1] for point in sorted_points], [point[0] for point in sorted_points], label='radius: 2')
				# plt.scatter(points_c, points_r, label=cell_mask.channel.name, color=colours[i])

			# plt.legend()
			# plt.axis('equal')
			plt.show()

		else:
			print('Please enter an experiment')
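
# A minimal sketch of the boundary-distance peak search above, on a synthetic
# outline with three protrusions; scipy.signal.find_peaks stands in for the
# find_peaks helper used in the snippet.
import numpy as np
from scipy.ndimage import gaussian_filter as gf
from scipy.signal import find_peaks

theta = np.linspace(0, 2 * np.pi, 360, endpoint=False)
radius = 20 + 5 * np.cos(3 * theta - 1.0)   # centre-to-boundary distance profile
profile = gf(radius, sigma=2)               # smooth, as in the snippet

peaks, _ = find_peaks(profile)              # candidate protrusion tips
print(len(peaks), "protrusions found")      # 3 for this outline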
Example n. 57
0
from scipy.ndimage.filters import gaussian_filter as gf
from scipy.interpolate import *
from scipy import integrate
import numpy as np
import pylab
import matplotlib.pyplot as plt
import matplotlib.image as pim
import math
from PIL import Image  # Image.open is used below but PIL was not imported
import interpimage
import time
from matplotlib.widgets import Button
 
 
sigma = 5
im1=np.array(Image.open('images/blacksquare.png'), dtype=float)
gauss=gf(im1,sigma,2)
 
alpha = 0.51
beta = 1
gamma = 5
ts = 100
points = 500
thrs = 30
 
#Apply gaussian
Ix=gf(im1,sigma,(1,0))
Iy=gf(im1,sigma,(0,1))
 
Ixx=gf(im1,sigma,(2,0))
Iyy=gf(im1,sigma,(0,2))
Ixy=gf(im1,sigma,(1,1))
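
# A short sketch of the derivative-of-Gaussian calls above: the third argument
# of gaussian_filter is the derivative order per axis; the synthetic test image
# stands in for blacksquare.png.
import numpy as np
from scipy.ndimage import gaussian_filter as gf

img = np.zeros((64, 64))
img[16:48, 16:48] = 1.0              # bright square on a dark background

Ix = gf(img, 3, (1, 0))              # first derivative along axis 0
Iy = gf(img, 3, (0, 1))              # first derivative along axis 1
grad_mag = np.hypot(Ix, Iy)          # edges of the square light up
print(grad_mag.max())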
Example n. 58
0
a = np.zeros((nc,nc),dtype=np.float32) 
zl = xv.min() - 5.0
zu = xv.max() + 5.0
yl = yv.min() - 5.0
yu = yv.max() + 5.0 
lz = zu - zl 
ly = yu - yl
print(lz, ly)
dz = lz/nc 
dy = -ly/nc # Because "y" coordinate increases in opposite direction to "y" array index of a (or a2).

def zloc(cood):
    return int((cood-zl)/dz) + 1 

def yloc(cood):
    return int((cood-yl)/dy) + 1 

for i in range(xv.size):
    zpos = zloc(xv[i]) 
    ypos = yloc(yv[i])
    a[ypos, zpos] += 1.0

a2 = gf(a, 1.0)

save_data = False 
if save_data: 
    a2.tofile('mockdata_3d_nc100.dat') # Save for fitjet_3d.py 

plt.imshow(a2, cmap=cm.Blues) 
plt.show()
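
# A compact alternative sketch of the manual binning loop above (up to the
# axis orientation handled by the negative dy), assuming similar xv/yv point
# arrays; np.histogram2d does the counting in one call.
import numpy as np
from scipy.ndimage import gaussian_filter as gf

rng = np.random.default_rng(8)
xv = rng.normal(0.0, 10.0, 5000)
yv = rng.normal(0.0, 10.0, 5000)
nc = 100

counts, zedges, yedges = np.histogram2d(xv, yv, bins=nc)
smoothed = gf(counts, 1.0)       # same smoothing as a2 = gf(a, 1.0) above
print(smoothed.shape)            # (100, 100)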