Example #1
def test_rot180():
    import cudamat
    import numpy as np
    num_images = 100
    img_size = 50
    img_tot_size = 50*50


    inputs = np.random.randn(num_images, img_tot_size)
    inputs[1:] = (np.random.rand(*inputs[1:].shape)<0.5)
    inputs[0] = (np.random.rand(*inputs[1].shape)<0.005)

    targets = np.random.randn(num_images, img_tot_size)

    cu_inputs = cudamat.CUDAMatrix(inputs.T)
    cu_targets = cudamat.CUDAMatrix(targets.T)

    cudamat._cudamat.rot180(cu_inputs.p_mat, cu_targets.p_mat, 0)

    cua_targets = cu_targets.asarray().T

    targets = np.array([x[::-1,::-1]
                        for x in inputs.reshape(num_images, img_size, img_size)]).reshape(num_images, img_tot_size)

    print(abs(targets - cua_targets).max())

    from pylab import imshow, subplot, gray
    gray()
    subplot(221)
    imshow(inputs[0].reshape(img_size, img_size), interpolation='nearest')
    subplot(222)
    imshow(targets[0].reshape(img_size, img_size), interpolation='nearest')

    subplot(223)
    imshow(cua_targets[0].reshape(img_size, img_size), interpolation='nearest')
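As a side note on the NumPy reference used above: the [::-1, ::-1] double flip is exactly a 180-degree rotation, the same thing np.rot90 applied twice gives. A minimal sanity check, independent of cudamat:

import numpy as np

x = np.arange(12).reshape(3, 4)
# flipping both axes is a 180-degree rotation
assert (x[::-1, ::-1] == np.rot90(x, 2)).all()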
Example #2
def test_copyOutOf():
    import cudamat
    import numpy as np
    num_images = 100
    img_size = 50
    target_size = 72
    img_tot_size = img_size**2
    target_tot_size = target_size**2

    targets = np.random.randn(target_tot_size, num_images)<-2
    inputs = np.zeros((img_tot_size, num_images))

    cu_inputs = cudamat.CUDAMatrix(inputs)
    cu_targets = cudamat.CUDAMatrix(targets)

    assert (target_size - img_size) % 2 == 0
    padding = (target_size - img_size) // 2
    cudamat._cudamat.copy_out_of_center(cu_targets.p_mat, cu_inputs.p_mat, padding, 0)

    cua_inputs = cu_inputs.asarray()

    #print abs(targets - cua_targets).max()

    from pylab import imshow, subplot, gray
    gray()

    #subplot(221)
    #imshow(inputs[0].reshape(img_size, img_size), interpolation='nearest')
    subplot(222)
    imshow(targets[:,1].reshape(target_size, target_size), interpolation='nearest')

    subplot(223)
    imshow(cua_inputs[:,1].reshape(img_size, img_size), interpolation='nearest')
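For orientation, a small NumPy-only sketch of the centre crop that copy_out_of_center is expected to perform on one column, assuming each column is a row-major flattened target_size x target_size image (which is how the imshow calls in this test interpret the data):

import numpy as np

target_size, img_size = 72, 50
padding = (target_size - img_size) // 2           # 11
col = np.random.randn(target_size * target_size)  # one flattened 72x72 target
center = col.reshape(target_size, target_size)[padding:padding + img_size,
                                               padding:padding + img_size]
assert center.shape == (img_size, img_size)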
Example #3
def makeTestPair(paths, homography, collection, location=".", size=(250,250), scale = 1.0) :
    """ Given a pair of paths to two images and a homography between them,
        this function creates two crops and calculates a new homography.
        input: paths [strings] (paths to images)
               homography [numpy.ndarray] (3 by 3 array homography)
               collection [string] (The name of the testset)
               location [string] (The location (path) of the testset)
               size [(int, int)] (The size of an image crop in pixels)
               scale [double] (The scale by which we resize the crops after they've been cropped)
        out:   nothing
    """
    
    # Get width and height
    width, height = size
    
    # Load images in black/white
    images = list(map(loadImage, paths))
    
    # Crop part of first image and part of second image:
    (top_o, left_o) = (random.randint(0, images[0].shape[0]-height), random.randint(0, images[0].shape[1]-width))
    (top_n, left_n) = (random.randint(0, images[1].shape[0]-height), random.randint(0, images[1].shape[1]-width))
    
    # Get two file names
    c_path = getRandPath("%s/%s/" % (location, collection))
    if not exists(dirname(c_path)) : makedirs(dirname(c_path))
        
    # Make sure we save as gray
    pylab.gray()
    
    im1 = images[0][top_o: top_o + height, left_o: left_o + width]
    im2 = images[1][top_n: top_n + height, left_n: left_n + width]
    im1_scaled = imresize(im1, size=float(scale), interp='bicubic')
    im2_scaled = imresize(im2, size=float(scale), interp='bicubic')
    pylab.imsave(c_path + "_1.jpg", im1_scaled)
    pylab.imsave(c_path + "_2.jpg", im2_scaled)
    
    # Homography for translation (crop 1 coordinates -> image 1 coordinates)
    T1 = numpy.identity(3)
    T1[0,2] = left_o
    T1[1,2] = top_o
    
    # Homography for translation back (image 2 coordinates -> crop 2 coordinates)
    T2 = numpy.identity(3)
    T2[0,2] = -1*left_n
    T2[1,2] = -1*top_n
    
    # Homography for scale
    Ts = numpy.identity(3)
    Ts[0,0] = scale
    Ts[1,1] = scale
    
    # Homography for scale back
    Tsinv = numpy.identity(3)
    Tsinv[0,0] = 1.0/scale
    Tsinv[1,1] = 1.0/scale
    
    # Combine homographies and save
    hom = Ts.dot(T2).dot(homography).dot(T1).dot(Tsinv)
    hom = hom / hom[2,2]
    numpy.savetxt(c_path, hom)
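A minimal hedged sketch of how the saved homography is meant to be used, assuming hom is the 3x3 matrix written by numpy.savetxt above and that points are homogeneous pixel coordinates [x, y, 1] (the file path below is hypothetical, standing in for c_path):

import numpy

hom = numpy.loadtxt("some_test_pair")   # hypothetical path; c_path inside the function above
p1 = numpy.array([10.0, 20.0, 1.0])     # a point in the first (scaled) crop
p2 = hom.dot(p1)
p2 = p2 / p2[2]                         # normalise the homogeneous coordinate
print(p2[:2])                           # corresponding point in the second (scaled) crop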
Example #4
def plotSources(im, sources, schema):
    plt.clf()
    plt.imshow(im.getArray(), origin='lower', interpolation='nearest',
               vmin=-100, vmax=500)
    plt.gray()
    shapekey = schema.find('shape.sdss').key
    xykey = schema.find('centroid.sdss').key

    flagkeys = [schema.find(x).key for x in [
        'shape.sdss.flags.maxiter', 'shape.sdss.flags.shift',
        'shape.sdss.flags.unweighted', 'shape.sdss.flags.unweightedbad']]
    flagabbr = ['M','S','U','B']

    for source in sources:
        quad = source.get(shapekey)
        ixx,iyy = quad.getIxx(), quad.getIyy()
        x,y = source.get(xykey)
        sx,sy = sqrt(ixx),sqrt(iyy)
        plt.gca().add_artist(Ellipse([x,y], 2.*sx, 2.*sy, angle=0.,
                                     ec='r', fc='none', lw=2, alpha=0.5))
        fs = ''
        for j,key in enumerate(flagkeys):
            val = source.get(key)
            if val:
                fs += flagabbr[j]
        if len(fs):
            plt.text(x+5, y, fs, ha='left', va='center')
Example #5
def show_weights(layer):        
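    # walk back from the given layer towards the InputLayer, keeping the last
    # Conv2DDNNLayer seen on the way (i.e. the network's first convolutional layer)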
    while not isinstance(layer, lasagne.layers.InputLayer):
        if isinstance(layer, dnn.Conv2DDNNLayer):
            first_conv_layer = layer
        layer = layer.input_layer
    
    weights = first_conv_layer.get_params()[0].get_value()
    weights_no = weights.shape[0]
    
    display_size = int(math.sqrt(weights_no)) + 1
    
    print('display_size : %s' % display_size)
    
    pylab.gray() 
    for i in range(display_size):
        for j in range(display_size):
            index = i * display_size + j + 1
            
            if index >= weights_no:
                break

            print('index : %s' % index)
    
            one_weight = weights[index][0]
            pylab.subplot(display_size, display_size, index) 
            pylab.axis('off') 
            pylab.imshow(one_weight)
    
    pylab.show()
Example #6
def _test():
    # make a circle of radius 60 going counter-clockwise
    radius = 60
    theta = np.linspace(0, 2*np.pi, 10)
    circle_ccw = np.array([radius*np.cos(theta), radius*np.sin(theta)])
    area = ContourArea(circle_ccw)
    assert area > 0
    circle_cw = MakeClockwise(circle_ccw)
    area = ContourArea(circle_cw)
    assert area < 0
    assert (circle_cw == circle_ccw[:,::-1]).all() # it actually got reversed
    p = circle_cw + np.array([[280],[430]])

    plb.ion()
    plb.figure(0)
    plb.gray()
    i = plb.imread('mri2.png')
    i = np.mean(i, axis=2)
    plb.imshow(i)
    global _contour
    _contour, = plb.plot(np.append(p[0,-1], p[0]),np.append(p[1,-1], p[1]))
    plb.draw()
    Snake2D(i, p, iterations=500)
    print('done')
    plb.ioff()
    plb.savefig('mri-result.png')
    plb.show()
Example #7
def main():
    butils = measDeblend.BaselineUtilsF

    foot = buildExample2()

    fbb = foot.getBBox()
    mask1 = afwImg.MaskU(fbb.getWidth(), fbb.getHeight())
    mask1.setXY0(fbb.getMinX(), fbb.getMinY())
    afwDet.setMaskFromFootprint(mask1, foot, 1)

    if plt:
        plt.clf()
        plt.imshow(mask1.getArray(), origin='lower', interpolation='nearest',
                   extent=(fbb.getMinX(), fbb.getMaxX(), fbb.getMinY(), fbb.getMaxY()))
        plt.gray()
        plt.savefig('foot2.png')

        sfoot = butils.symmetrizeFootprint(foot, 355, 227)

        mask2 = afwImg.MaskU(fbb.getWidth(), fbb.getHeight())
        mask2.setXY0(fbb.getMinX(), fbb.getMinY())
        afwDet.setMaskFromFootprint(mask2, sfoot, 1)

        plt.clf()
        plt.imshow(mask2.getArray(), origin='lower', interpolation='nearest',
                   extent=(fbb.getMinX(), fbb.getMaxX(), fbb.getMinY(), fbb.getMaxY()))
        plt.gray()
        plt.savefig('sfoot3.png')
Example #8
    def plot(self):
        """
        
        .. plot::
            :include-source:
            :width: 80%
            
            from cellnopt.simulate import *
            from cellnopt.core import *
            pkn = cnodata("PKN-ToyPB.sif")
            midas = cnodata("MD-ToyPB.csv")
            s = boolean.BooleanSimulator(CNOGraph(pkn, midas))
            s.simulate(30)
            s.plot()
        """
        
        pylab.clf()

        data = numpy.array([self.data[x] for x in self.species if x in self.data])
        data = data.transpose()
        data = 1 - pylab.flipud(data)

        pylab.pcolor(data, vmin=0, vmax=1, edgecolors="k")
        pylab.xlabel("species"); 
        pylab.ylabel("Time (tick)");
        pylab.gray()

        pylab.xticks([0.5+x for x in range(0,30)], self.species, rotation=90)

        pylab.ylim([0, self.tick])
        pylab.xlim([0, len(self.species)])
Example #9
    def plot_matches(self, name, show_below = True, match_maximum = None):
        """ Display the two images with the matching points connected by lines.
          Input: im1, im2 (images as arrays), locs1, locs2 (feature point coordinates),
             matchscores (the output of match()),
             show_below (True to show the images below the matches)"""
        im1 = self._image_1.get_array_image()
        im2 = self._image_2.get_array_image()
        self.appendimages()
        im3 = self._append_image
        if self._match_score is None:
            self.match()
        locs1 = self._image_1.get_shift_location()
        locs2 = self._image_2.get_shift_location()
        if show_below:
            im3 = numpy.vstack((im3,im3))
        pylab.figure(dpi=160)
        pylab.gray()
        pylab.imshow(im3, aspect = 'auto')

        cols1 = im1.shape[1]
        match_num = 0
        for i,m in enumerate(self._match_score):
            if m > 0 : 
                pylab.plot([locs1[i][0],locs2[m][0]+cols1], [locs1[i][1],locs2[m][1]], 'c')
                match_num = match_num + 1
            if match_maximum is not None and match_num >= match_maximum:
                break
        pylab.axis('off')
        pylab.savefig(name, dpi=160)
Example #10
def demo_graymapping():
    """
    Gray-level mapping: change the distribution of the gray values.
    """
    im = np.array(Image.open('./computer_vision/scenery.jpg').convert('L'))
    """Reverse mapping"""
    im_r = 255 - im
    """Mapping gray space to specific range[100, 200]"""
    im_f = (100.0 / 255) * im + 100
    """Square pixel value"""
    im_s = 255 * (im/ 255.0)**2
    """Show image"""
    pl.figure('Gray space mapping')  
    pl.gray()
    pl.subplot(2,2,1)
    pl.title('original gray image')
    pl.imshow(im)
    pl.subplot(2,2,2)
    pl.title('Reversed gray')
    pl.imshow(im_r)
    pl.subplot(2,2,3)
    pl.title('Mapping to a [100,200]')
    pl.imshow(im_f)
    pl.subplot(2,2,4)
    pl.title('Square pixel value')
    pl.imshow(im_s)    
    pl.show()
Example #11
def randoms(S=10, N=1, GA=10, GS=10):
    butils = measDeblend.BaselineUtilsF
    mim = afwImg.MaskedImageF(S, S)
    x0, y0 = S/2, S/2
    peak = makePeak(x0, y0)
    im = mim.getImage().getArray()

    for i in range(N):

        X, Y = np.meshgrid(np.arange(S), np.arange(S))
        R2 = (X-x0)**2 + (Y-y0)**2

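        # Gaussian noise floor plus a circular Gaussian bump (amplitude GA, width GS) centred on the peak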
        im[:, :] = np.random.normal(10, 1, size=im.shape) + GA * np.exp(-0.5 * R2 / GS**2)

        if plt:
            plt.clf()
            ima = dict(vmin=im.min(), vmax=im.max(), origin='lower', interpolation='nearest')
            plt.imshow(im, **ima)
            plt.gray()
            plt.savefig('Rim%i.png' % i)
            butils.makeMonotonic(mim.getImage(), peak)
            plt.clf()
            plt.imshow(mim.getImage().getArray(), **ima)
            plt.gray()
            plt.savefig('Rim%im.png' % i)
Example #12
 def savepng(pre, img, title=None, **kwargs):
     fn = '%s-%s.png' % (pre, idstr)
     print('Saving', fn)
     plt.clf()
     plt.imshow(img, **kwargs)
     ax = plt.axis()
     if debug:
         print(len(xplotx), len(allobjx))
         for i,(objx,objy,objc) in enumerate(zip(allobjx,allobjy,allobjc)):
             plt.plot(objx,objy,'-',c=objc)
             tempx = []
             tempx.append(xplotx[i])
             tempx.append(objx[0])
             tempy = []
             tempy.append(xploty[i])
             tempy.append(objy[0])
             plt.plot(tempx,tempy,'-',c='purple')
         plt.plot(pointx,pointy,'y.')
         plt.plot(xplotx,xploty,'xg')
     plt.axis(ax)
     if title is not None:
         plt.title(title)
     plt.colorbar()
     plt.gray()
     plt.savefig(fn)
Example #13
def explore_data(data, images, target):

	# try to determine the type of data...
	print("data_type belonging to key data:")
	try:
		print(np.dtype(data))
	except TypeError as err:
		print(err)

	print("It has dimension", np.shape(data))

	# plot a 3
	
	# get indices of all threes in target
	threes = np.where(target == 3)[0]
	# assert that threes is not empty
	assert len(threes) > 0
	# choose the first 3
	three_indx = threes[0]
	# get the image
	img = images[three_indx]

	#plot it
	plot.figure()
	plot.gray()
	plot.imshow(img, interpolation = "nearest")
	plot.show()
	plot.close()
Example #14
def visualize_walkthrough():
	x_batch = sample_x_from_data_distribution(20)
	z_batch = gen(x_batch, test=True)
	if use_gpu:
		z_batch.to_cpu()

	fig = pylab.gcf()
	fig.set_size_inches(16.0, 16.0)
	pylab.clf()
	if config.img_channel == 1:
		pylab.gray()
	
	z_a = z_batch.data[:10,:]
	z_b = z_batch.data[10:,:]
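	# walk the latent space: linearly interpolate from z_a (column 0) to z_b (column 9)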
	for col in range(10):
		_z_batch = z_a * (1 - col / 9.0) + z_b * col / 9.0
		_z_batch = Variable(_z_batch)
		if use_gpu:
			_z_batch.to_gpu()
		_x_batch = dec(_z_batch, test=True)
		if use_gpu:
			_x_batch.to_cpu()
		for row in range(10):
			pylab.subplot(10, 10, row * 10 + col + 1)
			if config.img_channel == 1:
				pylab.imshow(np.clip((_x_batch.data[row] + 1.0) / 2.0, 0.0, 1.0).reshape((config.img_width, config.img_width)), interpolation="none")
			elif config.img_channel == 3:
				pylab.imshow(np.clip((_x_batch.data[row] + 1.0) / 2.0, 0.0, 1.0).reshape((config.img_channel, config.img_width, config.img_width)), interpolation="none")
			pylab.axis("off")
				
	pylab.savefig("%s/walk_through.png" % args.visualization_dir)
Example #15
    def updateColorTable(self, cItem):
        print("now viz!" + str(cItem.row()) + "," + str(cItem.column()))

        row = cItem.row()
        col = cItem.column()

        pl.clf()
        #pl.ion()
        x = pl.arange(self.dataDimen+1)
        y = pl.arange(self.dataDimen+1)
        X, Y = pl.meshgrid(x, y)
        pl.subplot(1,2,1)
        pl.pcolor(X, Y, self.mWx[row*self.dataMaxRange+col])
        pl.gca().set_aspect('equal')
        pl.colorbar()
        pl.gray()
        pl.title("user 1")

        pl.subplot(1,2,2)
        pl.pcolor(X, Y, self.mWy[row*self.dataMaxRange+col])
        pl.gca().set_aspect('equal')
        pl.colorbar()
        pl.gray()
        pl.title("user 2")
        #pl.tight_layout()

        pl.draw()
        #pl.show()
        pl.show(block=False) 
Example #16
 def demo(self):
     # plt.imshow(self.imgsobel*255/self.sobelmax)
     # plt.gray()
     # plt.show()
     plt.imshow(255-self.cannyOperation())
     plt.gray()
     plt.show()
Example #17
def compare_keypoints(im1, im2, pos1, pos2, filename = None, separation = 0) :
    """ Show two images next to each other with the keypoints marked
    """

    # Construct unified image
    im3 = append_images(im1,im2, separation)

    # Find the offset and add it
    offset = im1.shape[1]
    pos2_o = [(x+offset + separation,y) for (x,y) in pos2]

    # Create figure
    fig = pylab.figure(frameon=False, figsize=(12.0, 8.0))
    #ax = pylab.Axes(fig, [0., 0., 1., 1.])

    # Show images
    pylab.gray()
    pylab.imshow(im3)
    pylab.plot([x for x,y in pos1], [y for x,y in pos1], marker='o', color = '#00aaff', lw=0)
    pylab.plot([x for x,y in pos2_o], [y for x,y in pos2_o], marker='o', color = '#00aaff', lw=0)
    pylab.axis('off')

    pylab.xlim(0,im3.shape[1])
    pylab.ylim(im3.shape[0],0)

    if filename is not None:
        fig.savefig(filename, bbox_inches='tight', dpi=300)
Example #18
 def plot_matches(self, name = "harris_match.jpg", show_below = True, match_maximum = None):
     """ Display the two images with the matching points connected by lines.
     Input:
     show_below (True to show the images below the matches)"""
     if self._append_image is None:
         self.appendimages()
     im1   = self._image_1.get_array_image()
     im2   = self._image_2.get_array_image()
     im3   = self._append_image
     if self._image_1.get_harris_point() is None:
         self._image_1.make_harris_points()
     if self._image_2.get_harris_point() is None:
         self._image_2.make_harris_points()
     locs1 = self._image_1.get_harris_point()
     locs2 = self._image_2.get_harris_point()
     if show_below:
         im3 = numpy.vstack((im3,im3))
     pylab.figure(dpi=160)
     pylab.gray()
     pylab.imshow(im3, aspect = "auto")
     
     cols1 = im1.shape[1]
     if self._match_score is None:
         self.match()
     if match_maximum is not None:
         self._match_score = self._match_score[:match_maximum]
     for i,m in enumerate(self._match_score):
         if m>0: 
             pylab.plot([locs1[i][1],locs2[m][1]+cols1],[locs1[i][0],locs2[m][0]],'c')
     pylab.axis('off')
     pylab.savefig(name, dpi=160)
Example #19
def main():
    """Compute the SSIM index on two input images specified on the cmd line."""
    import pylab
    argv = sys.argv
    if len(argv) != 3:
        print('usage: python -m sp.ssim image1.tif image2.tif', file=sys.stderr)
        sys.exit(2)

    try:
        from PIL import Image
        img1 = numpy.asarray(Image.open(argv[1]))
        img2 = numpy.asarray(Image.open(argv[2]))
    except Exception as e:
        e = 'Cannot load images: ' + str(e)
        print(e, file=sys.stderr)

    ssim_map = ssim(img1, img2)
    ms_ssim = msssim(img1, img2)

    pylab.figure()
    pylab.subplot(131)
    pylab.title('Image1')
    pylab.imshow(img1, interpolation='nearest', cmap=pylab.gray())
    pylab.subplot(132)
    pylab.title('Image2')
    pylab.imshow(img2, interpolation='nearest', cmap=pylab.gray())
    pylab.subplot(133)
    pylab.title('SSIM Map\n SSIM: %f\n MSSSIM: %f' % (ssim_map.mean(), ms_ssim))
    pylab.imshow(ssim_map, interpolation='nearest', cmap=pylab.gray())
    pylab.show()
    
    return 0
Example #20
def plotAVTable(experiment):
    pylab.figure()
    pylab.gray()
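    # params is an 81x4 action-value table (9x9 grid, 4 actions); plot the greedy
    # value of each state as a 9x9 map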
    pylab.pcolor(experiment.agent.module.params.reshape(81,4).max(1).reshape(9,9),
            shading='faceted')
    pylab.title("Action-Value table, %s, Run %d" %
            (experiment.agent.learner.__class__.__name__, experiment.stepid))
Example #21
def main():
    A = pl.imread(IMAGE_FILE)

    i = 1
    pc_values = (1, 5, 10, 20, 30, 40)
    for num_pcs in pc_values:

        # perform (truncated) pca
        egvecs, proj, egvals = pca(A, num_pcs)

        # reconstruct image
        A_rec = np.dot(egvecs, proj).T + np.mean(A, axis=0)
         
        # create subplot
        ax = pl.subplot(2, 3, i, frame_on=False)
        ax.xaxis.set_major_locator(pl.NullLocator())
        ax.yaxis.set_major_locator(pl.NullLocator())

        # draw
        pl.imshow(A_rec)
        pl.title("{} pc's".format(num_pcs))
        pl.gray()

        i += 1

    pl.show()
Example #22
def display_head(set_x, set_y, n = 5):
    '''
    show some figures based on gray image matrixs
    
    @type set_x: TensorSharedVariable, 
    @param set_x: gray level value matrix of the figures
    
    @type set_y: TensorVariable, 
    @param set_y: label of the figures    
    
    @type n: int, 
    @param n: numbers of figure to be display, less than 10, default 5
    '''
    import pylab
    
    if n > 10: n = 10
    img_x = set_x.get_value()[0:n].reshape(n, 28, 28)
    img_y = set_y.eval()[0:n]
    
    for i in range(n): 
        pylab.subplot(1, n, i+1); 
        pylab.axis('off'); 
        pylab.title(' %d' % img_y[i])
        pylab.gray()
        pylab.imshow(img_x[i])
Example #23
	def interference_maker(self):
		wavelength = float(input("input wavelength: "))
		k = 2*pi/wavelength
		amplitude = float(input("input amplitude: "))
		distance_between_centers = float(input("input distance between centers: "))
		length = 100.0
		num_points = 500
		spacing = length/num_points

		first_x = length/2 + distance_between_centers/2
		second_x = length/2 - distance_between_centers/2

		first_y = length/2
		second_y = first_y

		points_array = empty([num_points, num_points], float)

		for i in range(num_points):
			y = spacing*i
			for j in range(num_points):
				x = spacing*j
				r1 = sqrt((x-first_x)**2 + (y - first_y)**2)
				r2 = sqrt((x-second_x)**2 + (y - second_y)**2)
				points_array[i, j] = amplitude*(sin(k*r1)+sin(k*r2))

		imshow(points_array, origin="lower", extent=[0, length, 0, length])
		gray()
		show()
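The nested loops above can also be written in vectorised form. A hedged sketch of the same two-source interference computation with NumPy arrays, reusing the k, amplitude, spacing, num_points and centre coordinates defined in the method:

import numpy as np

x = np.arange(num_points) * spacing
y = np.arange(num_points) * spacing
X, Y = np.meshgrid(x, y)                            # X varies along columns, Y along rows
r1 = np.sqrt((X - first_x)**2 + (Y - first_y)**2)
r2 = np.sqrt((X - second_x)**2 + (Y - second_y)**2)
points_array = amplitude * (np.sin(k*r1) + np.sin(k*r2))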
Example #24
def apply_conv(name):
    import pylab
    from PIL import Image

    # open random image of dimensions 639x516
    img = Image.open(open(name))

    # capture image height and width
    width,height = img.size

    # dimensions are (height, width, channel)
    img = numpy.asarray(img, dtype='float64') / 256.

    # put image in 4D tensor of shape (1, 3, height, width)
    img_ = img.transpose(2, 0, 1).reshape(1, 3, height, width)
    filtered_img = f(img_)

    # plot original image and first and second components of output
    pylab.subplot(1, 3, 1); pylab.axis('off'); pylab.imshow(img)
    pylab.gray();
    # recall that the convOp output (filtered image) is actually a "minibatch",
    # of size 1 here, so we take index 0 in the first dimension:
    pylab.subplot(1, 3, 2); pylab.axis('off'); pylab.imshow(filtered_img[0, 0, :, :])
    pylab.subplot(1, 3, 3); pylab.axis('off'); pylab.imshow(filtered_img[0, 1, :, :])
    pylab.show()
Example #25
def display_conv_filters(title, layer):
    """
    displays the filters as "images"
    - one row per feature map in this layer
    - one column per input into this layer (one for the first layer, 
      one per previous layer's feature maps for the next layer)
    """
    filters = layer.W  # 4D Tensor of dimensions <number of feature maps, number of inputs, height, width>
    bias = layer.b  # vector of biases, one per feature map

    pylab.gray()  # make plots greyscale

    i = 0
    num_feature_maps = len(filters.eval())
    for map_num in range(num_feature_maps):  # iterate through the feature maps
        num_inputs = len(filters.eval()[map_num])
        for input_num in range(num_inputs):  # iterate through the inputs to this feature map
            i += 1
            img_data = filters.eval()[map_num][input_num]  # extract the (array of) filter values from the tensor slice
            pylab.subplot(num_feature_maps, num_inputs, i)
            pylab.axis('off')
            pylab.imshow(img_data)  # Plot it
            if i == 1:
                pylab.title(title)
    pylab.show()
Example #26
def test():

    from PIL import Image
    import numpy
    import pylab

    import os
    p = r'C:\repository\research_code\gwb_cropped\gwb_cropped'

    imlist = [p+'/'+f for f in os.listdir(p) if 'jpg' in f]

    im = numpy.array(Image.open(imlist[0])) #open one image to get the size
    m,n = im.shape[0:2] #get the size of the images
    imnbr = len(imlist) #get the number of images

#create matrix to store all flattened images
    immatrix = numpy.array([numpy.array(Image.open(imlist[i])).flatten() for i in range(imnbr)],'f')

#perform PCA
    V,S,immean = pca(immatrix)

#mean image and first mode of variation
    immean = immean.reshape(m,n)
    mode = V[0].reshape(m,n)

#show the images
    pylab.figure()
    pylab.gray()
    pylab.imshow(immean)

    pylab.figure()
    pylab.gray()
    pylab.imshow(mode)

    pylab.show()
Example #27
    def rhoPlot(self, ccaMat, filename, winsize, framesize, dataDimen):


        dlen = len(ccaMat)
        #x = pl.arange(dlen)
        #y = pl.arange(dlen)
        print("dlen:" + str(dlen))

        order = dataDimen
        for i in range(order):
            
            pl.clf()
            #pl.ion()
            # take the first component
            cMat = ccaMat[:,:,i]

            Y,X = np.mgrid[slice(0, dlen, 1),slice(0, dlen, 1)]

            #print "len X:"+str(len(X))+", X:"+str(X)
            #X, Y = np.mgrid[0:dlen:complex(0, dlen), 0:dlen:complex(0, dlen)]
            
            pl.pcolor(X, Y, cMat)
            pl.xlim(0,dlen-1)
            pl.ylim(0,dlen-1)
            name = str(filename) + "_w-" + str(winsize) + "_f-" + str(framesize)+"_"+str(i)
            pl.title(name)
            pl.colorbar()
            pl.gray()
            pl.draw()
            outname = "cca-eig/"+name+".png"
            pl.savefig(outname)
            print("eig order:", i, " save!")
Example #28
def demo_histeq():
    """
    Gray histogram equalization: spread the gray values so that they follow a
    roughly uniform distribution. This improves the contrast of the image.
    """
    im = np.array(Image.open('./computer_vision/scenery.jpg').convert('L'))
    imhist, bins = np.histogram(im.flatten(), bins=256, density=True)  # 'density' replaces the removed 'normed' argument
    cdf = imhist.cumsum()
    """Normalization"""
    cdf = 255 * cdf / cdf[-1]
    im_n = np.interp(im.flatten(),bins[:-1], cdf)
    """
    Image after equalization.
    Note: reshape() returns a new array and does not modify the array in place,
    so we assign to the shape attribute instead.
    """
    #im_n.reshape(im.shape)
    im_n.shape = im.shape
    
    pl.figure('Histogram equalization')
    pl.subplot(1,2,1)
    pl.gray()
    pl.imshow(im)
    pl.title('Original gray image')
    pl.subplot(1,2,2)
    pl.gray()
    pl.imshow(im_n)
    pl.title('equalized image')
    pl.show()
Example #29
    def call_run(self):
        print('RlOp: running')
        # prepare plotting
        pylab.gray()
        pylab.ion()

        for i in range(1000):

            # interact with the environment (here in batch mode)
            self.experiment.doInteractions(100)
            self.agent.learn()
            self.agent.reset()

            results0 = self.table.params.reshape(2, 4, 5, 20)[0]
            results1 = self.table.params.reshape(2, 4, 5, 20)[1]
            pp.pprint(results0.argmax(2))
            pp.pprint(results1.argmax(2))

            # and draw the table
            #ar=self.table.params.reshape(2,5,4,5,4)
            #for state1 in range(len(constants.SOUNDS)):
            #    for state2 in range(4):
            #        pylab.pcolor(ar[1][state1][state2])
            #        pylab.draw()

        results0 = self.table.params.reshape(2, 4, 5, 20)[0]
        results1 = self.table.params.reshape(2, 4, 5, 20)[1]
        while True:
            time.sleep(60)
            pp.pprint(results0.argmax(2))
            pp.pprint(results1.argmax(2))
Example #30
def display_output(images, batch_size, layer, num_feature_maps):
    """
    Visualises the convolution of the last image to be processed.
    NOTE: SAMPLE CODE ONLY - ONLY USED FOR LAYER0
    """
    # Create a theano function that computes the layer0 output for a single batch
    # This declares to theano what the input source and output expression are
    f = theano.function([layer.input], layer.output)

    # recast the inputs from (batch_size, num_pixels) to a 4D tensor of size (batch_size, 1, height, width)
    # as expected by the convolutional layer (the 1 is the "depth" of the input layer)
    img = images.eval()[0: batch_size].reshape(batch_size, 1, IMAGE_HEIGHT, IMAGE_WIDTH)
    filtered_img = f(img)
    filtered_img = numpy.add(filtered_img, -1. * filtered_img.min())  # Avoid negatives by ensuring the min value is 0

    pylab.gray();

    # Plot the original image
    pylab.subplot(1, 4, 1);
    pylab.axis('off');
    pylab.imshow(img[0, 0, :, :])
    pylab.title("Original image")

    # Plot each feature map
    for map_num in range(num_feature_maps):
        pylab.subplot(1, num_feature_maps + 1, map_num + 2);
        pylab.axis('off');
        pylab.imshow(filtered_img[0, map_num, :, :])
        pylab.title("Feature map " + str(map_num))
    pylab.show()
Example #31
def compare_missed_segm(
        input_dir='/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentation',
        input_dir2='/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentationg',
        showPictures=False):

    ft = FASTex()

    (ms, dirs) = read_segm_data(input_dir)
    (ms2, dirs2) = read_segm_data(input_dir2, 'g')

    ms.extend(ms2)
    dirs.extend(dirs2)

    sumHash = {}
    for j in np.arange(0, len(ms)):
        missing_segm = ms[j]
        for image in missing_segm.keys():
            arr = missing_segm[image]
            if image not in sumHash:
                sumHash[image] = arr
                continue
            for i in range(len(arr)):
                miss_gt = arr[i]
                check = sumHash[image]
                hasGt = False
                for k in range(len(check)):
                    miss_gt2 = check[k]
                    if miss_gt == miss_gt2:
                        hasGt = True

                if not hasGt:
                    sumHash[image].append(miss_gt)

    missing_segm = ms[0]

    data = []
    dataf = []
    gt_id = 0
    columns = ['Img', 'GT Id']
    for image in sumHash.keys():
        arr = sumHash[image]
        f = None
        for i in range(len(arr)):
            orValue = False
            miss_gt = arr[i]
            row = []
            row.append(os.path.basename(image))
            row.append(gt_id)
            gt_id += 1
            rowf = []

            for j in np.arange(0, len(ms)):
                if gt_id == 1:
                    columns.append(dirs[j])
                msj = ms[j]
                hasSegmj = True
                val = 1
                if image in msj:
                    arrj = msj[image]
                    for k in range(len(arrj)):
                        miss_gtj = arrj[k]
                        if miss_gtj == miss_gt:
                            hasSegmj = False
                            val = 0
                            break

                row.append(hasSegmj)
                rowf.append(val)

                orValue = orValue or hasSegmj
            if orValue:
                rowf.append(1)

            else:
                rowf.append(0)
                if showPictures:
                    img = cv2.imread(image)
                    imgg = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
                    if f is None:

                        f, axes = plt.subplots(1, 2, figsize=(16, 3))
                        f.suptitle('Missing segmentation: {0}'.format(image))
                        ax = axes[0]
                        ax.imshow(img,
                                  cmap=pylab.gray(),
                                  interpolation='nearest')
                        ax = axes[1]
                        ax.imshow(imgg,
                                  cmap=pylab.gray(),
                                  interpolation='nearest')
                        orBox = miss_gt

                        segmentations = ft.getCharSegmentations(imgg)
                        keypoints = ft.getLastDetectionKeypoints()

                        style = 'rx'
                        for k in range(5):
                            maski = keypoints[:, 9] == k + 1
                            if k == 1:
                                style = "rv"
                            if k == 2:
                                style = "ro"
                            if k == 4:
                                style = "bo"

                            ax.plot(keypoints[maski, 0], keypoints[maski, 1],
                                    style)

                        for k in range(keypoints.shape[0]):
                            ax.plot([keypoints[k, 0], keypoints[k, 7]],
                                    [keypoints[k, 1], keypoints[k, 8]], 'r-')
                        ax = axes[0]

                    else:
                        orBox = utils.union(orBox, miss_gt)

                    line = mlines.Line2D(np.array([
                        miss_gt[0], miss_gt[2], miss_gt[2], miss_gt[0],
                        miss_gt[0]
                    ]),
                                         np.array([
                                             miss_gt[1], miss_gt[1],
                                             miss_gt[3], miss_gt[3], miss_gt[1]
                                         ]),
                                         lw=5.,
                                         alpha=0.6,
                                         color='r')
                    ax.add_line(line)

            row.append(orValue)

            data.append(row)
            dataf.append(rowf)

        if f is not None:
            ax = axes[0]
            ax.set_xlim(orBox[0] - 20, orBox[2] + 20)
            ax.set_ylim(orBox[3] + 20, orBox[1] - 20)
            ax = axes[1]
            ax.set_xlim(orBox[0] - 20, orBox[2] + 20)
            ax.set_ylim(orBox[3] + 20, orBox[1] - 20)
            plt.show()

    columns.append("OR")
    data = np.array(data)
    dataf = np.array(dataf)

    df = pandas.DataFrame(data=data, columns=columns)
    #print(df)
    sumCols = dataf.sum(0)
    sumCols = dataf.shape[0] - sumCols
    print("Missing Segmentations:")
    print(sumCols)

    indices = np.argsort(sumCols)

    bestFactor = indices[1]
    missing_segm = ms[bestFactor]
    print("Best factor: {0}".format(dirs[bestFactor]))
    maskBest = dataf[:, bestFactor] == 0
    datafSec = dataf[maskBest, :]
    sumCols = datafSec.sum(0)
    sumCols = datafSec.shape[0] - sumCols

    print("Missing Segmentations 2 best:")
    print(sumCols)

    indices = np.argsort(sumCols)
    bestFactor2 = indices[1]
    print("Best factor 2: {0}, missing segmentations: {1} -> {2}".format(
        dirs[bestFactor2], datafSec.shape[0], sumCols[indices[1]]))

    maskBest = datafSec[:, bestFactor2] == 0
    dataf3 = datafSec[maskBest, :]
    sumCols = dataf3.sum(0)
    sumCols = dataf3.shape[0] - sumCols

    indices = np.argsort(sumCols)
    bestFactor2 = indices[1]
    print("Best factor 3: {0}, missing segmentations: {1} -> {2}".format(
        dirs[bestFactor2], dataf3.shape[0], sumCols[indices[1]]))
Example #32
import harris
from imtools import imresize
import datetime

WID = 5

im1 = np.array(Image.open(r'.\Resource\crans_1_small.jpg').convert('L'))
im2 = np.array(Image.open(r'.\Resource\crans_2_small.jpg').convert('L'))

im1 = imresize(im1, (im1.shape[1] // 2, im1.shape[0] // 2))
im2 = imresize(im2, (im2.shape[1] // 2, im2.shape[0] // 2))

harrisim1 = harris.compute_harris_response(im1, 5)
filtered_coords1 = harris.get_harris_points(harrisim1, WID + 1)
d1 = harris.get_descriptors(im1, filtered_coords1, WID)

harrisim2 = harris.compute_harris_response(im2, 5)
filtered_coords2 = harris.get_harris_points(harrisim2, WID + 1)
d2 = harris.get_descriptors(im2, filtered_coords2, WID)

print("start matching:{}".format(datetime.datetime.now()))

matches = harris.match_twosided(d1, d2)

pl.figure()
pl.gray()
harris.plot_matches(im1, im2, filtered_coords1, filtered_coords2,
                    matches[:100])

print("finish matching:{}".format(datetime.datetime.now()))
pl.show()
Example #33
vol_geom['option']['WindowMinX'] += shift
vol_geom['option']['WindowMaxX'] += shift

proj_id2 = astra.create_projector('cuda', proj_geom, vol_geom)
sinogram_id2, sinogram2 = astra.create_sino(P, proj_id2)

# Shift window in y direction
vol_geom['option']['WindowMinX'] -= shift
vol_geom['option']['WindowMaxX'] -= shift
vol_geom['option']['WindowMinY'] += shift
vol_geom['option']['WindowMaxY'] += shift

proj_id3 = astra.create_projector('cuda', proj_geom, vol_geom)
sinogram_id3, sinogram3 = astra.create_sino(P, proj_id3)

pylab.gray()
pylab.figure()
pylab.imshow(P)
pylab.figure()
pylab.imshow(sinogram1)
pylab.figure()
pylab.imshow(sinogram2)
pylab.figure()
pylab.imshow(sinogram3)
pylab.show()

# Free memory
astra.data2d.delete(sinogram_id1)
astra.projector.delete(proj_id1)
astra.data2d.delete(sinogram_id2)
astra.projector.delete(proj_id2)
Example #34
def Plot_state(t, suffix, SURVEY='PS1'):
    '''
   Make all the plots we need to assess the state of the tractor. Mainly, 
   a multi-panel figure of image, synthetic image and chi, for each image being 
   modelled.
   
   t is a Tractor object, containing a list of images.
   '''

    for i, image in enumerate(t.images):

        if image.name is None:
            imname = suffix + str(i)
        else:
            imname = image.name

        chi = t.getChiImage(i)

        if SURVEY == 'PS1':
            ima, chia, psfa = lenstractor.PS1_imshow_settings(image, chi)
        elif SURVEY == 'KIDS':
            ima, chia, psfa = lenstractor.KIDS_imshow_settings(image, chi)
        else:
            # Do the same as for PS1
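            # estimate the per-pixel noise sigma from the median of the inverse-variance map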
            scale = np.sqrt(np.median(1.0 / image.invvar[image.invvar > 0.0]))
            ima = dict(interpolation='nearest',
                       origin='lower',
                       vmin=-100. * scale,
                       vmax=3. * scale)

            chia = dict(interpolation='nearest',
                        origin='lower',
                        vmin=-5.,
                        vmax=5.)

            psfa = dict(interpolation='nearest', origin='lower')

        fig = plt.figure(**figprops)
        fig.subplots_adjust(**adjustprops)
        plt.clf()
        plt.gray()

        plt.subplot(py, px, 1)
        plt.imshow(-image.data, **ima)
        tidyup_plot()
        plt.title('Observed image')

        model = t.getModelImages()[i]
        # print "lenstractor.Plot_state: minmax of model = ",np.min(model),np.max(model)
        plt.subplot(py, px, 2)
        plt.imshow(-model, **ima)
        tidyup_plot()
        plt.title('Predicted image')

        plt.subplot(py, px, 3)
        plt.imshow(-chi, **chia)
        tidyup_plot()
        if SURVEY == 'KIDS':
            # It is not clear why the residual image is not in units of
            # sigma. Perhaps this causes problems in the modelling.
            # This code is not refactored into kids.py since it should
            # not be necessary in the first place.
            plt.title('Residuals (flexible scale)')
        else:
            plt.title(r'Residuals ($\pm 5\sigma$)')

        psfimage = image.psf.getPointSourcePatch(*model.shape).patch
        plt.subplot(py, px, 4)
        plt.imshow(-psfimage, **psfa)
        tidyup_plot()
        plt.title('PSF')

        plt.savefig(imname + '_' + suffix + '.png')

    return
Example #35
    YY = np.linspace(0, 4096, 5)
    XX = np.linspace(0, 2048, 5)

    yims = []
    for y in YY:
        xims = []
        for x in XX:
            im = psf.instantiateAt(x, y)
            im /= im.sum()
            xims.append(im)
        xims = np.hstack(xims)
        yims.append(xims)
    yims = np.vstack(yims)
    plt.clf()
    plt.imshow(yims, origin='lower', interpolation='nearest')
    plt.gray()
    plt.hot()
    plt.title('instantiated')
    ps.savefig()

    for scale in [True, False]:
        print('fitting params, scale=', scale)
        psf.scale = scale
        psf.splines = None
        psf.ensureFit()

        sims = []
        for y in YY:
            xims = []
            for x in XX:
                mog = psf.mogAt(x, y)
Example #36
def train_test_plot(use_momentum=False,
                    use_sparsity=False,
                    for_comparison=False):
    """Entry point for script."""
    # load data
    train_x, test_x, train_y, test_y = load_mnist(onehot=True)
    trx = len(train_x)
    print('finished loading data')

    # initialize model
    x_mat = T.fmatrix('x')
    y_mat = T.fmatrix('d')
    np.random.seed(10)
    rng = np.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2**30))
    corruption_level = 0.1
    training_epochs = 25
    batch_size = 128
    if use_momentum:
        print('Using momentum term.')
    if use_sparsity:
        print('Using sparsity constraint.')

    # initialize weights and biases
    weight_1 = init_weights(28 * 28, 900)
    bias_1 = init_bias(900)
    weight_2 = init_weights(900, 625)
    bias_2 = init_bias(625)
    weight_3 = init_weights(625, 400)
    bias_3 = init_bias(400)
    # output reconstructed input from 3rd hidden layer
    weight_4 = init_weights(400, 28 * 28)
    bias_4 = init_bias(28 * 28)
    # softmax layer
    weight_5 = init_weights(400, 10)
    bias_5 = init_bias(10)

    # CORRUPT THE PURE
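    # i.e. randomly zero out a fraction corruption_level of the input pixels with a binomial mask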
    tilde_x = theano_rng.binomial(size=x_mat.shape,
                                  n=1,
                                  p=1 - corruption_level,
                                  dtype=theano.config.floatX) * x_mat

    y_1 = T.nnet.sigmoid(T.dot(tilde_x, weight_1) + bias_1)
    y_2 = T.nnet.sigmoid(T.dot(y_1, weight_2) + bias_2)
    y_3 = T.nnet.sigmoid(T.dot(y_2, weight_3) + bias_3)

    # reconstruction layer
    z_1 = T.nnet.sigmoid(T.dot(y_3, weight_4) + bias_4)

    if use_sparsity:
        beta = 0.5
        rho = 0.05
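        # terms 2-4 below add up to the KL-divergence sparsity penalty
        #   beta * sum_j [ rho*log(rho/rho_hat_j) + (1-rho)*log((1-rho)/(1-rho_hat_j)) ],
        # where rho_hat_j = T.mean(y_3, axis=0)[j] is the mean activation of hidden unit j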
        term_1 = -T.mean(
            T.sum(x_mat * T.log(z_1) + (1 - x_mat) * T.log(1 - z_1), axis=1))
        term_2 = beta * T.shape(y_3)[1] * (rho * T.log(rho) +
                                           (1 - rho) * T.log(1 - rho))
        term_3 = -beta * rho * T.sum(T.log(T.mean(y_3, axis=0) + 1e-6))
        term_4 = - beta * (1 - rho) * \
            T.sum(T.log(1 - T.mean(y_3, axis=0) + 1e-6))
        cost_1 = term_1 + term_2 + term_3 + term_4
    else:
        cost_1 = -T.mean(
            T.sum(x_mat * T.log(z_1) + (1 - x_mat) * T.log(1 - z_1), axis=1))

    params_1 = [
        weight_1, bias_1, weight_2, bias_2, weight_3, bias_3, weight_4, bias_4
    ]

    if use_momentum:
        updates_1 = sgd_momentum(cost_1, params_1)
    else:
        updates_1 = sgd(cost_1, params_1)

    train_da1 = theano.function(inputs=[x_mat],
                                outputs=cost_1,
                                updates=updates_1,
                                allow_input_downcast=True)
    test_da1 = theano.function(inputs=[x_mat],
                               outputs=[y_1, y_2, y_3, z_1],
                               allow_input_downcast=True)

    # softmax layer
    p_y_4 = T.nnet.softmax(T.dot(y_3, weight_5) + bias_5)
    y_4 = T.argmax(p_y_4, axis=1)
    cost_2 = T.mean(T.nnet.categorical_crossentropy(p_y_4, y_mat))
    params_2 = [
        weight_1, bias_1, weight_2, bias_2, weight_3, bias_3, weight_5, bias_5
    ]

    if use_momentum:
        updates_2 = sgd_momentum(cost_2, params_2)
    else:
        updates_2 = sgd(cost_2, params_2)

    train_ffn = theano.function(inputs=[x_mat, y_mat],
                                outputs=cost_2,
                                updates=updates_2,
                                allow_input_downcast=True)
    test_ffn = theano.function(inputs=[x_mat],
                               outputs=y_4,
                               allow_input_downcast=True)

    # stacked denoising autoencoder
    print('training dae1 ...')
    train_cost = []
    for _ in tqdm(range(training_epochs)):
        # go through the training set
        cost = []
        for start, end in zip(range(0, trx, batch_size),
                              range(batch_size, trx, batch_size)):
            cost.append(train_da1(train_x[start:end]))
        train_cost.append(np.mean(cost, dtype='float64'))

    if for_comparison:
        # return training cost for comparison later
        comp_train_cost = train_cost
    else:
        # all the plottings
        pylab.figure()
        pylab.plot(range(training_epochs), train_cost)
        pylab.xlabel('iterations')
        pylab.ylabel('cross-entropy')
        pylab.savefig(os.path.join(CUR_DIR, 'project_2b_train.png'))

        print('plotting weight samples')
        w_1 = weight_1.get_value()
        pylab.figure()
        pylab.gray()
        for i in tqdm(range(100)):
            pylab.subplot(10, 10, i + 1)
            pylab.axis('off')
            pylab.imshow(w_1[:, i].reshape(28, 28))
        pylab.suptitle('layer 1 weight samples')
        pylab.savefig(os.path.join(CUR_DIR, 'project_2b_weight1.png'))

        w_2 = weight_2.get_value()
        pylab.figure()
        pylab.gray()
        for i in tqdm(range(100)):
            pylab.subplot(10, 10, i + 1)
            pylab.axis('off')
            pylab.imshow(w_2[:, i].reshape(30, 30))
        pylab.suptitle('layer 2 weight samples')
        pylab.savefig(os.path.join(CUR_DIR, 'project_2b_weight2.png'))

        w_3 = weight_3.get_value()
        pylab.figure()
        pylab.gray()
        for i in tqdm(range(100)):
            pylab.subplot(10, 10, i + 1)
            pylab.axis('off')
            pylab.imshow(w_3[:, i].reshape(25, 25))
        pylab.suptitle('layer 3 weight samples')
        pylab.savefig(os.path.join(CUR_DIR, 'project_2b_weight3.png'))

        ind = np.random.randint(low=0, high=1900)
        layer_1, layer_2, layer_3, output = test_da1(train_x[ind:ind + 100, :])

        # show input image
        print('plotting inputs...')
        pylab.figure()
        pylab.gray()
        for i in tqdm(range(100)):
            pylab.subplot(10, 10, i + 1)
            pylab.axis('off')
            pylab.imshow(train_x[ind + i:ind + i + 1, :].reshape(28, 28))
        pylab.suptitle('input images')
        pylab.savefig(os.path.join(CUR_DIR, 'project_2b_input.png'))

        # hidden layer activations
        print('plotting hidden layer activations...')
        pylab.figure()
        pylab.gray()
        for i in tqdm(range(100)):
            pylab.subplot(10, 10, i + 1)
            pylab.axis('off')
            pylab.imshow(layer_1[i, :].reshape(30, 30))
        pylab.suptitle('layer 1 activations')
        pylab.savefig(os.path.join(CUR_DIR, 'project_2b_layer1.png'))

        pylab.figure()
        pylab.gray()
        for i in tqdm(range(100)):
            pylab.subplot(10, 10, i + 1)
            pylab.axis('off')
            pylab.imshow(layer_2[i, :].reshape(25, 25))
        pylab.suptitle('layer 2 activations')
        pylab.savefig(os.path.join(CUR_DIR, 'project_2b_layer2.png'))

        pylab.figure()
        pylab.gray()
        for i in tqdm(range(100)):
            pylab.subplot(10, 10, i + 1)
            pylab.axis('off')
            pylab.imshow(layer_3[i, :].reshape(20, 20))
        pylab.suptitle('layer 3 activations')
        pylab.savefig(os.path.join(CUR_DIR, 'project_2b_layer3.png'))

        # reconstructed outputs
        print('plotting reconstructed outputs...')
        pylab.figure()
        pylab.gray()
        for i in tqdm(range(100)):
            pylab.subplot(10, 10, i + 1)
            pylab.axis('off')
            pylab.imshow(output[i, :].reshape(28, 28))
        pylab.suptitle('reconstructed outputs')
        pylab.savefig(os.path.join(CUR_DIR, 'project_2b_output.png'))

    # softmax
    print('training ffn ...')
    train_cost = []
    test_accr = []
    for _ in tqdm(range(training_epochs)):
        # go through the training set
        cost = []
        for start, end in zip(range(0, trx, batch_size),
                              range(batch_size, trx, batch_size)):
            cost.append(train_ffn(train_x[start:end], train_y[start:end]))
        train_cost.append(np.mean(cost, dtype='float64'))
        test_accr.append(
            np.mean(np.argmax(test_y, axis=1) == test_ffn(test_x)))
    # output max accuracy at # iterations
    print('%.1f accuracy at %d iterations' %
          (np.max(test_accr) * 100, np.argmax(test_accr) + 1))

    if for_comparison:
        # return arrays for plotting later
        return comp_train_cost, train_cost, test_accr
    else:
        # all the plottings
        pylab.figure()
        pylab.plot(range(training_epochs), train_cost)
        pylab.xlabel('iterations')
        pylab.ylabel('cross-entropy')
        pylab.savefig(os.path.join(CUR_DIR, 'project_2b_train_ffn.png'))

        pylab.figure()
        pylab.plot(range(training_epochs), test_accr)
        pylab.xlabel('iterations')
        pylab.ylabel('test accuracy')
        pylab.savefig(os.path.join(CUR_DIR, 'project_2b_test_ffn.png'))

        pylab.show()
Example #37
    def train(self):

        # Load decoder weight to GAN's discriminator
        # self.combined.summary()
        # self.combined = load_model('CAE_weights.h5')


        color_mode = 'grayscale' if self.channels == 1 else 'rgb'

        train_datagen = ImageDataGenerator(rescale=1./255)

        train_set     = train_datagen.flow_from_directory(self.train_dir,
                                                          target_size=[self.img_rows, self.img_cols],
                                                          batch_size=self.batch_size,
                                                          color_mode=color_mode,    # 'rgb'
                                                          class_mode='binary')

        test_set     = train_datagen.flow_from_directory(self.test_dir,
                                                          target_size=[self.img_rows, self.img_cols],
                                                          batch_size=self.batch_size,
                                                          color_mode=color_mode,    # 'rgb'
                                                          class_mode='binary')

        epoch = 0
        i = 0

        for x_batch, y_label in train_set:



            x_batch = x_batch - 0.5
            y_label = y_label.reshape(-1, 1)


            # For N class, real / fake has 1:1
            half_batch = self.batch_size // 2
            cw1 = {0: 1, 1: 1}

            # For N class, each class has 'K/N' images while there are total 'K' fake images
            # So, each 'real img' has 'N' times more weight than 'fake img' (--> weight = 1 / frequency)
            cw2 = {i:  self.num_class / half_batch for i in range(self.num_class)}
            cw2[self.num_class] = 1 / half_batch



            # Adversarial ground truths
            # Use x_batch.shape[0] instead of batch_size because of last batch's size ( < batch_size)

            valid = np.ones((x_batch.shape[0], 1))
            fake = np.zeros((x_batch.shape[0], 1))








            # ---------------------
            #  Train Discriminator
            # ---------------------
            self.discriminator.trainable = True
            noise = np.random.uniform(-1, 1, (x_batch.shape[0], self.latent_dim))


            # Generate a batch of new images
            gen_imgs = self.generator.predict(noise)


            # One-hot encoding of labels
            # fake_label will be one-hot encoded to 'N+1'th column
            labels      = to_categorical(y_label, num_classes=self.num_class+1)
            fake_labels = to_categorical(np.full((x_batch.shape[0], 1), self.num_class), num_classes=self.num_class+1)


            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(x_batch, [valid, labels], class_weight=[cw1, cw2])
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, [fake, fake_labels], class_weight=[cw1, cw2])
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)









            # ---------------------
            #  Train Generator
            # ---------------------

            # Train the generator (to have the discriminator label samples as valid)
            self.discriminator.trainable = False


            g_loss = self.combined.train_on_batch(noise, valid, class_weight=[cw1, cw2])


            # Plot the progress
            d_lr = K.eval(self.discriminator.optimizer.lr)
            g_lr = K.eval(self.combined.optimizer.lr)


            print ("epoch : %d   step : %d / %d    [D loss: %f, acc.: %.2f%%, op_acc: %.2f%%] [G loss: %f]  [d_lr : %f]  [g_lr :%f]"
                 % (epoch, i, len(train_set), d_loss[0], 100*d_loss[3], 100*d_loss[4], g_loss, d_lr, g_lr))


            # If at save interval => save generated image samples
            if (i + 1) % 20 == 0:
                noise = np.random.uniform(-1, 1, (x_batch.shape[0], self.latent_dim))

                img = self.generator.predict(noise)
                img = 0.5 * img + 0.5

                fig = plt.gcf()
                fig.set_size_inches(3, 3)


                if self.channels == 1:
                    img = img[0].squeeze(axis=2)
                    #img = cv2.GaussianBlur(img, (3, 3), 0)
                    plt.imshow(img, cmap=pylab.gray())  # if gray scale :img[0].squeeze(axis=2), cmap=pylab.gray()
                else:
                    plt.imshow(img[0])

                plt.show(block=False)
                plt.pause(0.001)



            if (i + 1) % 100 == 0:
                plt.savefig("result_{}_{}.png".format(str(epoch), str(i)))
                plt.close()

                # Test the discriminator
                x_batch_t, y_labels_t = test_set.next()
                labels_t = to_categorical(y_labels_t, num_classes=self.num_class + 1)

                print(self.discriminator.metrics_names)
                print(self.discriminator.evaluate(x_batch_t, [valid, labels_t], verbose=1))


            if (i + 1) % len(train_set) == 0:
                epoch += 1
                i = 0

            i += 1
Example #38
def tt_plot_func(train_x, train_y, test_x, test_y, func=sgd):
    """Train, test and plot using a particular update function.

    Arguments:
        train_x, train_y, test_x, test_y: train and test data
        func: update function to use, default to nn_cnn.sgd
    """
    # train and test
    train, predict, test = cnn(update_func=func)
    test_accr = []
    train_cost = []
    for i in tqdm(range(NO_ITERS)):
        train_x, train_y = shuffle_data(train_x, train_y)
        test_x, test_y = shuffle_data(test_x, test_y)
        cost = 0.0
        train_length = len(train_x)

        starts = range(0, train_length, BATCH_SIZE)
        ends = range(BATCH_SIZE, train_length, BATCH_SIZE)
        for start, end in zip(starts, ends):
            cost += train(train_x[start:end], train_y[start:end])

        # average out the cost for one epoch
        cost = cost / (train_length // BATCH_SIZE)
        train_cost += [cost]
        test_accr.append(np.mean(np.argmax(test_y, axis=1) == predict(test_x)))

    # output max accuracy at # iterations
    print('%.1f accuracy at %d iterations' %
          (np.max(test_accr) * 100, np.argmax(test_accr) + 1))

    # plot test accuracy
    pylab.figure()
    pylab.plot(range(NO_ITERS), test_accr)
    pylab.xlabel('epochs')
    pylab.ylabel('test accuracy')
    pylab.savefig(os.path.join(CUR_DIR, 'project_2a_test.png'))

    # plot training cost
    pylab.figure()
    pylab.plot(range(NO_ITERS), train_cost)
    pylab.xlabel('epochs')
    pylab.ylabel('training cost')
    pylab.savefig(os.path.join(CUR_DIR, 'project_2a_train.png'))

    # pick a random image
    ind = np.random.randint(low=0, high=2000)
    conv_1, pool_1, conv_2, pool_2 = test(test_x[ind:ind + 1, :])

    # show input image
    pylab.figure()
    pylab.gray()
    pylab.axis('off')
    pylab.imshow(test_x[ind, :].reshape(28, 28))
    pylab.title('input image')
    pylab.savefig(os.path.join(CUR_DIR, 'img_input.png'))

    # show convolved and pooled feature maps
    pylab.figure()
    pylab.gray()
    for i in range(15):
        pylab.subplot(3, 5, i + 1)
        pylab.axis('off')
        pylab.imshow(conv_1[0, i, :].reshape(20, 20))
    pylab.suptitle('layer 1 convolved feature maps')
    pylab.savefig(os.path.join(CUR_DIR, 'img_conv_1.png'))

    pylab.figure()
    pylab.gray()
    for i in range(15):
        pylab.subplot(3, 5, i + 1)
        pylab.axis('off')
        pylab.imshow(pool_1[0, i, :].reshape(10, 10))
    pylab.suptitle('layer 1 pooled feature maps')
    pylab.savefig(os.path.join(CUR_DIR, 'img_pooled_1.png'))

    pylab.figure()
    pylab.gray()
    for i in range(20):
        pylab.subplot(4, 5, i + 1)
        pylab.axis('off')
        pylab.imshow(conv_2[0, i, :].reshape(6, 6))
    pylab.suptitle('layer 2 convolved feature maps')
    pylab.savefig(os.path.join(CUR_DIR, 'img_conv_2.png'))

    pylab.figure()
    pylab.gray()
    for i in range(20):
        pylab.subplot(4, 5, i + 1)
        pylab.axis('off')
        pylab.imshow(pool_2[0, i, :].reshape(3, 3))
    pylab.suptitle('layer 2 pooled feature maps')
    pylab.savefig(os.path.join(CUR_DIR, 'img_pooled_2.png'))
    pylab.show()
Example #39
def main():
    '''
    Runs the deblender and creates plots for the "design document",
    doc/design.tex.  See the file NOTES for how to get set up to the
    point where you can actually run this on data.
    '''

    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('--root',
                      dest='root',
                      help='Root directory for Subaru data')
    parser.add_option('--outroot',
                      '-o',
                      dest='outroot',
                      help='Output root directory for Subaru data')
    parser.add_option('--sources', help='Read a FITS table of sources')
    parser.add_option('--calexp', help='Read a FITS calexp')
    parser.add_option('--psf', help='Read a FITS PSF')

    parser.add_option('--drill',
                      '-D',
                      dest='drill',
                      action='append',
                      type=str,
                      default=[],
                      help='Drill down on individual source IDs')
    parser.add_option(
        '--drillxy',
        dest='drillxy',
        action='append',
        type=str,
        default=[],
        help='Drill down on individual source positions, eg 132,46;54,67')
    parser.add_option('--visit',
                      dest='visit',
                      type=int,
                      default=108792,
                      help='Suprimecam visit id')
    parser.add_option('--ccd',
                      dest='ccd',
                      type=int,
                      default=5,
                      help='Suprimecam CCD number')
    parser.add_option('--prefix',
                      dest='prefix',
                      default='design-',
                      help='plot filename prefix')
    parser.add_option('--suffix',
                      dest='suffix',
                      default=None,
                      help='plot filename suffix (default: ".png")')
    parser.add_option(
        '--pat',
        dest='pat',
        help=
        'Plot filename pattern: eg, "design-%(pid)04i-%(name)s.png"; overrides --prefix and --suffix'
    )
    parser.add_option('--pdf',
                      dest='pdf',
                      action='store_true',
                      default=False,
                      help='save in PDF format?')
    parser.add_option('-v', dest='verbose', action='store_true')
    parser.add_option('--figw',
                      dest='figw',
                      type=float,
                      help='Figure window width (inches)',
                      default=4.)
    parser.add_option('--figh',
                      dest='figh',
                      type=float,
                      help='Figure window height (inches)',
                      default=4.)
    parser.add_option('--order',
                      dest='order',
                      type=str,
                      help='Child order: eg 3,0,1,2')

    parser.add_option('--sdss',
                      dest='sec',
                      action='store_const',
                      const='sdss',
                      help='Produce plots for the SDSS section.')
    parser.add_option('--mono',
                      dest='sec',
                      action='store_const',
                      const='mono',
                      help='Produce plots for the "monotonic" section.')
    parser.add_option('--median',
                      dest='sec',
                      action='store_const',
                      const='median',
                      help='Produce plots for the "median filter" section.')
    parser.add_option('--ramp',
                      dest='sec',
                      action='store_const',
                      const='ramp',
                      help='Produce plots for the "ramp edges" section.')
    parser.add_option(
        '--ramp2',
        dest='sec',
        action='store_const',
        const='ramp2',
        help='Produce plots for the "ramp edges + stray flux" section.')
    parser.add_option('--patch',
                      dest='sec',
                      action='store_const',
                      const='patch',
                      help='Produce plots for the "patch edges" section.')

    opt, args = parser.parse_args()

    # Logging
    if opt.verbose:
        lsst.log.setLevel('', lsst.log.DEBUG)
    else:
        lsst.log.setLevel('', lsst.log.INFO)

    if opt.sec is None:
        opt.sec = 'sdss'
    if opt.pdf:
        if opt.suffix is None:
            opt.suffix = ''
        opt.suffix += '.pdf'
    if not opt.suffix:
        opt.suffix = '.png'

    if opt.pat:
        plotpattern = opt.pat
    else:
        plotpattern = opt.prefix + '%(pid)04i-%(name)s' + opt.suffix

    if opt.order is not None:
        opt.order = [int(x) for x in opt.order.split(',')]
        invorder = np.zeros(len(opt.order), dtype=int)
        invorder[opt.order] = np.arange(len(opt.order))
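        # invorder is the inverse permutation of --order: mapchild() below uses
        # it to map a deblender child index to the display position requested
        # on the command line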

    def mapchild(i):
        if opt.order is None:
            return i
        return invorder[i]

    def savefig(pid, figname):
        fn = plotpattern % dict(pid=pid, name=figname)
        plt.savefig(fn)

    # Load data using the butler, if desired
    dr = None
    if opt.sources is None or opt.calexp is None:
        print('Creating DataRef...')
        dr = getSuprimeDataref(opt.visit,
                               opt.ccd,
                               rootdir=opt.root,
                               outrootdir=opt.outroot)
        print('Got', dr)

    # Which parent ids / deblend families are we going to plot?
    keepids = None
    if len(opt.drill):
        keepids = []
        for d in opt.drill:
            for dd in d.split(','):
                keepids.append(int(dd))
        print('Keeping parent ids', keepids)

    keepxys = None
    if len(opt.drillxy):
        keepxys = []
        for d in opt.drillxy:
            for dd in d.split(';'):
                xy = dd.split(',')
                assert (len(xy) == 2)
                keepxys.append((int(xy[0]), int(xy[1])))
        print('Keeping parents at xy', keepxys)

    # Read from butler or local file
    cat = readCatalog(opt.sources,
                      None,
                      dataref=dr,
                      keepids=keepids,
                      keepxys=keepxys,
                      patargs=dict(visit=opt.visit, ccd=opt.ccd))
    print('Got', len(cat), 'sources')

    # Load data from butler or local files
    if opt.calexp is not None:
        print('Reading exposure from', opt.calexp)
        exposure = afwImage.ExposureF(opt.calexp)
    else:
        exposure = dr.get('calexp')
    print('Exposure', exposure)
    mi = exposure.getMaskedImage()

    if opt.psf is not None:
        print('Reading PSF from', opt.psf)
        psf = afwDet.Psf.readFits(opt.psf)
        print('Got', psf)
    elif dr:
        psf = dr.get('psf')
    else:
        psf = exposure.getPsf()

    sigma1 = get_sigma1(mi)

    fams = getFamilies(cat)
    print(len(fams), 'deblend families')

    if False:
        for j, (parent, children) in enumerate(fams):
            print('parent', parent)
            print('children', children)
            plotDeblendFamily(mi,
                              parent,
                              children,
                              cat,
                              sigma1,
                              ellipses=False)
            fn = '%04i.png' % parent.getId()
            plt.savefig(fn)
            print('wrote', fn)

    def nlmap(X):
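        # arcsinh stretch: roughly linear within a few sigma of zero and
        # logarithmic for bright pixels, so faint structure and bright peaks
        # remain visible in the same image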
        return np.arcsinh(X / (3. * sigma1))

    def myimshow(im, **kwargs):
        kwargs = kwargs.copy()
        mn = kwargs.get('vmin', -5 * sigma1)
        kwargs['vmin'] = nlmap(mn)
        mx = kwargs.get('vmax', 100 * sigma1)
        kwargs['vmax'] = nlmap(mx)
        plt.imshow(nlmap(im), **kwargs)

    plt.figure(figsize=(opt.figw, opt.figh))
    plt.subplot(1, 1, 1)
    plt.subplots_adjust(left=0.01,
                        right=0.99,
                        bottom=0.01,
                        top=0.99,
                        wspace=0.05,
                        hspace=0.1)

    # Make plots for each deblend family.

    for j, (parent, children) in enumerate(fams):
        print('parent', parent.getId())
        print('children', [ch.getId() for ch in children])
        print('parent x,y', parent.getX(), parent.getY())

        pid = parent.getId()
        fp = parent.getFootprint()
        bb = fp.getBBox()
        pim = footprintToImage(parent.getFootprint(), mi).getArray()
        pext = getExtent(bb)
        imargs = dict(interpolation='nearest',
                      origin='lower',
                      vmax=pim.max() * 0.95,
                      vmin=-3. * sigma1)
        pksty = dict(linestyle='None',
                     marker='+',
                     color='r',
                     mew=3,
                     ms=20,
                     alpha=0.6)

        plt.clf()
        myimshow(afwImage.ImageF(mi.getImage(), bb).getArray(), **imargs)
        plt.gray()
        plt.xticks([])
        plt.yticks([])
        savefig(pid, 'image')

        # Parent footprint
        plt.clf()
        myimshow(pim, extent=pext, **imargs)
        plt.gray()
        pks = fp.getPeaks()
        plt.plot([pk.getIx() for pk in pks], [pk.getIy() for pk in pks],
                 **pksty)
        plt.xticks([])
        plt.yticks([])
        plt.axis(pext)
        savefig(pid, 'parent')

        from lsst.meas.deblender.baseline import deblend

        xc = int((bb.getMinX() + bb.getMaxX()) / 2.)
        yc = int((bb.getMinY() + bb.getMaxY()) / 2.)
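        # estimate the PSF FWHM at the footprint centre; 2.35 is the usual
        # Gaussian sigma-to-FWHM factor (2*sqrt(2*ln 2) ~ 2.355)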
        if hasattr(psf, 'getFwhm'):
            psf_fwhm = psf.getFwhm(xc, yc)
        else:
            psf_fwhm = psf.computeShape().getDeterminantRadius() * 2.35

        # Each section of the design doc runs the deblender with different args.

        kwargs = dict(sigma1=sigma1, verbose=opt.verbose, getTemplateSum=True)

        basic = kwargs.copy()
        basic.update(fit_psfs=False,
                     median_smooth_template=False,
                     monotonic_template=False,
                     lstsq_weight_templates=False,
                     assignStrayFlux=False,
                     rampFluxAtEdge=False,
                     patchEdges=False)

        if opt.sec == 'sdss':
            # SDSS intro
            kwargs = basic
            kwargs.update(lstsq_weight_templates=True)

        elif opt.sec == 'mono':
            kwargs = basic
            kwargs.update(lstsq_weight_templates=True, monotonic_template=True)
        elif opt.sec == 'median':
            kwargs = basic
            kwargs.update(lstsq_weight_templates=True,
                          median_smooth_template=True,
                          monotonic_template=True)
        elif opt.sec == 'ramp':
            kwargs = basic
            kwargs.update(median_smooth_template=True,
                          monotonic_template=True,
                          rampFluxAtEdge=True)

        elif opt.sec == 'ramp2':
            kwargs = basic
            kwargs.update(median_smooth_template=True,
                          monotonic_template=True,
                          rampFluxAtEdge=True,
                          assignStrayFlux=True)

        elif opt.sec == 'patch':
            kwargs = basic
            kwargs.update(median_smooth_template=True,
                          monotonic_template=True,
                          patchEdges=True)

        else:
            raise ValueError('Unknown section: "%s"' % opt.sec)

        print('Running deblender with kwargs:', kwargs)
        res = deblend(fp, mi, psf, psf_fwhm, **kwargs)
        # print('got result with', [x for x in dir(res) if not x.startswith('__')])
        # for pk in res.peaks:
        #     print('got peak with', [x for x in dir(pk) if not x.startswith('__')])
        #     print('  deblend as psf?', pk.deblend_as_psf)

        # Find bounding-box of all templates.
        tbb = fp.getBBox()
        for pkres, pk in zip(res.peaks, pks):
            tbb.include(pkres.template_foot.getBBox())
        print('Bounding-box of all templates:', tbb)

        # Sum-of-templates plot
        tsum = np.zeros((tbb.getHeight(), tbb.getWidth()))
        tx0, ty0 = tbb.getMinX(), tbb.getMinY()

        # Sum-of-deblended children plot(s)
        # "heavy" bbox == template bbox.
        hsum = np.zeros((tbb.getHeight(), tbb.getWidth()))
        hsum2 = np.zeros((tbb.getHeight(), tbb.getWidth()))
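        # hsum accumulates each child's full flux portion (including stray
        # flux); hsum2 accumulates the portion computed with strayFlux=False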

        # Sum of templates from the deblender itself
        plt.clf()
        t = res.templateSum
        myimshow(t.getArray(), extent=getExtent(t.getBBox()), **imargs)
        plt.gray()
        plt.xticks([])
        plt.yticks([])
        savefig(pid, 'tsum1')

        # Make plots for each deblended child (peak)

        k = 0
        for pkres, pk in zip(res.peaks, pks):

            heavy = pkres.get_flux_portion()
            if heavy is None:
                print('Child has no HeavyFootprint -- skipping')
                continue

            kk = mapchild(k)

            w = pkres.template_weight

            cfp = pkres.template_foot
            cbb = cfp.getBBox()
            cext = getExtent(cbb)

            # Template image
            tim = pkres.template_mimg.getImage()
            timext = cext
            tim = tim.getArray()

            (x0, x1, y0, y1) = timext
            print('tim ext', timext)
            tsum[y0 - ty0:y1 - ty0, x0 - tx0:x1 - tx0] += tim

            # "Heavy" image -- flux assigned to child
            him = footprintToImage(heavy).getArray()
            hext = getExtent(heavy.getBBox())

            (x0, x1, y0, y1) = hext
            hsum[y0 - ty0:y1 - ty0, x0 - tx0:x1 - tx0] += him

            # "Heavy" without stray flux
            h2 = pkres.get_flux_portion(strayFlux=False)
            him2 = footprintToImage(h2).getArray()
            hext2 = getExtent(h2.getBBox())
            (x0, x1, y0, y1) = hext2
            hsum2[y0 - ty0:y1 - ty0, x0 - tx0:x1 - tx0] += him2

            if opt.sec == 'median':
                try:
                    med = pkres.median_filtered_template
                except Exception:
                    med = pkres.orig_template

                for im, nm in [(pkres.orig_template, 'symm'), (med, 'med')]:
                    # print('im:', im)
                    plt.clf()
                    myimshow(im.getArray(), extent=cext, **imargs)
                    plt.gray()
                    plt.xticks([])
                    plt.yticks([])
                    plt.plot([pk.getIx()], [pk.getIy()], **pksty)
                    plt.axis(pext)
                    savefig(pid, nm + '%i' % (kk))

            # Template
            plt.clf()
            myimshow(pkres.template_mimg.getImage().getArray() / w,
                     extent=cext,
                     **imargs)
            plt.gray()
            plt.xticks([])
            plt.yticks([])
            plt.plot([pk.getIx()], [pk.getIy()], **pksty)
            plt.axis(pext)
            savefig(pid, 't%i' % (kk))

            # Weighted template
            plt.clf()
            myimshow(tim, extent=cext, **imargs)
            plt.gray()
            plt.xticks([])
            plt.yticks([])
            plt.plot([pk.getIx()], [pk.getIy()], **pksty)
            plt.axis(pext)
            savefig(pid, 'tw%i' % (kk))

            # "Heavy"
            plt.clf()
            myimshow(him, extent=hext, **imargs)
            plt.gray()
            plt.xticks([])
            plt.yticks([])
            plt.plot([pk.getIx()], [pk.getIy()], **pksty)
            plt.axis(pext)
            savefig(pid, 'h%i' % (kk))

            # Original symmetric template
            plt.clf()
            t = pkres.orig_template
            foot = pkres.orig_foot
            myimshow(t.getArray(), extent=getExtent(foot.getBBox()), **imargs)
            plt.gray()
            plt.xticks([])
            plt.yticks([])
            plt.plot([pk.getIx()], [pk.getIy()], **pksty)
            plt.axis(pext)
            savefig(pid, 'o%i' % (kk))

            if opt.sec == 'patch' and pkres.patched:
                pass

            if opt.sec in ['ramp', 'ramp2'] and pkres.has_ramped_template:

                # Ramped template
                plt.clf()
                t = pkres.ramped_template
                myimshow(t.getArray(), extent=getExtent(t.getBBox()), **imargs)
                plt.gray()
                plt.xticks([])
                plt.yticks([])
                plt.plot([pk.getIx()], [pk.getIy()], **pksty)
                plt.axis(pext)
                savefig(pid, 'r%i' % (kk))

                # Median-filtered template
                plt.clf()
                t = pkres.median_filtered_template
                myimshow(t.getArray(), extent=getExtent(t.getBBox()), **imargs)
                plt.gray()
                plt.xticks([])
                plt.yticks([])
                plt.plot([pk.getIx()], [pk.getIy()], **pksty)
                plt.axis(pext)
                savefig(pid, 'med%i' % (kk))

                # Assigned flux
                plt.clf()
                t = pkres.portion_mimg.getImage()
                myimshow(t.getArray(), extent=getExtent(t.getBBox()), **imargs)
                plt.gray()
                plt.xticks([])
                plt.yticks([])
                plt.plot([pk.getIx()], [pk.getIy()], **pksty)
                plt.axis(pext)
                savefig(pid, 'p%i' % (kk))

            if opt.sec == 'ramp2':
                # stray flux
                if pkres.stray_flux is not None:
                    s = pkres.stray_flux
                    strayim = footprintToImage(s).getArray()
                    strayext = getExtent(s.getBBox())

                    plt.clf()
                    myimshow(strayim, extent=strayext, **imargs)
                    plt.gray()
                    plt.xticks([])
                    plt.yticks([])
                    plt.plot([pk.getIx()], [pk.getIy()], **pksty)
                    plt.axis(pext)
                    savefig(pid, 's%i' % (kk))

                    # Assigned flux, omitting stray flux.
                    plt.clf()
                    myimshow(him2, extent=hext2, **imargs)
                    plt.gray()
                    plt.xticks([])
                    plt.yticks([])
                    plt.plot([pk.getIx()], [pk.getIy()], **pksty)
                    plt.axis(pext)
                    savefig(pid, 'hb%i' % (kk))

            k += 1

        # sum of templates
        plt.clf()
        myimshow(tsum, extent=getExtent(tbb), **imargs)
        plt.gray()
        plt.xticks([])
        plt.yticks([])
        plt.plot([pk.getIx() for pk in pks], [pk.getIy() for pk in pks],
                 **pksty)
        plt.axis(pext)
        savefig(pid, 'tsum')

        # sum of assigned flux
        plt.clf()
        myimshow(hsum, extent=getExtent(tbb), **imargs)
        plt.gray()
        plt.xticks([])
        plt.yticks([])
        plt.plot([pk.getIx() for pk in pks], [pk.getIy() for pk in pks],
                 **pksty)
        plt.axis(pext)
        savefig(pid, 'hsum')

        plt.clf()
        myimshow(hsum2, extent=getExtent(tbb), **imargs)
        plt.gray()
        plt.xticks([])
        plt.yticks([])
        plt.plot([pk.getIx() for pk in pks], [pk.getIy() for pk in pks],
                 **pksty)
        plt.axis(pext)
        savefig(pid, 'hsum2')

        k = 0
        for pkres, pk in zip(res.peaks, pks):
            heavy = pkres.get_flux_portion()
            if heavy is None:
                continue

            print('Template footprint:', pkres.template_foot.getBBox())
            print('Template img:', pkres.template_mimg.getBBox())
            print('Heavy footprint:', heavy.getBBox())

            cfp = pkres.template_foot
            cbb = cfp.getBBox()
            cext = getExtent(cbb)
            tim = pkres.template_mimg.getImage().getArray()
            (x0, x1, y0, y1) = cext

            frac = tim / tsum[y0 - ty0:y1 - ty0, x0 - tx0:x1 - tx0]
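            # frac is the fraction of the summed template flux contributed by
            # this child's template, evaluated over its own bounding box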

            msk = afwImage.ImageF(cbb.getWidth(), cbb.getHeight())
            msk.setXY0(cbb.getMinX(), cbb.getMinY())
            afwDet.setImageFromFootprint(msk, cfp, 1.)
            msk = msk.getArray()
            frac[msk == 0.] = np.nan

            # Fraction of flux assigned to this child.
            plt.clf()
            plt.imshow(frac,
                       extent=cext,
                       interpolation='nearest',
                       origin='lower',
                       vmin=0,
                       vmax=1)
            # plt.plot([x0,x0,x1,x1,x0], [y0,y1,y1,y0,y0], 'k-')
            plt.gray()
            plt.xticks([])
            plt.yticks([])
            plt.plot([pk.getIx()], [pk.getIy()], **pksty)
            plt.gca().set_axis_bgcolor((0.9, 0.9, 0.5))
            plt.axis(pext)
            savefig(pid, 'f%i' % (mapchild(k)))

            k += 1
Exemple #40
0
def main():
    # Import data
    mnist = input_data.read_data_sets('../data/mnist', one_hot=True)
    trainX, trainY = mnist.train.images[:12000], mnist.train.labels[:12000]
    testX, testY = mnist.test.images[:2000], mnist.test.labels[:2000]

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Build the graph for the deep net
    W_conv1, h_conv1, h_pool1, h_conv2, h_pool2, y_conv, keep_prob = cnn(x)

    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                            logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy)

    train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(
        cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

    N = len(trainX)
    idx = np.arange(N)
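    # index array is reshuffled every epoch (np.random.shuffle below) so that
    # mini-batches see the training data in a different order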
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        test_acc = []
        for i in range(no_epochs):
            np.random.shuffle(idx)
            trainX, trainY = trainX[idx], trainY[idx]

            for start, end in zip(range(0, N, batch_size),
                                  range(batch_size, N, batch_size)):
                train_step.run(feed_dict={
                    x: trainX[start:end],
                    y_: trainY[start:end],
                    keep_prob: 0.5
                })

            test_acc.append(
                accuracy.eval(feed_dict={
                    x: testX,
                    y_: testY,
                    keep_prob: 1.0
                }))
            print('iter %d: test accuracy %g' % (i, test_acc[i]))

        pylab.figure()
        pylab.plot(np.arange(no_epochs), test_acc, label='gradient descent')
        pylab.xlabel('epochs')
        pylab.ylabel('test accuracy')
        pylab.legend(loc='lower right')
        pylab.savefig('./figures/7.3_1.png')

        W_conv1_ = sess.run(W_conv1)
        W_conv1_ = np.array(W_conv1_)
        pylab.figure()
        pylab.gray()
        for i in range(32):
            pylab.subplot(8, 4, i + 1)
            pylab.axis('off')
            pylab.imshow(W_conv1_[:, :, 0, i])
        pylab.savefig('./figures/7.3_2.png')

        ind = np.random.randint(low=0, high=55000)
        X = mnist.train.images[ind, :]

        pylab.figure()
        pylab.gray()
        pylab.axis('off')
        pylab.imshow(X.reshape(28, 28))
        pylab.savefig('./figures/7.3_3.png')

        h_conv1_, h_pool1_, h_conv2_, h_pool2_ = sess.run(
            [h_conv1, h_pool1, h_conv2, h_pool2], {x: X.reshape(1, 784)})
        pylab.figure()
        pylab.gray()
        h_conv1_ = np.array(h_conv1_)
        for i in range(32):
            pylab.subplot(4, 8, i + 1)
            pylab.axis('off')
            pylab.imshow(h_conv1_[0, :, :, i])
        pylab.savefig('./figures/7.3_4.png')

        pylab.figure()
        pylab.gray()
        h_pool1_ = np.array(h_pool1_)
        for i in range(32):
            pylab.subplot(4, 8, i + 1)
            pylab.axis('off')
            pylab.imshow(h_pool1_[0, :, :, i])
        pylab.savefig('./figures/7.3_5.png')

        pylab.figure()
        pylab.gray()
        h_conv2_ = np.array(h_conv2_)
        for i in range(64):
            pylab.subplot(8, 8, i + 1)
            pylab.axis('off')
            pylab.imshow(h_conv2_[0, :, :, i])
        pylab.savefig('figures/7.3_6.png')

        pylab.figure()
        pylab.gray()
        h_pool2_ = np.array(h_pool2_)
        for i in range(64):
            pylab.subplot(8, 8, i + 1)
            pylab.axis('off')
            pylab.imshow(h_pool2_[0, :, :, i])
        pylab.savefig('./figures/7.3_7.png')

        pylab.show()
Exemple #41
0
def plot(scorelists, output, qrange=0.1, labels=None, publish=False, ylim=0.0):
    linestyle = ['-', '-', '-', '-', '-', '-', '-', '-', '-']
    linecolors = [(0.0, 0.0, 0.0), (0.8, 0.4, 0.0), (0.0, 0.45, 0.70),
                  (0.8, 0.6, 0.7), (0.0, 0.6, 0.5), (0.9, 0.6, 0.0),
                  (0.95, 0.9, 0.25), (0.35, 0.7, 0.9), (0.43, 0.17, 0.60)]

    # linestyle = [ '-', '-', '-', '-', '-', '--', '-', '-', '-' ]
    # linecolors = [  (0.0, 0.0, 0.0),
    #                 (0.8, 0.4, 0.0),
    #                 (0.35, 0.7, 0.9),
    #                 (0.8, 0.6, 0.7),
    #                 (0.0, 0.6, 0.5),
    #                 (0.9, 0.6, 0.0),
    #                 (0.95, 0.9, 0.25),
    #                 (0.35, 0.7, 0.9),
    #                 (0.43, 0.17, 0.60)]

    if publish:
        matplotlib.rcParams['text.usetex'] = True
        matplotlib.rcParams['font.size'] = 14
        matplotlib.rcParams['legend.fontsize'] = 20
        matplotlib.rcParams['xtick.labelsize'] = 24
        matplotlib.rcParams['ytick.labelsize'] = 24
        matplotlib.rcParams['axes.labelsize'] = 22

    xlabel = 'q-value'
    ylabel = 'Significant PSMs'
    pylab.clf()
    pylab.xlabel(xlabel)
    pylab.ylabel(ylabel)
    pylab.gray()

    h = -1
    for i, (q, p) in enumerate(scorelists):
        h = max(itertools.chain([h], (b for a, b in zip(q, p) if a <= qrange)))
        pylab.plot(q,
                   p,
                   color=linecolors[i],
                   linewidth=2,
                   linestyle=linestyle[i])

    # if publish:
    #     yt, _ = pylab.yticks()
    #     if all(v % 1000 == 0 for v in yt):
    #         # yl = list('$%d$' % int(v/1000)  for v in yt)
    #         yl = []
    #         ytt = []
    #         for v in yt:
    #             if v < h:
    #                 print(v)
    #                 print(float(v)/float(1000))
    #                 yl.append('$%f$' % float(v)/float(1000))
    #                 ytt.append(v)
    #         pylab.yticks(ytt, yl)
    #         pylab.ylabel(ylabel + ' (in 1000\'s)')

    pylab.xlim([0, qrange])

    pylab.ylim([ylim, h])

    pylab.legend(labels, loc='lower right', fontsize=20)
    print("Evaluated %d methods, saving plot to %s" % (len(labels), output))
    pylab.savefig(output, bbox_inches='tight')
Exemple #42
0
def main():

    trainX, trainY = load_data('data_batch_1')
    print(trainX.shape, trainY.shape)
    
    testX, testY = load_data('test_batch_trim')
    print(testX.shape, testY.shape)

    #Scaling the data
    testX = (testX - np.min(trainX, axis = 0))/np.max(trainX, axis = 0)
    trainX = (trainX - np.min(trainX, axis = 0))/np.max(trainX, axis = 0)
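    # note: testX is scaled first, using training-set statistics only, because
    # the line above this comment overwrites trainX with its scaled version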

    # Create the model
    x = tf.placeholder(tf.float32, [None, IMG_SIZE*IMG_SIZE*NUM_CHANNELS])
    y_ = tf.placeholder(tf.float32, [None, NUM_CLASSES])

    c1,p1,c2,p2,logits = cnn(x)

    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits)
    loss = tf.reduce_mean(cross_entropy)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

    N = len(trainX)
    idx = np.arange(N)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        test_acc = []
        training_loss =[]
        for e in range(epochs):
            np.random.shuffle(idx)
            trainX, trainY = trainX[idx], trainY[idx]

            for start, end in zip(range(0, N, batch_size), range(batch_size, N, batch_size)):
                train_step.run(feed_dict={x: trainX[start:end], y_: trainY[start:end]})
            
            training_loss.append(loss.eval(feed_dict={x: trainX, y_: trainY}))
            test_acc.append(accuracy.eval(feed_dict={x: testX, y_: testY}))
            # _, loss_ = sess.run([train_step, loss], {x: trainX, y_: trainY})

            print('epoch', e, 'entropy', training_loss[e], 'test accuracy', test_acc[e])
        test1 = testX[0]
        test2 = testX[1]

        plt.figure(1)
        plt.plot(range(epochs), test_acc)
        plt.ylabel('Test Accuracy')
        plt.xlabel('Number of iterations')
        plt.savefig('./A1-Test_accuracy.png')

        plt.figure(2)
        plt.plot(range(epochs), training_loss)
        plt.xlabel('Number of iterations')
        plt.ylabel('Training Loss')
        plt.savefig('./A1-Training_loss.png')
 
        plt.show()

        h_conv1_, h_pool1_, h_conv2_, h_pool2_ = sess.run([c1, p1, c2, p2],{x: test1.reshape(1,3072)})

        plt.figure(3)
        plt.gray()
        plt.imshow(test1.reshape(32,32,3))
        plt.savefig('./A1-test1-original.png')
        
        plt.figure(4)
        plt.gray()
        for i in range(50):
            plt.subplot(10, 5, i+1); plt.axis('off'); plt.imshow(h_conv1_[0,:,:,i])
        plt.savefig('./A1-test1-c1.png')

        plt.figure(5)
        plt.gray()
        for i in range(50):
            plt.subplot(10, 5, i+1); plt.axis('off'); plt.imshow(h_pool1_[0,:,:,i])
        plt.savefig('./A1-test1-p1.png')

        plt.figure(6)
        plt.gray()
        for i in range(60):
            plt.subplot(10, 6, i+1); plt.axis('off'); plt.imshow(h_conv2_[0,:,:,i])
        plt.savefig('./A1-test1-c2.png')

        plt.figure(7)
        plt.gray()
        for i in range(60):
            plt.subplot(10, 6, i+1); plt.axis('off'); plt.imshow(h_pool2_[0,:,:,i])
        plt.savefig('./A1-test1-p2.png')

        plt.show()

        h_conv1_, h_pool1_, h_conv2_, h_pool2_ = sess.run([c1, p1, c2, p2],{x: test2.reshape(1,3072)})

        plt.figure(8)
        plt.gray()
        plt.imshow(test2.reshape(32,32,3))
        plt.savefig('./A1-test2-original.png')
        
        plt.figure(9)
        plt.gray()
        for i in range(50):
            plt.subplot(10, 5, i+1); plt.axis('off'); plt.imshow(h_conv1_[0,:,:,i])
        plt.savefig('./A1-test2-c1.png')

        plt.figure(10)
        plt.gray()
        for i in range(50):
            plt.subplot(10, 5, i+1); plt.axis('off'); plt.imshow(h_pool1_[0,:,:,i])
        plt.savefig('./A1-test2-p1.png')

        plt.figure(11)
        plt.gray()
        for i in range(60):
            plt.subplot(10, 6, i+1); plt.axis('off'); plt.imshow(h_conv2_[0,:,:,i])
        plt.savefig('./A1-test2-c2.png')

        plt.figure(12)
        plt.gray()
        for i in range(60):
            plt.subplot(10, 6, i+1); plt.axis('off'); plt.imshow(h_pool2_[0,:,:,i])
        plt.savefig('./A1-test2-p2.png')

        plt.show()
Exemple #43
0
def main(n_doc=100):
    # Generate toy dataset
    datagen = LdaDataGenerator(n_topic=10, alpha=0.5)
    n_DxV, p_DxK = (), ()
    for d in xrange(n_doc):
        n_V, p_K = datagen.generate_document(n_word=100)
        n_DxV += n_V,
        p_DxK += p_K,
    n_DxV = np.array(n_DxV)
    p_DxK = np.array(p_DxK)

    # Init LDA learner
    lda = LatentDirichletAllocationGibbsSampler(n_topic=datagen.n_topic,
                                                alpha=1.0,
                                                beta=1.0,
                                                n_itr=100)

    # Define callback method
    pl.ion()
    pl.figure('Word topic dist.')

    def lda_monitoring(lda, itr):
        if itr != 0 and (itr % 10 != 0) and itr != lda.n_itr - 1: return
        phi_KxV = lda.get_word_topic_distribution()
        gs = gridspec.GridSpec(1, 2)
        pl.subplot(gs[0])
        pl.title('True')
        visualize_squared_shape_word_dist(datagen.phi_KxV)
        pl.subplot(gs[1])
        pl.title('Inferred at %d' % itr)
        visualize_squared_shape_word_dist(phi_KxV)
        pl.draw()
        pl.show()

    # Infer
    lda.fit(n_DxV, monitoring=lda_monitoring)

    # Show document topic distribution
    theta_KxD = lda.get_document_topic_distribution()
    # Find correspondence between true topics and inferred ones
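    # squared Euclidean distance between each true topic's document-proportion
    # profile (rows of p_DxK.T) and each inferred topic's profile (rows of
    # theta_KxD), via ||p - t||^2 = ||p||^2 - 2 p.t + ||t||^2; corr_K picks the
    # closest inferred topic for every true topic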
    dist_KxK = (p_DxK.T**2).sum(axis=1)[:, np.newaxis] \
        - 2 * np.dot(p_DxK.T, theta_KxD.T) \
        + (theta_KxD.T**2).sum(axis=0)[np.newaxis]
    corr_K = dist_KxK.argmin(axis=1)
    pl.figure('Topic dist. of training documents')
    pl.subplot(211)
    pl.title('true p(z|d)')
    pl.imshow(p_DxK.T, interpolation='none', cmap=pl.gray())
    pl.subplot(212)
    pl.title('inferred p(z|theta_d)')
    pl.imshow(theta_KxD[corr_K, :], interpolation='none', cmap=pl.gray())
    pl.show()

    # Generate unseen documents
    n_us_DxV, p_us_DxK = (), ()
    for d in xrange(20):
        n_V, p_K = datagen.generate_document(n_word=100)
        n_us_DxV += n_V,
        p_us_DxK += p_K,
    n_us_DxV = np.array(n_us_DxV)
    p_us_DxK = np.array(p_us_DxK)

    # Infer unseen documents with trained model
    theta_us_KxD = lda.infer_unseen_document(n_us_DxV)

    # Show topic distributions of unseen documents
    pl.ioff()
    pl.figure('Topic dist. of unseen documents')
    pl.subplot(211)
    pl.title('true p(z|d)')
    pl.imshow(p_us_DxK.T, interpolation='none', cmap=pl.gray())
    pl.subplot(212)
    pl.title('inferred p(z|theta_d)')
    pl.imshow(theta_us_KxD[corr_K], interpolation='none', cmap=pl.gray())
    pl.show()
Exemple #44
0
    def train(self):

        color_mode = 'grayscale' if self.channels == 1 else 'rgb'


        train_set, test_set = prepare_dataset(input_size=(self.img_shape[0], self.img_shape[1]),
                                              train_dir=self.train_dir,
                                              test_dir=self.test_dir,
                                              train_batch_size=self.train_batch_size,
                                              test_batch_size=self.test_batch_size,
                                              class_mode='input',
                                              color_mode=color_mode
                                              )
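        # class_mode='input' (as in Keras' flow_from_directory) makes the
        # generators yield (image, image) pairs, i.e. the reconstruction
        # targets an autoencoder needs -- assuming prepare_dataset forwards
        # the argument unchanged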

        self.Conv_AutoEncoder.fit_generator(train_set,
                                            epochs=self.epochs,
                                            validation_data=test_set)


        self.Conv_AutoEncoder.save(self.save_model_name)

        '''
        for step, real_img in enumerate(train_set):
            loss     = self.Conv_AutoEncoder.train_on_batch(real_img, real_img)
        '''

        fig = plt.figure()
        for i in range(10):

            img, _ = train_set.next()
            re_img = self.Conv_AutoEncoder.predict(np.expand_dims(img[0], 0))

            print(img[0].shape)

            if self.channels == 1:
                fig.add_subplot(1, 2, 1)
                plt.imshow(img[0].squeeze(axis=2), cmap=pylab.gray())
                fig.add_subplot(1, 2, 2)
                plt.imshow(re_img.squeeze(axis=(0, 3)), cmap=pylab.gray())
            else:
                fig.add_subplot(1, 2, 1)
                plt.imshow(img[0])
                fig.add_subplot(1, 2, 2)
                plt.imshow(re_img.squeeze(axis=0))



            plt.savefig("CAE_result_{}png.".format(str(i)))

            plt.show(block=False)
            plt.pause(1)

        print("\n\n\n\n\n\n\n\n\n\n\n\n\n")
Exemple #45
0
def gendata(enable, os, downsample, textid=None, seed=2313, verbose=False):
    """
    Generate the MNIST+ dataset.
    :param enable: dictionary of flags with keys ['texture', 'azimuth',
    'rotation', 'elevation'] to enable/disable a given factor of variation.
    :param textid: if enable['texture'], id number of the Brodatz texture to
    load. If textid is None, we load a random texture for each MNIST image.
    :param os: output size (width and height) of MNIST+ images.
    :param downsample: factor by which to downsample texture.
    :param seed: integer for seeding RNG.
    :param verbose: bool
    """
    rng = numpy.random.RandomState(seed)

    data = mnist.MNIST('train')
    test = mnist.MNIST('test')
    data.X = numpy.vstack((data.X, test.X))
    data.y = numpy.hstack((data.y, test.y))
    del test

    output = {}
    output['data'] = numpy.zeros((len(data.X), os * os))
    output['label'] = numpy.zeros(len(data.y))
    if enable['azimuth']:
        output['azimuth'] = numpy.zeros(len(data.y))
    if enable['elevation']:
        output['elevation'] = numpy.zeros(len(data.y))
    if enable['rotation']:
        output['rotation'] = numpy.zeros(len(data.y))
    if enable['texture']:
        output['texture_id'] = numpy.zeros(len(data.y))
        output['texture_pos'] = numpy.zeros((len(data.y), 2))

    for i in xrange(len(data.X)):

        # get MNIST image
        frgd_img = to_img(data.X[i], 28)
        frgd_img = frgd_img.convert('L')

        if enable['rotation']:
            rot = rng.randint(0, 360)
            output['rotation'][i] = rot
            frgd_img = frgd_img.rotate(rot, Image.BILINEAR)

        frgd_img = frgd_img.resize((os, os), Image.BILINEAR)

        if enable['texture']:

            if textid is None:
                # draw a fresh random patch from the texture database for each
                # image. Note that texture #14 does not exist.
                tid = 14
                while tid == 14:
                    tid = rng.randint(1, 113)
            else:
                tid = textid

            patch_img, (px, py) = extract_patch(tid, os, downsample)
            patch_arr = to_array(patch_img)

            # store output details
            output['texture_id'][i] = tid
            output['texture_pos'][i] = (px, py)

            # generate binary mask for digit outline
            frgd_arr = to_array(frgd_img)
            mask_arr = frgd_arr > 0.1

            # copy contents of masked-MNIST image into background texture
            blend_arr = copy(patch_arr)
            blend_arr[mask_arr] = frgd_arr[mask_arr]

            # this now becomes the image to emboss
            frgd_img = to_img(blend_arr, os)

        azi = 45
        if enable['azimuth']:
            azi = rng.randint(0, 360)
            output['azimuth'][i] = azi
        ele = 18.
        if enable['elevation']:
            ele = rng.randint(0, 60)
            output['elevation'][i] = ele

        mboss_img = emboss(frgd_img, azi=azi, ele=ele)
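        # emboss() shades the (possibly texture-blended) digit as if lit from
        # azimuth `azi` and elevation `ele` degrees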
        mboss_arr = to_array(mboss_img)

        output['data'][i] = mboss_arr
        output['label'][i] = data.y[i]

        if verbose:
            pl.imshow(mboss_arr.reshape(os, os))
            pl.gray()
            pl.show()

    fname = 'mnistplus'
    if enable['azimuth']:
        fname += "_azi"
    if enable['rotation']:
        fname += "_rot"
    if enable['texture']:
        fname += "_tex"
    fp = open(fname + '.pkl', 'wb')
    pickle.dump(output, fp, protocol=pickle.HIGHEST_PROTOCOL)
    fp.close()
Exemple #46
0
###############################################################################
results_df = single_snp(variants_maf5_lines_filtered,
                        phenotypes_online,
                        leave_out_one_chrom=False,
                        output_file_name='Test-Fast-Lmm')

import pylab
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df.as_matrix(["Chr", "ChrPos", "PValue"]),
                      pvalue_line=1e-5,
                      xaxis_unit_bp=False)
pylab.show()

from fastlmm.util.stats import plotp
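# a well-calibrated test statistic should follow the diagonal in the Q-Q plot;
# systematic departures indicate inflation or deflation of the p-values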
plotp.qqplot(results_df["PValue"].values, xlim=[0, 5], ylim=[0, 5])

# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)

results_df.to_csv(
    '../Outputs/Fast-Lmm-Outputs/Quality-Control-Tergite-Online-Adjusted-Phenotype-Leave-False.txt',
    sep='\t')

from pysnptools.standardizer import Unit
from fastlmm.inference.fastlmm_predictor import _snps_fixup, _pheno_fixup, _kernel_fixup, _SnpTrainTest
test_snps = _snps_fixup(test_snps)
K_causal = test_snps.read_kernel(Unit()).standardize()
pylab.imshow(K_causal.val, cmap=pylab.gray(), vmin=0, vmax=1)
Exemple #47
0
    def __init__(self, parent, id, pos, size, style, name):
        self._init_ctrls(parent)
        ##Create a matplotlib figure/canvas in this panel
        ##the background colour will be the same as the panel
        ##the size will also be the same as the panel
        ##calculate size in inches
        pixels_width, pixels_height = self.GetSizeTuple()
        self.dpi = 96.0
        inches_width = pixels_width / self.dpi
        inches_height = pixels_height / self.dpi

        ##calculate colour in RGB 0 to 1
        colour = self.GetBackgroundColour()
        self.fig = Figure(figsize=(inches_width,inches_height), dpi = self.dpi\
            ,facecolor=(colour.Red()/255.0, colour.Green()/255.0, colour.Blue()/255.0)\
            ,edgecolor=(colour.Red()/255.0, colour.Green()/255.0, colour.Blue()/255.0))

        ##left : the left side of the subplots of the figure
        ##     |      right : the right side of the subplots of the figure
        ##     |      bottom : the bottom of the subplots of the figure
        ##     |      top : the top of the subplots of the figure
        ##     |      wspace : the amount of width reserved for blank space between subplots
        ##     |      hspace : the amount of height reserved for white space between subplots
        ##     |

        self.canvas = FigureCanvasWxAgg(self, -1, self.fig)

        ##now put everything in a sizer
        sizer = wx.BoxSizer(wx.VERTICAL)
        # This way of adding to sizer allows resizing
        sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
        self.SetSizer(sizer)
        self.Fit()

        ##now finally create the actual plot
        ##self.axes = self.fig.add_subplot(111)
        self.axes = self.fig.add_axes(
            (0.08, 0.08, 0.90, 0.85))  ##left,bottom,width,height
        self.naohistoryplot = self.axes.plot([0, 0], [0, 0],
                                             'r',
                                             animated=True)
        self.naohistoryx = list()
        self.naohistoryy = list()

        self.positionmeasurementplot = self.axes.plot([0, 0], [0, 0],
                                                      'blue',
                                                      marker='o',
                                                      markersize=5,
                                                      linewidth=0,
                                                      markeredgewidth=0,
                                                      animated=True)
        self.orientationmeasurementplot = self.axes.plot([0, 0], [0, 0],
                                                         'blue',
                                                         linewidth=2,
                                                         animated=True)
        self.shapeplot = self.axes.plot([0, 0], [0, 0],
                                        'blue',
                                        marker='o',
                                        markersize=2,
                                        linewidth=0,
                                        markeredgewidth=0,
                                        animated=True)
        self.estimateplot = self.axes.plot([0, 0], [0, 0],
                                           'red',
                                           linewidth=2,
                                           animated=True)
        self.particleplot = self.axes.quiver([0, 0], [0, 0], [1, 1],
                                             [0.5, -0.5], [1, 1],
                                             cmap=pylab.gray(),
                                             animated=True)

        ##plot formatting
        self.axes.set_title('Nao Image', fontsize='10')
        self.axes.set_xlabel('y (cm)', fontsize='10')
        self.axes.set_ylabel('x (cm)', fontsize='10')
        ticks = numpy.arange(-25, 25 + 5, 5)
        labels = [str(tick) for tick in ticks]
        self.axes.set_yticks(ticks)
        self.axes.set_yticklabels(labels, fontsize=8)
        self.axes.set_ylim(ticks[0], ticks[-1])
        ticks = -numpy.arange(-50, 50 + 5, 5)
        labels = [str(tick) for tick in ticks]
        self.axes.set_xticks(ticks)
        self.axes.set_xticklabels(labels, fontsize=8)
        self.axes.set_xlim(ticks[0], ticks[-1])

        self.canvas.draw()
        self.canvas.gui_repaint()

        # save the clean slate background -- everything but the animated line
        # is drawn and saved in the pixel buffer background
        self.background = self.canvas.copy_from_bbox(self.axes.bbox)
Exemple #48
0
    def __init__(self, dfore, bw):
        self.dfore = dfore
        self.bw = bw

    def sub_bg(self, frame):
        return (self.dfore[frame], self.bw[frame])


bg = BG(dfore, bw)
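# BG is a minimal stand-in for the background-model object Hindsight expects:
# sub_bg(frame) simply returns the precomputed (dfore, bw) arrays for that frame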

hind = hindsight.Hindsight(tracks, bg)
hind.initialize_milestones()
print 'milestones: '
print hind.milestones

print 'tracks: '
print tracks

for t in range(2, len(tracks)):
    hind.fixerrors(t)

mpl.gray()
for t in range(len(tracks)):

    mpl.imshow(bw[t])
    for [id, e] in tracks[t].iteritems():
        drawellipse(e, colors[id])
    mpl.axis('tight')
    mpl.title('Frame %d' % t)
    mpl.show()
Exemple #49
0
    def run(self, fname, i):
        print_info("# %s" % (fname))
        print_info("=== %s %-3d" % (fname, i))
        raw = ocrolib.read_image_gray(fname)
        self.dshow(raw, "input")
        # perform image normalization
        image = raw - amin(raw)
        if amax(image) == amin(image):
            print_info("# image is empty: %s" % (fname))
            return
        image /= amax(image)

        if not self.param['nocheck']:
            check = self.check_page(amax(image) - image)
            if check is not None:
                print_error(fname + " SKIPPED. " + check +
                            " (use -n to disable this check)")
                return

        # check whether the image is already effectively binarized
        if self.param['gray']:
            extreme = 0
        else:
            extreme = (np.sum(image < 0.05) +
                       np.sum(image > 0.95)) * 1.0 / np.prod(image.shape)
        if extreme > 0.95:
            comment = "no-normalization"
            flat = image
        else:
            comment = ""
            # if not, we need to flatten it by estimating the local whitelevel
            print_info("flattening")
            m = interpolation.zoom(image, self.param['zoom'])
            m = filters.percentile_filter(m,
                                          self.param['perc'],
                                          size=(self.param['range'], 2))
            m = filters.percentile_filter(m,
                                          self.param['perc'],
                                          size=(2, self.param['range']))
            m = interpolation.zoom(m, 1.0 / self.param['zoom'])
            if self.param['debug'] > 0:
                clf()
                imshow(m, vmin=0, vmax=1)
                ginput(1, self.param['debug'])
            w, h = minimum(array(image.shape), array(m.shape))
            flat = clip(image[:w, :h] - m[:w, :h] + 1, 0, 1)
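            # subtracting the estimated local white level (and clipping to
            # [0, 1]) flattens uneven illumination before thresholding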
            if self.param['debug'] > 0:
                clf()
                imshow(flat, vmin=0, vmax=1)
                ginput(1, self.param['debug'])

        # estimate low and high thresholds
        print_info("estimating thresholds")
        d0, d1 = flat.shape
        o0, o1 = int(self.param['bignore'] * d0), int(self.param['bignore'] *
                                                      d1)
        est = flat[o0:d0 - o0, o1:d1 - o1]
        if self.param['escale'] > 0:
            # by default, we use only regions that contain
            # significant variance; this makes the percentile
            # based low and high estimates more reliable
            e = self.param['escale']
            v = est - filters.gaussian_filter(est, e * 20.0)
            v = filters.gaussian_filter(v**2, e * 20.0)**0.5
            v = (v > 0.3 * amax(v))
            v = morphology.binary_dilation(v, structure=ones((int(e * 50), 1)))
            v = morphology.binary_dilation(v, structure=ones((1, int(e * 50))))
            if self.param['debug'] > 0:
                imshow(v)
                ginput(1, self.param['debug'])
            est = est[v]
        lo = stats.scoreatpercentile(est.ravel(), self.param['lo'])
        hi = stats.scoreatpercentile(est.ravel(), self.param['hi'])
        # rescale the image to get the gray scale image
        print_info("rescaling")
        flat -= lo
        flat /= (hi - lo)
        flat = clip(flat, 0, 1)
        if self.param['debug'] > 0:
            imshow(flat, vmin=0, vmax=1)
            ginput(1, self.param['debug'])
        binarized = 1 * (flat > self.param['threshold'])

        # output the normalized grayscale and the thresholded images
        #print_info("%s lo-hi (%.2f %.2f) angle %4.1f %s" % (fname, lo, hi, angle, comment))
        print_info("%s lo-hi (%.2f %.2f) %s" % (fname, lo, hi, comment))
        print_info("writing")
        if self.param['debug'] > 0 or self.param['show']:
            clf()
            gray()
            imshow(binarized)
            ginput(1, max(0.1, self.param['debug']))
        base, _ = ocrolib.allsplitext(fname)
        ocrolib.write_image_binary(base + ".bin.png", binarized)
        ocrolib.write_image_gray(base + ".nrm.png", flat)
        # print("########### File path : ", base+".nrm.png")
        # write_to_xml(base+".bin.png")
        return base + ".bin.png"
Exemple #50
0
    def predict(self, index):
        pl.gray()
        pl.matshow(random_forest.data.images[index])
        pl.show()
        prediction = self.classifier.predict([self.X[index]])
        print(f"prediction is => {prediction}")
Exemple #51
0
    def single_test(self, type):
        shape = np.random.randint(*range2d, size=2)
        # these rectangles are biased, but that shouldn't matter
        rect_min = [np.random.randint(0, a) for a in shape]
        rect_max = [
            np.random.randint(rect_min[i] + 1, shape[i] + 1)
            for i in range(len(shape))
        ]
        if True:
            #pixsize = 0.5 + np.random.random(size=2)
            pixsize = np.array([0.5, 0.5]) + np.random.random()
            origin = 10 * np.random.random(size=2)
        else:
            pixsize = (1., 1.)
            origin = (0., 0.)
        vg = astra.create_vol_geom(shape[1], shape[0],
                                   origin[0] - 0.5 * shape[0] * pixsize[0],
                                   origin[0] + 0.5 * shape[0] * pixsize[0],
                                   origin[1] - 0.5 * shape[1] * pixsize[1],
                                   origin[1] + 0.5 * shape[1] * pixsize[1])
        #print(vg)

        if type == 'parallel':
            pg = gen_random_geometry_parallel()
            projector_id = astra.create_projector('line', pg, vg)
        elif type == 'parallel_vec':
            pg = gen_random_geometry_parallel_vec()
            projector_id = astra.create_projector('line', pg, vg)
        elif type == 'fanflat':
            pg = gen_random_geometry_fanflat()
            projector_id = astra.create_projector('line_fanflat', pg, vg)
        elif type == 'fanflat_vec':
            pg = gen_random_geometry_fanflat_vec()
            projector_id = astra.create_projector('line_fanflat', pg, vg)

        data = np.zeros((shape[1], shape[0]), dtype=np.float32)
        data[rect_min[1]:rect_max[1], rect_min[0]:rect_max[0]] = 1
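        # the phantom is a unit-valued axis-aligned rectangle, so exact line
        # integrals through it can be computed analytically for comparison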

        sinogram_id, sinogram = astra.create_sino(data, projector_id)

        #print(pg)
        #print(vg)

        astra.data2d.delete(sinogram_id)

        astra.projector.delete(projector_id)

        a = np.zeros(np.prod(astra.functions.geom_size(pg)), dtype=np.float32)
        b = np.zeros(np.prod(astra.functions.geom_size(pg)), dtype=np.float32)
        c = np.zeros(np.prod(astra.functions.geom_size(pg)), dtype=np.float32)

        i = 0
        #print( origin[0] + (-0.5 * shape[0] + rect_min[0]) * pixsize[0], origin[0] + (-0.5 * shape[0] + rect_max[0]) * pixsize[0], origin[1] + (-0.5 * shape[1] + rect_min[1]) * pixsize[1], origin[1] + (-0.5 * shape[1] + rect_max[1]) * pixsize[1])
        for src, det in gen_lines(pg):
            #print(src,det)

            # NB: Flipped y-axis here, since that is how astra interprets 2D volumes
            # We compute line intersections with slightly bigger (cw) and
            # smaller (aw) rectangles, and see if the kernel falls
            # between these two values.
            (aw, bw, cw) = intersect_line_rectangle_interval(
                src, det,
                origin[0] + (-0.5 * shape[0] + rect_min[0]) * pixsize[0],
                origin[0] + (-0.5 * shape[0] + rect_max[0]) * pixsize[0],
                origin[1] + (+0.5 * shape[1] - rect_max[1]) * pixsize[1],
                origin[1] + (+0.5 * shape[1] - rect_min[1]) * pixsize[1], 1e-3)
            a[i] = aw
            b[i] = bw
            c[i] = cw
            i += 1
        # Add weight for pixel / voxel size
        try:
            detweight = pg['DetectorWidth']
        except KeyError:
            detweight = np.sqrt(pg['Vectors'][0, 4] * pg['Vectors'][0, 4] +
                                pg['Vectors'][0, 5] * pg['Vectors'][0, 5])
        a *= detweight
        b *= detweight
        c *= detweight
        a = a.reshape(astra.functions.geom_size(pg))
        b = b.reshape(astra.functions.geom_size(pg))
        c = c.reshape(astra.functions.geom_size(pg))

        # Check if sinogram lies between a and c
        y = np.min(sinogram - a)
        z = np.min(c - sinogram)
        x = np.max(np.abs(sinogram -
                          b))  # ideally this is small, but can be large
        # due to discontinuities in line kernel
        if z < 0 or y < 0:
            # show the offending data before the assertion below fails
            print(y, z, x)
            pylab.gray()
            pylab.imshow(data)
            pylab.figure()
            pylab.imshow(sinogram)
            pylab.figure()
            pylab.imshow(b)
            pylab.figure()
            pylab.imshow(a)
            pylab.figure()
            pylab.imshow(c)
            pylab.figure()
            pylab.imshow(sinogram - a)
            pylab.figure()
            pylab.imshow(c - sinogram)
            pylab.show()
        self.assertFalse(z < 0 or y < 0)
Exemple #52
0
def PlotStuff():
    files_list, max_list = get_stuff()

    x = 2
    y = 3

    INframeW = 1.5
    INframeH = 1.5
    INgapw = 0.05 * np.ones(x + 1)
    INgapw[0] = 0.2
    INgaph = 0.05 * np.ones(y + 1)
    INgaph[3] = 0.2
    INfig = FigArray(INframeW, INframeH, INgapw, INgaph)
    (W, H) = INfig.dimensions()
    plt.figure(1, figsize=(W, H))

    plt.text(-0.08,
             1.085,
             "L = 1 pix, C = 5 mag",
             fontsize=10,
             rotation=0,
             color='k')
    plt.text(0.53,
             1.085,
             "L = 4 pix, C = 2 mag",
             fontsize=10,
             rotation=0,
             color='k')

    plt.text(-0.15,
             1.04,
             "detector scale model",
             fontsize=9,
             rotation=90,
             color='k')
    plt.text(-0.15,
             0.65,
             "NIRISS sim observation",
             fontsize=9,
             rotation=90,
             color='k')
    plt.text(-0.15,
             0.25,
             "NIRCam sim observation",
             fontsize=9,
             rotation=90,
             color='k')

    plt.axis('off')

    ctr = 0
    for i in range(0, x):
        for j in range(0, y):

            disp = files_list[ctr]
            disp = np.power(disp, PWR)
            #disp = disp_psf[30:55,30:55]

            dispmax = max_list[ctr]
            dispmax = np.power(dispmax, PWR)
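            # raising both the image and its display maximum to the power PWR
            # applies a gamma-style stretch while keeping vmax consistent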

            a = plt.axes(INfig.axes(i + 1, j + 1))

            #plt.text(0,1, '%d' %(ctr), fontsize=6, rotation=0, color='w')

            p = plt.imshow(disp,
                           vmax=dispmax,
                           vmin=0,
                           cmap='hot',
                           interpolation='nearest')

            a.xaxis.set_major_locator(plt.NullLocator())
            a.yaxis.set_major_locator(plt.NullLocator())
            plt.gray()  # overrides current and sets default
            plt.axis("off")

            ctr += 1

    plt.savefig(LOC + STUFF_PLOT, dpi=150)
Exemple #53
0
    def _process_segment(self, page_image, page, page_xywh, page_id,
                         input_file, n):
        raw = ocrolib.pil2array(page_image)
        if len(raw.shape) > 2:
            raw = np.mean(raw, 2)
        raw = raw.astype("float64")
        # perform image normalization
        image = raw - amin(raw)
        if amax(image) == amin(image):
            LOG.info("# image is empty: %s" % (page_id))
            return
        image /= amax(image)

        # check whether the image is already effectively binarized
        if self.parameter['gray']:
            extreme = 0
        else:
            extreme = (np.sum(image < 0.05) +
                       np.sum(image > 0.95)) * 1.0 / np.prod(image.shape)
        if extreme > 0.95:
            comment = "no-normalization"
            flat = image
        else:
            comment = ""
            # if not, we need to flatten it by estimating the local whitelevel
            LOG.info("Flattening")
            m = interpolation.zoom(image, self.parameter['zoom'])
            m = filters.percentile_filter(m,
                                          self.parameter['perc'],
                                          size=(self.parameter['range'], 2))
            m = filters.percentile_filter(m,
                                          self.parameter['perc'],
                                          size=(2, self.parameter['range']))
            m = interpolation.zoom(m, 1.0 / self.parameter['zoom'])
            if self.parameter['debug'] > 0:
                clf()
                imshow(m, vmin=0, vmax=1)
                ginput(1, self.parameter['debug'])
            w, h = minimum(array(image.shape), array(m.shape))
            flat = clip(image[:w, :h] - m[:w, :h] + 1, 0, 1)
            if self.parameter['debug'] > 0:
                clf()
                imshow(flat, vmin=0, vmax=1)
                ginput(1, self.parameter['debug'])

        # estimate low and high thresholds
        LOG.info("Estimating Thresholds")
        d0, d1 = flat.shape
        o0, o1 = int(self.parameter['bignore'] * d0), int(
            self.parameter['bignore'] * d1)
        est = flat[o0:d0 - o0, o1:d1 - o1]
        if self.parameter['escale'] > 0:
            # by default, we use only regions that contain
            # significant variance; this makes the percentile
            # based low and high estimates more reliable
            e = self.parameter['escale']
            v = est - filters.gaussian_filter(est, e * 20.0)
            v = filters.gaussian_filter(v**2, e * 20.0)**0.5
            v = (v > 0.3 * amax(v))
            v = morphology.binary_dilation(v, structure=ones((int(e * 50), 1)))
            v = morphology.binary_dilation(v, structure=ones((1, int(e * 50))))
            if self.parameter['debug'] > 0:
                imshow(v)
                ginput(1, self.parameter['debug'])
            est = est[v]
        lo = stats.scoreatpercentile(est.ravel(), self.parameter['lo'])
        hi = stats.scoreatpercentile(est.ravel(), self.parameter['hi'])
        # rescale the image to get the gray scale image
        LOG.info("Rescaling")
        flat -= lo
        flat /= (hi - lo)
        flat = clip(flat, 0, 1)
        if self.parameter['debug'] > 0:
            imshow(flat, vmin=0, vmax=1)
            ginput(1, self.parameter['debug'])
        binarized = 1 * (flat > self.parameter['threshold'])

        # output the normalized grayscale and the thresholded images
        # print_info("%s lo-hi (%.2f %.2f) angle %4.1f %s" % (fname, lo, hi, angle, comment))
        LOG.info("%s lo-hi (%.2f %.2f) %s" % (page_id, lo, hi, comment))
        LOG.info("writing")
        if self.parameter['debug'] > 0 or self.parameter['show']:
            clf()
            gray()
            imshow(binarized)
            ginput(1, max(0.1, self.parameter['debug']))

        page_xywh['features'] += ',binarized'

        bin_array = array(255 * (binarized > ocrolib.midrange(binarized)), 'B')
        bin_image = ocrolib.array2pil(bin_array)

        file_id = input_file.ID.replace(self.input_file_grp, self.image_grp)
        if file_id == input_file.ID:
            file_id = concat_padded(self.image_grp, n)
        file_path = self.workspace.save_image_file(
            bin_image,
            file_id,
            page_id=page_id,
            file_grp=self.image_grp,
            force=self.parameter['force'])
        page.add_AlternativeImage(
            AlternativeImageType(filename=file_path,
                                 comments=page_xywh['features']))
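The method above follows the ocropus-style "nlbin" recipe: flatten the image against an estimated local white level, rescale between two percentiles, then apply a global threshold. As a rough standalone sketch of just those core steps, with hard-coded values standing in for self.parameter and with the empty-image check, the escale variance mask and the debug plots omitted (nlbin_sketch and its defaults are assumptions, not the processor's actual API):
# Rough standalone sketch of the nlbin-style binarization above.
import numpy as np
from scipy.ndimage import percentile_filter, zoom

def nlbin_sketch(raw, zoom_f=0.5, perc=80, rng=20, lo=5, hi=90, threshold=0.5):
    image = raw.astype("float64") - raw.min()
    image /= image.max()
    # estimate the local white level on a downscaled copy, then subtract it
    m = zoom(image, zoom_f)
    m = percentile_filter(m, perc, size=(rng, 2))
    m = percentile_filter(m, perc, size=(2, rng))
    m = zoom(m, 1.0 / zoom_f)
    w, h = np.minimum(image.shape, m.shape)
    flat = np.clip(image[:w, :h] - m[:w, :h] + 1, 0, 1)
    # percentile-based contrast rescaling, then a global threshold
    lo_v, hi_v = np.percentile(flat.ravel(), [lo, hi])
    flat = np.clip((flat - lo_v) / (hi_v - lo_v), 0, 1)
    return (flat > threshold).astype(np.uint8)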
Exemple #54
0
def plot_forecast_area(ttt, model, outputDir, current_labels = None, t_stop=None, BackgroundFile=None, ForeGroundRGBFile=None, labels_dir = '/opt/users/'+getpass.getuser()+'/PyTroll/scripts/labels/', in_msg = None):
    verbose = True
    if t_stop is None:
        t_stop = ttt
    
    ylabel = "area"

    while ttt <= t_stop:
        yearS, monthS, dayS, hourS, minS = string_date(ttt)
        if verbose:
            print("******** read cell properties from shelve")
        
        if current_labels is None:
              filename = 'Labels_%s.shelve'%(yearS+monthS+dayS+hourS+minS)
              myShelve = shelve.open(filename)
              labels_all = deepcopy(myShelve['labels'])
        else:
              labels_all = deepcopy(current_labels)
        if verbose:
            print(labels_all)
        
        unique_labels = np.unique(labels_all[labels_all>0])
        if verbose:
            print(("... cells with unique labels: ", unique_labels))
                
        forecasted_labels = {}
        forecasted_areas = []    
        at_least_one_cell = False        

        if verbose:
            print("*** computing history backward (", labels_dir, ")")

        for interesting_cell in unique_labels:

              forecasted_labels["ID"+str(interesting_cell)]=[]
              
              # calculate backward history for 1 hour and save it in labels_dir
              ind, area, displacement, time, center = history_backward(ttt,  interesting_cell, True, in_msg, ttt-timedelta(hours = 1), labels_dir=labels_dir) #-timedelta(minutes = 10))
              #                                                        current time, cell_id, backward?   time_stop
              if area is None or len(area)<=1:  
                  if verbose:
                        print("new cell or cell with COM outside domain")
                  continue
              at_least_one_cell = True 
                 
              if len(area)<=3:
                    # if history is too short, use linear extrapolation
                    t, y = future_properties(time, area, ylabel, "linear")
              else:
                    t, y = future_properties(time, area, ylabel, model)
              
              if False:
                    ind1, area1, displacement1, time1, center = history_backward(ttt, interesting_cell, False, ttt+timedelta(hours=1), labels_dir=labels_dir)
                    print("******** computed history forward")
            
                    t2 = time1 #[::-1]
                    y2 = area1 #[::-1]
            
            
              nx,ny = labels_all.shape
              #if verbose:
              #    print(nx,ny)
      
              label_cell = np.zeros(labels_all.shape)
              label_cell[labels_all==interesting_cell] = 1
              #pickle.dump(label_cell, open("test_label.p", "wb" ) )
              #quit()
              dt = 0
              if False:
                  figure_labels(label_cell, outputDir, ttt, dt, area_plot="ccs4", add_name = "_ID"+str(interesting_cell), verbose=verbose)
      
              area_current = sum(sum(label_cell))
      
              forecasted_areas.append(area_current)
      
              indx = np.where(t==ttt)[0] + 1
      
              if verbose:
                    print("*** compute displacement ")

              if displacement.shape[1]==2:
                    if len(displacement) == 0:
                        dx = 0
                        dy = 0
                    else:
                        try:
                            dx = int(round(displacement[:,0].mean()))
                            dy = int(round(displacement[:,1].mean()))
                        except ValueError:
                            print("VALUE ERROR")
                            print(displacement)
                            quit()
                    print("    computed displacement dx, dy = ", dx, dy)
      
              else:
                    print("wrong displacement")
                    quit()
      
              labels_in_time={}
              
              index_stop = 12
              
              
              print(("*** calculate forecasts for cell ID"+str(interesting_cell)))
              if verbose:
                  print("index   time    area  growth")
                  print("----------------------------")

              for i in range(13):
                  
                  dt += 5
                  #if verbose:
                  #    print("... for time ", dt ,", index ", indx + i)

                  if indx+i >= len(y):
                      index_stop = deepcopy(i)
                      break
                  else:    
                      area_new  = y[indx+i]
                      area_prev = y[indx+i-1]

                  #if verbose:
                  #    print("area px that will be grown ", area_current)
                  #    print("area forecasted ", area_new)
                  #    print("area forecasted prev ", area_prev)

                  ###growth = sqrt(float(area_new)/float(area_current))
                  
                  if area_new < 0 or len(area_new)==0 or len(area_prev)==0:
                      if verbose:
                          print("the cell is predicted to disappear")
                      index_stop = deepcopy(i)
                      break
                  
                  growth = sqrt(float(area_new)/float(area_prev))
                  #if verbose:
                  #    print("growing by ", growth)
                  #    print("dx ", dx)
                  #    print("dy ", dy)

                  if verbose:
                      print((indx + i, dt, area_new, growth)) 

                  #figure_labels(label_cell, outputDir, ttt, dt, area_plot="ccs4", add_name = "before")

                  shifted_label = resize_array(label_cell, dx, dy, nx, ny)

                  #figure_labels(shifted_label, outputDir, ttt, dt, area_plot="ccs4", add_name = "before_shifted")
                  #quit()
                  if verbose:
                      print(("   after shift ", sum(sum(shifted_label))))
                  
                  if sum(sum(shifted_label))==0: #the cell is outside the domain
                      break
                  
                  #center of mass before resizing
                  center_before = ndimage.measurements.center_of_mass(shifted_label)
                  center_before = np.rint(center_before)        
                  #if verbose:
                  #    print("   after shift ", sum(sum(shifted_label)))

                  resized_label = scipy.misc.imresize(shifted_label,float(growth),'nearest')
      
                  resized_label[resized_label >0] = 1
                          
                  temp_label = np.zeros((nx,ny))

                  #after resizing, the array is larger/smaller than nx,ny --> create new array that contains all the label region                  
                  if resized_label.shape[0]<nx:
                      temp_label[0:resized_label.shape[0],0:resized_label.shape[1]] = deepcopy(resized_label)
                  else:
                      x_start = max(min(np.nonzero(resized_label)[0])-1,0)
                      y_start = max(min(np.nonzero(resized_label)[1])-1,0)      
                      temp_label[0:min(nx,resized_label.shape[0]-x_start),0:min(ny,resized_label.shape[1]-y_start)] = deepcopy(resized_label[x_start:min(x_start+nx,resized_label.shape[0]),y_start:min(y_start+ny,resized_label.shape[1])])            
                  
                  #if verbose:
                  #    print(np.unique(temp_label))
                  #    print("   after resize ", sum(sum(temp_label)))

                  #figure_labels(resized_label, outputDir, ttt, dt, area_plot="ccs4", add_name = "before_shifted_resized")
      
                  #center of mass after resizing
                  center_after = ndimage.measurements.center_of_mass(temp_label)
                  center_after = np.rint(center_after)         
      
                  dx_new,dy_new = center_before - center_after
      
                  shifted_label = resize_array(temp_label,dx_new,dy_new, nx, ny)
                  #if verbose:
                  #    print("   after shift2 ", sum(sum(shifted_label)))
                  label_cell = np.zeros((nx,ny))

                  label_cell[0:,0:] = shifted_label[0:nx,0:ny]
      
                  if label_cell.shape[0] != nx or label_cell.shape[1] != ny:
                        print("incorrect size")
                        quit()
                  
                  forecasted_labels["ID"+str(interesting_cell)].append(deepcopy(label_cell))
                  
                  
                  #indx+=1
      
                  label_cell = shifted_label  # note: this overrides the zero-padded copy built just above and carries the shifted label into the next forecast step
      
                  area_current = sum(sum(label_cell))
                  if verbose:
                      print(("end ", area_current))
                  forecasted_areas.append(area_current)
                  #add check to make sure the area you produced is more or less correct
      
      
              t_temp = deepcopy(ttt)
              forecasted_time = []
      
              for gg in range(len(forecasted_areas)):
                  forecasted_time.append(t_temp)
                  t_temp+=timedelta(minutes = 5)
      
              """
              if verbose:
                print("******** produce images")

              if False:
                  t_composite = deepcopy(ttt)
                  for i in range(min(len(y),index_stop)):
          
                      yearSf, monthSf, daySf, hourSf, minSf = string_date(t_composite)
                      contour_file = outputDir + "Forecast"+yearS+monthS+dayS+"_Obs"+hourS+minS+"_Forc"+hourSf+minSf+"_ID"+str(interesting_cell)+".png"    
                      type_image = "_HRV"
                      #background_file = "/data/COALITION2/PicturesSatellite//"+yearS+"-"+monthS+"-"+dayS+"/"+yearS+"-"+monthS+"-"+dayS+type_image+"_"+"ccs4"+"/MSG"+type_image+"-"+"ccs4"+"_"+yearS[2:]+monthS+dayS+hourS+minS+".png"
                      background_file = "/data/COALITION2/PicturesSatellite/LEL_results_wind/"+yearS+"-"+monthS+"-"+dayS+"/RGB-HRV_dam/"+yearS+monthS+dayS+"_"+hourS+minS+"*.png"            
                      out_file1 = create_dir( outputDir+"/Contours/")+"Obs"+hourS+minS+"_Forc"+hourSf+minSf+"_ID"+str(interesting_cell)+".png"
                      if verbose:
                          print("... create composite "+contour_file+" "+background_file+" "+out_file1)
                      #subprocess.call("/usr/bin/composite "+contour_file+" "+background_file+" "+out_file1, shell=True)
                      if verbose:
                          print("... saved composite: display ", out_file1, " &")
                      t_composite+=timedelta(minutes=5)
              """
              """
              if False:
                  fig, ax = plt.subplots()
                  ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
                  ax.plot_date(t, y, 'o',label="Fit and extrapolation")
                  ax.plot_date(forecasted_time, forecasted_areas, '*',label="forecasted")
                  ax.plot_date(t2, y2, '*', label="Observations")
                  #ax.set_xlim([t[0]-timedelta(minutes = 5), t2[-1]+timedelta(minutes = 5)])
                  ax.set_ylabel("area")
                  ax.legend(loc="best");
                  fig.savefig(yearS+monthS+dayS+"_"+hourS+minS+"_AreaInTime"+"ID"+str(interesting_cell)+".png")
                  plt.close( fig)    
              """      

        t_composite = deepcopy(ttt)
        
        # foreground RGB image that the COALITION2 forecast will be composited with (function argument or default path)
        if ForeGroundRGBFile is None:
            currentRGB_im_filename = "/opt/users/"+getpass.getuser()+"/PyTroll/scripts/Mecikalski/cosmo/Channels/indicators_in_time/RGB_dam/"+yearS+monthS+dayS+"_"+hourS+minS+"*ccs4.png"
        else:
            currentRGB_im_filename = ForeGroundRGBFile
        
        currentRGB_im = glob.glob(currentRGB_im_filename)
        if len(currentRGB_im)<1:  
            print("No file found:", currentRGB_im_filename)

        # get background file 
        if BackgroundFile is None:
            background_im_filename = '/data/COALITION2/PicturesSatellite/LEL_results_wind/'+yearS+'-'+monthS+'-'+dayS+'/RGB-HRV_dam/'+yearS+monthS+dayS+'_'+hourS+minS+'*.png'
        else:
            background_im_filename = BackgroundFile
        background_im = glob.glob(background_im_filename)

        if len(background_im)>0:
            im = plt.imread(background_im[0])
            back_exists = True
        else:
            back_exists = False
        #img1 = Image.imread(currentRGB_im[0])

        obj_area = get_area_def("ccs4")
        fig,ax = prepare_figure(obj_area)
        if in_msg.nrt == False:
              if back_exists:
                  plt.imshow(np.flipud(im))   
              else:
                  # now read the data we would like to forecast
                  global_data = GeostationaryFactory.create_scene(in_msg.sat_str(), in_msg.sat_nr_str(), "seviri", ttt)
                  #global_data_RGBforecast = GeostationaryFactory.create_scene(in_msg.sat, str(10), "seviri", time_slot)
      
                  # area we would like to read
                  area2load = "EuropeCanary95" #"ccs4" #c2"#"ccs4" #in_windshift.ObjArea
                  area_loaded = get_area_def(area2load )#(in_windshift.areaExtraction)  
  
                  # load product, global_data is changed in this step!
                  area_loaded = load_products(global_data, ['IR_108'], in_msg, area_loaded ) 
                  data = global_data.project("ccs4")                  
                  plt.imshow(np.flipud(data['IR_108'].data),cmap = pylab.gray())
        
        # background file form function argument or default
        if BackgroundFile is None:
            background_im_filename = '/data/COALITION2/PicturesSatellite/LEL_results_wind/'+yearS+'-'+monthS+'-'+dayS+'/RGB-HRV_dam/'+yearS+monthS+dayS+'_'+hourS+minS+'*.png'
        else:
            if verbose:
                print("... BackgroundFile ", BackgroundFile)
            background_im_filename = BackgroundFile
            
        # searching background file (wildcards are possible)
        background_im = glob.glob(background_im_filename)
        if len(background_im) == 0:
            print("*** Error in plot_forecast_area (test_forecast.py)")
            print("    no background file found: ", background_im_filename)
            quit()
        elif len(background_im) > 1:
            print("*** Warning in plot_forecast_area (test_forecast.py)")
            print("    several background files found: ", background_im)

        # read background file
        im = plt.imread(background_im[0])
        
        #img1 = Image.imread(currentRGB_im[0])
        # note: this appears to open a fresh figure; the one prepared above
        # (with the HRV / IR_108 background) is not reused here
        obj_area = get_area_def("ccs4")
        fig,ax = prepare_figure(obj_area)
        #plt.imshow(np.flipud(im))

        # plot contour lines for all cells

        if at_least_one_cell:      
              time_wanted_minutes = [5,20,40,60] 
              time_wanted = []
              color_wanted = []
              vmax = 70
              
              for t_want in time_wanted_minutes:
                  time_wanted.append((t_want-5)/5)
                  tmp = (mpl.cm.Blues(float(t_want)/vmax))
                  tmp1 = [tmp]
                  color_wanted.append(tmp1)
              
              all_labels_in_time = np.zeros((nx,ny))
              
              for i in range(len(time_wanted)-1,-1,-1):
                  ind_time = time_wanted [i]
                  
                  for key, forc_labels in forecasted_labels.items():  #forecasted_labels["ID"+str(interesting_cell)]=[]  
                      
                      if len(forc_labels)>ind_time:
                          #plt.contour(np.flipud(forc_labels[ind_time]),[0.5],colors = color_wanted_cont[i]) #colors='w') #
                          
                          all_labels_in_time[forc_labels[ind_time]>0] = time_wanted_minutes[i]                     
              
              forc_labels_tmp = np.ma.masked_where(all_labels_in_time==0,all_labels_in_time)
              plt.contourf(np.flipud(forc_labels_tmp), cmap="Blues", vmin=0, vmax=vmax)    
              
              
              if False:    
                    for i in range(len(time_wanted)):
                        
                        ind_time = time_wanted [i]
                        
                        for key, forc_labels in forecasted_labels.items():  #forecasted_labels["ID"+str(interesting_cell)]=[]  
                            
                            if len(forc_labels)>ind_time:
                                plt.contour(np.flipud(forc_labels[ind_time]),[0.5],colors = color_wanted[i]) #colors='w') #
        else:
            print("*** Warning, no COALITION2 cell detected ")
            print("    produce empty figure ...")
        
        
        PIL_image = fig2img ( fig )
        
        standardOutputName = in_msg.standardOutputName.replace('%y%m%d%H%M',strftime('%y%m%d%H%M',ttt.timetuple()))
        
        #PIL_image.paste(img1, (0, 0), img1)
        if in_msg is None:
            PIL_image.save(create_dir(outputDir)+"Forecast"+yearS+monthS+dayS+"_Obs"+hourS+minS+".png")
            path = (outputDir)+yearS+monthS+dayS+hourS+minS+"Forecast.png"
        else:

            # dic_figure={}
            # if in_msg.nrt == True:
            #     dic_figure['rgb']= 'Forecast' #'C2rgbForecastTMP-IR-108'
            # else:
            #     dic_figure['rgb']= 'Forecast-C2rgb'
            # dic_figure['area']='ccs4'
            # PIL_image.save(create_dir(outputFile)+standardOutputName%dic_figure)
            # path = (outputFile)+standardOutputName%dic_figure
            # if in_msg.nrt == False:
            #     dic_figure={}
            #     dic_figure['rgb']= 'C2rgb-Forecast-HRV' #'C2rgbForecastTMP-IR-108'
            #     dic_figure['area']='ccs4'
            #     path_output = (outputFile)+standardOutputName%dic_figure
            #     print ("creating composite: ",currentRGB_im[0],"+",path)
        #        subprocess.call("/usr/bin/composite "+currentRGB_im[0]+" "+path+" "+path_output, shell=True)
        
        #print ("... display ",path_output," &")

            #dic_figure={}
            #dic_figure['rgb']= 'Forecast' #'C2rgbForecastTMP-IR-108'
            #dic_figure['area']='ccs4'
            outputFile = format_name(create_dir(outputDir)+in_msg.outputFile, ttt, rgb='Forecast', area='ccs4', sat_nr=int(in_msg.sat_nr))
            #PIL_image.save(create_dir(outputDir)+in_msg.outputFile%dic_figure)
            PIL_image.save(outputFile)
            #path = (outputDir)+in_msg.outputFile%dic_figure
            path = outputFile

        print("... display ",path," &")

        plt.close( fig)                             
        if True:
            if verbose:
                print("path foreground", currentRGB_im[0])
            
            if in_msg is None:
                path_composite = (outputFile)+yearS+monthS+dayS+"_Obs"+hourS+minS+"Forecast_composite.png"     
            else:
                # dic_figure={}
                # dic_figure['rgb']='C2rgb-Forecast-HRV'
                # dic_figure['area']='ccs4'
                # path_composite = (outputFile)+standardOutputName%dic_figure
                #dic_figure = {}
                #dic_figure['rgb'] = "_HRV" #'IR-108'
                #dic_figure['area']='ccs4'
                #path_IR108 = (outputFile)+standardOutputName%dic_figure

                #dic_figure={}
                #dic_figure['rgb'] = 'C2rgbForecast-IR-108'
                #dic_figure['area'] = 'ccs4'
                #path_composite = (outputDir) + in_msg.outputFile%dic_figure
                path_composite = format_name( outputDir+in_msg.outputFile, ttt, rgb='C2rgbForecast-IR-108', area='ccs4', sat_nr=int(in_msg.sat_nr))
                #dic_figure = {}
                #dic_figure['rgb'] = 'IR-108'
                #dic_figure['area']='ccs4'
                #path_IR108 = (outputDir) + in_msg.outputFile%dic_figure
                path_IR108 = format_name( outputDir+in_msg.outputFile, ttt, rgb='IR-108', area='ccs4', sat_nr=int(in_msg.sat_nr))

                
            if in_msg.nrt == True:
                if verbose:
                    print("---starting post processing")
                #if area in in_msg.postprocessing_areas:
                in_msg.postprocessing_composite = deepcopy(in_msg.postprocessing_composite2)

                postprocessing(in_msg, ttt, in_msg.sat_nr, "ccs4")
            #print ("... display",path_composite,"&")
            if in_msg.scpOutput and in_msg.nrt == True and False: #not necessary because already done within postprocessing
                print("... secure copy "+path_composite+ " to "+in_msg.scpOutputDir) #
                subprocess.call("scp "+in_msg.scpID+" "+path_composite  +" "+in_msg.scpOutputDir+" 2>&1 &", shell=True)    #BackgroundFile   #
        
        if False:
            for i in range(12):    
                  contour_files = glob.glob(outputDir + "Forecast"+yearS+monthS+dayS+"_Obs"+hourS+minS+"_Forc"+hourSf+minSf+"_ID*.png")
                  if verbose:
                            print(("Files found: ",contour_files))
                  if len(contour_files)>0:
                      background_file = "/data/COALITION2/PicturesSatellite/LEL_results_wind/"+yearS+"-"+monthS+"-"+dayS+"/RGB-HRV_dam/"+yearS+monthS+dayS+"_"+hourS+minS+"*.png"
                      out_file1 = create_dir( outputDir+"/Contours/")+"Obs"+hourS+minS+"_Forc"+hourSf+minSf+".png"
                  t_composite+=timedelta(minutes=5)  
  
        ttt += timedelta(minutes = 5)
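The forecast loop above grows each cell label by sqrt(area_new/area_prev), so the pixel count scales roughly with the predicted area, and then re-centers the grown label on its previous center of mass. Ignoring the dx/dy advection shift, a compressed, hypothetical sketch of just that step, using scipy.ndimage.zoom in place of the deprecated scipy.misc.imresize (grow_and_recenter is an illustrative name, not a function from this module):
# Hypothetical sketch of the grow-and-recenter step; `label_cell` is a binary
# mask on the nx x ny grid, `area_new`/`area_prev` come from the fitted area curve.
import numpy as np
from scipy import ndimage

def grow_and_recenter(label_cell, area_new, area_prev):
    # assumes a non-empty input mask
    nx, ny = label_cell.shape
    # scale linear dimensions by sqrt of the area ratio so the pixel count
    # grows roughly by area_new / area_prev
    growth = np.sqrt(float(area_new) / float(area_prev))
    center_before = np.array(ndimage.center_of_mass(label_cell))
    grown = ndimage.zoom(label_cell.astype(float), growth, order=0) > 0.5
    # crop / pad back onto the original grid
    out = np.zeros((nx, ny))
    gx, gy = grown.shape
    out[:min(nx, gx), :min(ny, gy)] = grown[:min(nx, gx), :min(ny, gy)]
    if not out.any():
        return out  # the cell shrank away or left the domain
    # shift so the center of mass stays where it was before resizing
    shift = np.rint(center_before - np.array(ndimage.center_of_mass(out))).astype(int)
    return np.roll(np.roll(out, shift[0], axis=0), shift[1], axis=1)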
Exemple #55
0
                for jj in range(Nf):
                    num += (filt[Mf-1-ii, Nf-1-jj] * image[i-Mf2+ii, j-Nf2+jj])
            result[i, j] = num
    return result
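The snippet above starts mid-function; for readability, here is a plausible reconstruction of the full naive nested-loop filter the fragment appears to belong to (a sketch consistent with the lines shown, not the original source; filter2d_sketch is an illustrative name).
import numpy as np

def filter2d_sketch(image, filt):
    M, N = image.shape
    Mf, Nf = filt.shape
    Mf2 = Mf // 2
    Nf2 = Nf // 2
    result = np.zeros_like(image)
    # plain nested-loop convolution; the filter is flipped via Mf-1-ii / Nf-1-jj,
    # matching the scipy.ndimage.convolve comparison below
    for i in range(Mf2, M - Mf2):
        for j in range(Nf2, N - Nf2):
            num = 0.0
            for ii in range(Mf):
                for jj in range(Nf):
                    num += filt[Mf - 1 - ii, Nf - 1 - jj] * image[i - Mf2 + ii, j - Nf2 + jj]
            result[i, j] = num
    return result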

image = lena()  # scipy.misc.lena (removed in modern SciPy); any 2-D grayscale array works here
filter = ones((7,7), dtype='int32')  # numpy.ones; note that this shadows the built-in name `filter`

import time
start = time.time()
result = filter2d(image, filter)
duration = time.time() - start

from scipy.ndimage import convolve
start = time.time()
result = convolve(image, filter)
duration2 = time.time() - start

print "Time for LLVM code = %f\nTime for convolve = %f" % (duration, duration2)

from pylab import subplot, imshow, show, title, gray
subplot(1,2,1)
imshow(image)
title('Original Image')
gray()
subplot(1,2,2)
imshow(result)
title('Filtered Image')
gray()
show()
def s(img):
    pl.imshow(img)
    pl.gray()
    pl.show()
def show_img(img):
    #show image
    thresh = cv2.adaptiveThreshold(img, 255, 1, 1, 11, 15)
    _ = pl.imshow(thresh, cmap=pl.gray())
    _ = pl.axis("off")
    pl.show()
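The positional arguments "1, 1" in the cv2.adaptiveThreshold call above are the adaptive method and the threshold type. Spelled out with OpenCV's named constants, and with the imports the fragment relies on made explicit (show_img_named is an illustrative rewrite, not part of the original module):
# Same call as in show_img, but with named OpenCV constants and explicit imports.
import cv2
import pylab as pl

def show_img_named(img):
    thresh = cv2.adaptiveThreshold(img, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,  # adaptive method (== 1)
                                   cv2.THRESH_BINARY_INV,           # threshold type (== 1)
                                   11,   # blockSize: neighbourhood used for the local threshold
                                   15)   # C: constant subtracted from the local weighted mean
    pl.imshow(thresh, cmap=pl.gray())
    pl.axis("off")
    pl.show()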
Exemple #58
0
def s(fig):
    pl.imshow(fig)
    pl.gray()
    pl.show()
Exemple #59
0
from scipy import ndimage
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from matplotlib.colors import NoNorm
import pylab
from scipy import misc
import numpy as np

#http://scikit-image.org/docs/dev/user_guide/transforming_image_data.html
#im_image = np.uint8(100*np.ones((28, 28)))
#im_image =100.*np.ones((28,28), dtype=np.float32)

im_image = 100 * np.ones((28, 28), dtype=np.uint8)
im_image[1:3, 1:3] = 200
plt.subplot(211)

plt.imshow(im_image, cmap=pylab.gray(), norm=NoNorm())  # pylab.gray() sets the gray colormap and returns None, so imshow falls back to the (now gray) default

plt.imsave('C:/pythonwork/images/Pimage1.png',
           im_image)  # uses the Image module (PIL)

#convert image (np.array) to binary image
#https://stackoverflow.com/questions/40449781/convert-image-np-array-to-binary-image
im_label = im_image < 120
#im_label=np.zeros((28,28), dtype=bool)
#im_label[1:3, 1:3] =np.array([[True, True] , [True, True]])
plt.subplot(212)
plt.imshow(im_label, cmap=plt.cm.binary)
plt.show()
plt.imsave('C:/pythonwork/labels/Plabel1.png',
           im_label)  # uses the Image module (PIL)
import numpy as np

filename = r'./image_processing/example.png'
'''
img = plt.imread(filename)
plt.imshow(img) 
plt.show()
'''

import pylab as plt
from PIL import Image
import numpy as np
img = Image.open(filename)
print(img)
img_gray = img.convert('L')  # convert to a grayscale image
img = np.array(img)
print(img.shape)
print(img[0])
img_gray = np.array(img_gray)
print(img_gray.shape)
print(img_gray[0])

plt.imshow(
    img
)  # or plt.imshow(img / 255.0); like MATLAB, matplotlib expects float images in the 0-1 range for imshow, while uint8 images should be in the 0-255 range
plt.show()
plt.imshow(img_gray, cmap=plt.gray())  # set the cmap parameter to display a grayscale image
plt.show()
plt.imshow(Image.open(filename))  # plt.imshow can actually display a PIL image directly
plt.show()
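As the comments above note, float grayscale images are conventionally kept in the 0-1 range and uint8 images in 0-255. A tiny self-contained sketch that makes the convention explicit with vmin/vmax (the array contents are synthetic, for illustration only):
# Small sketch: the same grayscale ramp shown as uint8 (0-255) and as float (0-1),
# with explicit vmin/vmax so the scaling convention is visible.
import numpy as np
import matplotlib.pyplot as plt

gray_u8 = np.linspace(0, 255, 28 * 28).reshape(28, 28).astype(np.uint8)
gray_f = gray_u8 / 255.0  # float image in the 0-1 range

fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(gray_u8, cmap='gray', vmin=0, vmax=255)
ax1.set_title('uint8, 0-255')
ax2.imshow(gray_f, cmap='gray', vmin=0.0, vmax=1.0)
ax2.set_title('float, 0-1')
plt.show()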