Example No. 1
 def pickPoints(self):
     '''
     Allows the user to select points in the images one at a time instead of 
     automatically building the panorama  
      NOTE: the images should be ordered so that 2 overlaps with 1
           and 3 overlaps with 2 and so on
     '''
     for j in range(len(self.panImages)-1):
         key = None
         while key != 27:
             
             self.panImages[j].setNpImg( imgutil.cv2array(self.panImages[j].getImg()))
               
             # draw points
             imgOneInfo = self.panImages[j]
             pts1 = imgOneInfo.getGrab().getPoints()
             for pxy in pts1:
                 cv.Rectangle(imgOneInfo.getImg(), (pxy[0]-15, pxy[1]-15), (pxy[0]+15, pxy[1]+15), (0, 0, 255)) 
             
             # draw points
             imgTwoInfo = self.panImages[j+1]
             pts2 = imgTwoInfo.getGrab().getPoints()
             for pxy in pts2:
                 cv.Rectangle(imgTwoInfo.getImg(), (pxy[0]-15, pxy[1]-15), (pxy[0]+15, pxy[1]+15), (0, 0, 255))   
             
             
             cv.ShowImage(self.enumImageNames[j], self.panImages[j].getImg())
             cv.ShowImage(self.enumImageNames[j+1], self.panImages[j+1].getImg())
            
             # handle keys
             key = cv.WaitKey(100)
             if key >= 0 and chr(key) == 'c':
                 # display
                 for i in range(len(self.files)):
                     self.panImages[i].getGrab().clear(6)
         
         #clears the grab so it can be used with the next image
         self.panImages[j+1].getGrab().clear(6)
         
         # returns if there are too few points
         if pts1.shape[0] < 4 or pts2.shape[0]  < 4:
             return 
 
         #points from the second image are being warped to the first
         homography = transform.homography(pts2, pts1)
         self.homographies.append(homography)
         self.panImages[j+1].setHomography(homography)
     
     #setting the np Image of the last picture
     self.panImages[-1].setNpImg( imgutil.cv2array(self.panImages[-1].getImg()))
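Example No. 1 hands the picked point pairs to transform.homography, whose implementation is not shown on this page. As a rough sketch of what such a routine typically does (a direct linear transform solved with SVD; this is an assumption, not the project's actual code):

import numpy

def homography_dlt_sketch(src, dst):
    # Hypothetical stand-in for transform.homography: estimate the 3x3 matrix H
    # with dst ~ H * src from N >= 4 corresponding points given as Nx2 arrays.
    src = numpy.asarray(src, dtype=float)
    dst = numpy.asarray(dst, dtype=float)
    rows = []
    for (x, y), (u, v) in zip(src, dst):
        rows.append([x, y, 1, 0, 0, 0, -u * x, -u * y, -u])
        rows.append([0, 0, 0, x, y, 1, -v * x, -v * y, -v])
    A = numpy.array(rows)
    # the solution is the right singular vector with the smallest singular value
    _, _, vt = numpy.linalg.svd(A)
    H = vt[-1].reshape((3, 3))
    return H / H[2, 2]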
Example No. 2
    def generateData(self):
        '''
        Acquires frame data from the video.
        '''
        super(VideoCV, self).generateData()

        # the ui will try to load a single frame multiple times
        # TODO: fix the playback model to use a single clock.
        if self._lastIndex == self.getIndex():
            return

        # try to set the frame position if we can't just grab the next one
        if self._lastIndex is not None and self._lastIndex != self.getIndex() - 1:
            retval = cv.SetCaptureProperty(self._video,
                                           cv.CV_CAP_PROP_POS_FRAMES,
                                           self.getIndex())
            # print("seeking frame {0:4} from {1:4} => {2}".format(self.getIndex(), self._lastIndex, retval))

            # reload video file if there was a problem (often happens after reaching the end of a video)
            if retval == 0:
                self.setFilename(self._filename)

        # grab the next frame
        cvimg = cv.QueryFrame(self._video)
        self._lastIndex = self.getIndex()

        # set as output
        output = imgutil.cv2array(cvimg)[:, :, numpy.r_[2, 1, 0]]
        self.getOutput(0).setData(output)
Example No. 3
def toGrayscale(img):
    newimg=cv.fromarray(img)
    newConvertedImage = cv.CreateImage ((50, 50), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(newimg, newConvertedImage, cv.CV_BGR2GRAY)
    import imgutil
    newimg=imgutil.cv2array(newConvertedImage)
    return newimg
Example No. 4
def readImageFile(filename):
    '''
    Reads an image from file, using an appropriate method based on the extension
    of the file.  Numpy is used to handle npy and raw images.  OpenCV handles
    common image formats (tif, jpg, png), with proper red-blue channel swapping.
    
    Returns the image as a numpy array.
    '''

    if filename.split(".")[-1] == "npy":
        # load it using numpy
        npimg = numpy.load(filename)

    elif filename.split(".")[-1] == "raw":
        # handle a few different raw frame sizes, based on the file size
        npimg = numpy.fromfile(filename, dtype=numpy.uint16)
        if npimg.size == 1036 * 1388:
            npimg = npimg.reshape((1036, 1388))
        elif npimg.size == 518 * 692:
            npimg = npimg.reshape((518, 692))
        elif npimg.size == 484 * 648:
            npimg = npimg.reshape((484, 648))
        else:
            print "Unknown raw image size:", npimg.shape

    else:
        # load the image using OpenCV
        cvimg = cv.LoadImage(filename)
        npimg = imgutil.cv2array(cvimg)[:, :, numpy.r_[2, 1, 0]]

    return npimg
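A quick illustration of how readImageFile might be called (the filenames here are made up for the example):

npimg = readImageFile("frame0001.raw")   # 16-bit raw sensor frame -> 2-D uint16 array
photo = readImageFile("left.jpg")        # OpenCV load with BGR -> RGB swap -> HxWx3 array
print photo.shape, photo.dtype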
Example No. 5
    def setupImages(self, cvimg, display=False):
        '''
        Sets up the image passed in to find the Harris corners and patches;
        optionally displays some of the features found
        '''
        blue = .114
        green = .587
        red = .299
        npimg = imgutil.cv2array(cvimg)

        grey = npimg[:,:,0]*blue + npimg[:,:,1]*green + npimg[:,:,2]*red

        harrisPoints = self.harris(grey)

        self.drawSquare(cvimg, harrisPoints, self.featureColor)

        patches = self.featureDescriptor(grey, harrisPoints, 6, 1)

        # displays the descriptors if the user chooses to
        if display == True:
            self.displayDescriptors(patches)

        w = patches.shape[0]
        N = patches.shape[2]
        # reshapes the descriptors
        reshapePatches = patches.reshape((w**2, N)).transpose()

        return reshapePatches, harrisPoints
Example No. 6
def testRotate():
	import cv
	import optparse
	import imgutil
	
	# handle command line arguments
	parser = optparse.OptionParser()
	parser.add_option("-f", "--filename", help="input image file")
	parser.add_option("-a", "--angle", help="rotation angle in degrees", default=0, type="float")
	options, remain = parser.parse_args()
	if options.filename is None:
		parser.print_help()
		exit(0)
	
	# load image
	cvimg = cv.LoadImage(options.filename)
	npimg = imgutil.cv2array(cvimg)
	
	# rotate image
	h,w = npimg.shape[0:2]
	print h, w
	A = makeCenteredRotation(options.angle, (w/2.0, h/2.0))
	nprot = transformImage(npimg, A, "auto")
	
	imgutil.imageShow(npimg, "original")
	imgutil.imageShow(nprot, "rotate")
	cv.WaitKey(0)
Example No. 7
def test_rotate():
    import cv
    import optparse
    import imgutil

    # handle command line arguments
    parser = optparse.OptionParser()
    parser.add_option("-f", "--filename", help="input image file")
    parser.add_option("-a",
                      "--angle",
                      help="rotation angle in degrees",
                      default=0,
                      type="float")
    options, remain = parser.parse_args()
    if options.filename is None:
        parser.print_help()
        exit(0)

    # load image
    cvimg = cv.LoadImage(options.filename)
    npimg = imgutil.cv2array(cvimg)

    # rotate image
    h, w = npimg.shape[0:2]
    print h, w
    A = make_centered_rotation(options.angle, (w / 2.0, h / 2.0))
    nprot = transform_image(npimg, A, "auto")

    imgutil.image_show(npimg, "original")
    imgutil.image_show(nprot, "rotate")
    cv.WaitKey(0)
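Both rotation tests rely on makeCenteredRotation / make_centered_rotation, which is not listed on this page. A plausible sketch (an assumption about its behavior: translate the center to the origin, rotate, translate back) would be:

import numpy

def make_centered_rotation_sketch(angle_deg, center):
    # Hypothetical helper: 3x3 homogeneous transform rotating by angle_deg
    # (degrees) about the point center = (cx, cy).
    cx, cy = center
    theta = numpy.radians(angle_deg)
    c, s = numpy.cos(theta), numpy.sin(theta)
    to_origin = numpy.array([[1, 0, -cx], [0, 1, -cy], [0, 0, 1]], dtype=float)
    rotate = numpy.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=float)
    back = numpy.array([[1, 0, cx], [0, 1, cy], [0, 0, 1]], dtype=float)
    return numpy.dot(back, numpy.dot(rotate, to_origin))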
Example No. 8
def readImageFile(filename):
    '''
    Reads an image from file, using an appropriate method based on the extension
    of the file.  Numpy is used to handle npy and raw images.  OpenCV handles
    common image formats (tif, jpg, png), with proper red-blue channel swapping.
    
    Returns the image as a numpy array.
    '''
    
    if filename.split(".")[-1] == "npy":
        # load it using numpy
        npimg = numpy.load(filename)
        
    elif filename.split(".")[-1] == "raw":
        # handle a few different raw frame sizes, based on the file size
        npimg = numpy.fromfile(filename, dtype=numpy.uint16)
        if npimg.size == 1036 * 1388:
            npimg = npimg.reshape((1036, 1388))
        elif npimg.size == 518 * 692:
            npimg = npimg.reshape((518, 692))
        elif npimg.size == 484 * 648:
            npimg = npimg.reshape((484, 648))
        else:
            print "Unknown raw image size:", npimg.shape
    
    else:
        # load the image using OpenCV
        cvimg = cv.LoadImage(filename)
        npimg = imgutil.cv2array(cvimg)[:,:,numpy.r_[2, 1, 0]]
        
    return npimg
Example No. 9
    def generateData(self):
        '''
        Acquires frame data from the video.
        '''
        super(VideoCV, self).generateData()
        
        # the ui will try to load a single frame multiple times
        # TODO: fix the playback model to use a single clock.
        if self._lastIndex == self.getIndex():
            return
        
        # try to set the frame position if we can't just grab the next one
        if self._lastIndex is not None and self._lastIndex != self.getIndex() - 1:
            retval = cv.SetCaptureProperty(self._video, cv.CV_CAP_PROP_POS_FRAMES, self.getIndex())
            # print("seeking frame {0:4} from {1:4} => {2}".format(self.getIndex(), self._lastIndex, retval))

            # reload video file if there was a problem (often happens after reaching the end of a video)
            if retval == 0:
                self.setFilename(self._filename)
        
        # grab the next frame
        cvimg = cv.QueryFrame(self._video)
        self._lastIndex = self.getIndex()
        
        # set as output
        output = imgutil.cv2array(cvimg)[:,:,numpy.r_[2, 1, 0]]
        self.getOutput(0).setData(output)
Example No. 10
 def crop_height(self):
     """Returns the height of the middle image"""
     # grab the middle image
     img = cv.LoadImage(self.files[len(self.files) / 2])
     npimg = imgutil.cv2array(img)
     height = npimg.shape[0]
     return height
Example No. 11
    def removeSquares(self):
        '''
        Removes the red squares from the images
        '''
        for i in range(len(self.panImages)):
            cvimg = cv.LoadImage(self.files[i])
            self.panImages[i].setImg(cvimg)
            self.panImages[i].setNpImg(imgutil.cv2array(self.panImages[i].getImg()))
Example No. 12
def toGrayscale(img, x, y):
    #assuming img is imported as an array
    newimg=cv.fromarray(img)
    newConvertedImage = cv.CreateImage ((x, y), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(newimg, newConvertedImage, cv.CV_BGR2GRAY)
    import imgutil
    newimg=imgutil.cv2array(newConvertedImage)
    return newimg
Example No. 13
    def show_matches(img1, img2, matches1, matches2):
        """Draws lines between matching features
        @img: a cvimg
        @harrisCorners: an array of matches
        """
        npimg1 = imgutil.cv2array(img1)
        npimg2 = imgutil.cv2array(img2)

        # create a new window
        combined = numpy.zeros((max(npimg1.shape[0], npimg2.shape[0]), npimg1.shape[1] + npimg2.shape[1], 3))
        combined[0:npimg1.shape[0], 0:npimg1.shape[1], ...] = npimg1
        combined[0:npimg2.shape[0], npimg1.shape[1]:npimg1.shape[1] + npimg2.shape[1], ...] = npimg2
        combined = imgutil.array2cv(combined)

        # draw lines
        for i in range(matches1.shape[0]):
            cv.Line(combined, (int(matches1[i, 0]), int(matches1[i, 1])),
                    (int(matches2[i, 0] + npimg1.shape[1]), int(matches2[i, 1])), (0, 255, 0))
        combined = imgutil.cv2array(combined)

        # show the image
        imgutil.image_show(combined, "combined")
Example No. 14
	def showMatches(self, img1, img2, matches1, matches2):
		'''
		Draws lines between matching features
		@img1, @img2: cv images
		@matches1, @matches2: arrays of matching points
		'''
		npimg1 = imgutil.cv2array(img1)
		npimg2 = imgutil.cv2array(img2)
		
		#create a new window
		combined = numpy.zeros((max(npimg1.shape[0],npimg2.shape[0]),npimg1.shape[1]+npimg2.shape[1],3))
		combined[0:npimg1.shape[0],0:npimg1.shape[1],...] = npimg1
		combined[0:npimg2.shape[0],npimg1.shape[1]:npimg1.shape[1]+npimg2.shape[1],...] = npimg2
		combined = imgutil.array2cv(combined)
		
		#draw lines
		for i in range(matches1.shape[0]):
			cv.Line(combined, (int(matches1[i,0]),int(matches1[i,1])), (int(matches2[i,0]+npimg1.shape[1]),int(matches2[i,1])), (0, 255, 0))
		combined = imgutil.cv2array(combined)
		
		#show the image
		imgutil.imageShow(combined, "combined")
Example No. 15
    def constPanObjects(self):
        '''
        Constructs the pan objects, including a window for each frame
        '''
        for i in range(len(self.files)):
            cvimg = cv.LoadImage(self.files[i])
            if self.pick == False:
                panImg = PanObj(i, None, cvimg)
            else:
                cv.NamedWindow(self.enumImageNames[i], cv.CV_WINDOW_NORMAL)
                grab = ginput.Grab(self.enumImageNames[i], 6)
                panImg = PanObj(i, grab, cvimg)
            panImg.setNpImg(imgutil.cv2array(cvimg))
            self.panImages.append(panImg)
        print "panImage", len(self.panImages)
Example No. 16
    def generateData(self):
        '''
        Acquires image data from the camera.
        '''
        super(CameraCV, self).generateData()
        
        if self._camera is not None:
            cvimg = cv.QueryFrame(self._camera)
#            cv.CvtColor(cvimg, cvimg, cv.CV_BGR2RGB)
            output = imgutil.cv2array(cvimg)[..., numpy.r_[2, 1, 0]]
        else:
            # test image if camera doesn't work
            output = numpy.zeros((480,640,3), numpy.int8)
            
        self.getOutput(0).setData(output)
Example No. 17
 def displayMatches(self, matchPts, inliers):
     '''
      Displays the matches that connect the two images
     '''
     matchOne = matchPts[:,:2]
     matchTwo = matchPts[:,2:]
     
     self.drawSquare(self.imageOne, matchOne, self.matchColor)
     self.drawSquare(self.imageTwo, matchTwo, self.matchColor)
     
     inlierMatchOne = matchOne[inliers]
     inlierMatchTwo = matchTwo[inliers]
     
     self.drawSquare(self.imageOne, inlierMatchOne, self.inliersColor)
     self.drawSquare(self.imageTwo, inlierMatchTwo, self.inliersColor)
     
     npimg = imgutil.cv2array(self.imageOne)
     npimg2 = imgutil.cv2array(self.imageTwo)
     
     displayPic = numpy.hstack((npimg,npimg2))
     
     #add the width of image one to image two
     #inlierMatchTwo[:,0] =  inlierMatchTwo[:,0] + npimg.shape[1]
     
     cvimg = imgutil.array2cv(displayPic)
     
     
     for i in range(inlierMatchOne.shape[0]):
         
         x1, y1 = int(inlierMatchOne[i,0]), int(inlierMatchOne[i,1])
         x2, y2 = int(inlierMatchTwo[i,0]+ npimg.shape[1]), int(inlierMatchTwo[i,1])
         
         cv.Line(cvimg, (x1,y1), ( x2, y2), self.inliersColor )
     
     imgutil.imageShow(cvimg, "matchImg")
     cv.WaitKey(0)
Example No. 18
    def generateData(self):
        '''
        Acquires image data from the camera.
        '''
        super(CameraCV, self).generateData()

        if self._camera is not None:
            cvimg = cv.QueryFrame(self._camera)
            #            cv.CvtColor(cvimg, cvimg, cv.CV_BGR2RGB)
            output = imgutil.cv2array(cvimg)[..., numpy.r_[2, 1, 0]]
        else:
            # test image if camera doesn't work
            output = numpy.zeros((480, 640, 3), numpy.int8)

        self.getOutput(0).setData(output)
Example No. 19
	def corners(self, homography):
		'''
		Finds the corners of the images
		@homography: a list of homographies
		@return: an array of corners of the global window
		'''
		#find the corners of all the images
		cornerL = []
		midCorners = None
		for i in range(len(self.files)):
			#convert the file
			cvimg = cv.LoadImage(self.files[i])
			npimg = imgutil.cv2array(cvimg)
			
			# set up the corners in an array
			h, w = npimg.shape[0:2]
			corners = numpy.array( [[ 0, w, w, 0],
									[ 0, 0, h, h]],dtype=float)
			corners = transform.homogeneous(corners)
			tform = homography[i]
			A = numpy.dot(tform, corners)
			A = transform.homogeneous(A)
			A = A.astype(int)
			cornerL.append(A)
			
			if i == len(self.files)/2:
				midCorners = A
		
		#  find the new corners of the image 
		w1L = []
		w2L = []
		h1L = []
		h2L = []
		for i in range(len(cornerL)):
			w1L.append(numpy.min(cornerL[i][0,:]))
			w2L.append(numpy.max(cornerL[i][0,:]))
			h1L.append(numpy.min(cornerL[i][1,:]))
			h2L.append(numpy.max(cornerL[i][1,:]))
		w1 = min(w1L)
		w2 = max(w2L)
		h1 = min(h1L)
		h2 = max(h2L)
		
		#set up array to return
		ndarray = numpy.array([(w1, h1), (w2, h2)])
		
		return ndarray,midCorners
Example No. 20
    def corners(self, homography):
        """Finds the corners of the images
        @homography: a list of homographies
        @return: an array of corners of the global window
        """
        # find the corners of all the images
        cornerL = []
        midCorners = None
        for i in range(len(self.files)):
            # convert the file
            cvimg = cv.LoadImage(self.files[i])
            npimg = imgutil.cv2array(cvimg)

            # set up the corners in an array
            h, w = npimg.shape[0:2]
            corners = numpy.array([[0, w, w, 0],
                                   [0, 0, h, h]], dtype=float)
            corners = transform.homogeneous(corners)
            tform = homography[i]
            A = numpy.dot(tform, corners)
            A = transform.homogeneous(A)
            A = A.astype(int)
            cornerL.append(A)

            if i == len(self.files) / 2:
                midCorners = A

        # find the new corners of the image
        w1L = []
        w2L = []
        h1L = []
        h2L = []
        for i in range(len(cornerL)):
            w1L.append(numpy.min(cornerL[i][0, :]))
            w2L.append(numpy.max(cornerL[i][0, :]))
            h1L.append(numpy.min(cornerL[i][1, :]))
            h2L.append(numpy.max(cornerL[i][1, :]))
        w1 = min(w1L)
        w2 = max(w2L)
        h1 = min(h1L)
        h2 = max(h2L)

        # set up array to return
        ndarray = numpy.array([(w1, h1), (w2, h2)])

        return ndarray, midCorners
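Both versions of corners above call transform.homogeneous twice: once on the 2xN corner coordinates before applying the homography and once on the 3xN product afterwards. A minimal sketch consistent with that usage (assumed, not the actual transform module):

import numpy

def homogeneous_sketch(pts):
    # Hypothetical stand-in for transform.homogeneous: pad 2xN points with a
    # row of ones, or divide 3xN homogeneous points by their last row.
    pts = numpy.asarray(pts, dtype=float)
    if pts.shape[0] == 2:
        return numpy.vstack((pts, numpy.ones((1, pts.shape[1]))))
    return pts / pts[-1, :]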
Example No. 21
    def testRectifying(self):
        '''
        This function tests the rectification of an image.
        It is structured so you have to manually set the four
        corner points that the points you choose are mapped to.
        '''
        cv.NamedWindow("grab", cv.CV_WINDOW_NORMAL)
        grab = ginput.Grab("grab", 4)
        
        key = None
        while key != 27:
            # grab frame
            cvimg = cv.LoadImage("../perspective.jpg")
              
            # draw points
            pts = grab.getPoints()
            for pxy in pts:
                cv.Rectangle(cvimg, (pxy[0]-15, pxy[1]-15), (pxy[0]+15, pxy[1]+15), (0, 0, 255))   
            
            # display
            cv.ShowImage("grab", cvimg)
                
            # handle keys
            key = cv.WaitKey(100)
            if key >= 0 and chr(key) == 'c':
                grab.clear(4)

        if pts.shape[0] != 4:
            return 
        npimg = imgutil.cv2array(cvimg)
        
        # The points that the points you choose are transformed to
        squarePts = numpy.array([[0,0],
                                 [100, 0],
                                 [0,200],
                                 [100,200]])
        
        #Finds the homography based on the points 
        homography = transform.homography(pts, squarePts)
        
        output = transform.transformImage(npimg, homography, "auto")
            
        imgutil.imageShow(output, "Homography")
        # handle keys
        cv.WaitKey(0)
Example No. 22
    def createPanorama(self, display = False):
        '''
        builds the entire panorama 
        '''
        #adding the identity matrix as the first matrix
        self.homographies.append(self.identity)
        
        self.constPanObjects()
        
        if self.pick == False:
            self.setHomographies()
        else:
            self.pickPoints()
        
        if len(self.panImages) == 0:
            print "You don't have any images"
            exit()
        
        #setting the np Image of the last picture
        self.panImages[-1].setNpImg( imgutil.cv2array(self.panImages[-1].getImg()))
        
        self.removeSquares()
        
        self.panImages[0].setHomography(self.identity)
        self.MapHomography()
        
        #gets all the corners of the Panorama    
        corners = self.panCorners()
        
        # transforms all the images based on their homographies
        for panImg in self.panImages:
            npimg = panImg.getNpImg()
            hom = panImg.getHomography()
            output = transform.transformImage(npimg, hom, corners)
            panImg.setOutput(output)
        
        if display == True:
            for panImg in self.panImages:
                imgutil.imageShow(panImg.getOutput(), "Panorama")
        
        average =  self.combineImage() 

        imgutil.imageShow(average)
        cv.WaitKey(0)
Example No. 23
	def panorama(self, sigma):
		'''
		Creates a panorama with alpha stitching and displays
		'''
		#print "\n--------------------------------"
		#print "Panorama "
		#print "--------------------------------\n"
		#list to hold the homographies
		inlierL = []
		homography = [numpy.matrix(numpy.identity(3))]
		
		# find the homography between each set of pictures
		for i in range(len(self.files)-1):
			#get everything for image 1
			img1 = cv.LoadImage(self.files[i])
			npimg1 = imgutil.cv2array(img1)
			npimg1 = self.grayscale(npimg1)
			pts1 = feature.harris(npimg1,count=512)
			desc1 = self.extract(npimg1, pts1)
			
			#get everything for image 2
			img2 = cv.LoadImage(self.files[i+1])
			npimg2 = imgutil.cv2array(img2)
			npimg2 = self.grayscale(npimg2)
			pts2 = feature.harris(npimg2,count=512)
			desc2 = self.extract(npimg2, pts2)
			
			matches = self.matching(desc1,desc2)
			self.showHarris(img1, pts1[matches[:,0]])
			self.showHarris(img2, pts2[matches[:,1]])
			
			"""
			montagePts = feature.harris(npimg1,count=20)
			montageDesc = self.extract(npimg1, montagePts)
			montage = self.montage(montageDesc, numCols=5)
			imgutil.imageShow(montage, "montage")
			"""
			
			imgutil.imageShow(img1,"image1")
			imgutil.imageShow(img2,"image2")
			#cv.WaitKey(0)
			
			matches1 = pts1[matches[:,0],0:2]
			matches2 = pts2[matches[:,1],0:2]
			data = numpy.hstack((matches1,matches2))
			
			h = self.ransac(data,0.5)
			self.showMatches(img1, img2, data[h[1]][:,0,0:2], data[h[1]][:,0,2:])
			
			homography.append(numpy.linalg.inv(h[0]))
			inlierL.append(h[1])
			
		#print "List of homographies: "
		#print homography
		
		midHomographyL = []
		#map all the homographies to image 1
		for i in range(1,len(homography)):
			homography[i] =  homography[i-1] * homography[i]
		
		middle = len(self.files)/2
		for i in range(len(homography)):
			#warp mid,  Him = Hm0^-1 * Hi0 where m is middle image
			inverse = numpy.linalg.inv(homography[middle])
			midHomography = inverse * homography[i]
			midHomographyL.append(midHomography)
		
		#find bounds of global extent and original picture
		warpedL = []
		output_range = self.corners(midHomographyL)[0]
		midCorners = self.corners(midHomographyL)[1]
		
		# warp the images
		for i in range(len(self.files)):
			#convert the file
			cvimg = cv.LoadImage(self.files[i])
			npimg = imgutil.cv2array(cvimg)
			
			#compute the gaussian weight
			h = npimg.shape[0]
			w = npimg.shape[1]
			yy,xx = numpy.mgrid[0:h,0:w]
			dist = (yy - h/2)**2 + (xx - w/2)**2
			gwt = numpy.exp(-dist/(2.0*sigma**2))
			
			#add the gaussian weight as the 4th channel
			npimg = numpy.dstack((npimg,gwt))
			
			#append the warped image to the list
			warpedImg = transform.transformImage(npimg,midHomographyL[i], output_range)
			warpedL.append(warpedImg)
			
			imgutil.imageShow(warpedImg, "test")
		
		# stitch the images
		top = numpy.zeros(warpedL[0].shape,dtype=float)
		bot = numpy.zeros(warpedL[0].shape,dtype=float)
		bot[:,:,3]=1.0
		for i in range(len(warpedL)):
			top[:,:,0] += warpedL[i][:,:,3] * warpedL[i][:,:,0]
			top[:,:,1] += warpedL[i][:,:,3] * warpedL[i][:,:,1]
			top[:,:,2] += warpedL[i][:,:,3] * warpedL[i][:,:,2]
			top[:,:,3] += warpedL[i][:,:,3]
			bot[:,:,0] += warpedL[i][:,:,3]
			bot[:,:,1] += warpedL[i][:,:,3]
			bot[:,:,2] += warpedL[i][:,:,3]
		
		bot[bot == 0] = 1
	
		output = top/bot

		#autoCrop if it is on
		if self.autoCrop:
			output = self.crop(output, output_range, midCorners[0:2,...])
		
		#show the panorama
		print "showing panorama"
		imgutil.imageShow(output, "final")
		cv.WaitKey(0)
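The accumulation loop above implements a per-pixel weighted average: each warped image carries its Gaussian weight in channel 3, and the mosaic color is sum(w_i * I_i) / sum(w_i). A condensed sketch of the same idea for the color channels only:

import numpy

def blend_sketch(warped_list):
    # warped_list: warped images of identical shape (H, W, 4), channel 3 = weight
    top = numpy.zeros(warped_list[0].shape[:2] + (3,), dtype=float)
    bot = numpy.zeros(top.shape, dtype=float)
    for w in warped_list:
        top += w[:, :, 3:4] * w[:, :, 0:3]
        bot += w[:, :, 3:4]
    bot[bot == 0] = 1   # avoid divide by zero where no image contributes
    return top / bot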
Example No. 24
	# compute Harris feature strength, avoiding divide by zero
	imgH = (Ixx * Iyy - Ixy**2) / (Ixx + Iyy + 1e-8)
		
	# exclude points near the image border
	imgH[:16, :] = 0
	imgH[-16:, :] = 0
	imgH[:, :16] = 0
	imgH[:, -16:] = 0
	
	# non-maximum suppression in 5x5 regions
	maxH = filters.maximum_filter(imgH, (5,5))
	imgH = imgH * (imgH == maxH)
	
	# sort points by strength and find their positions
	sortIdx = numpy.argsort(imgH.flatten())[::-1]
	sortIdx = sortIdx[:count]
	yy = sortIdx / w
	xx = sortIdx % w
		
	# concatenate positions and values
	xyv = numpy.vstack((xx, yy, imgH.flatten()[sortIdx])).transpose()
	
	return xyv
	
	
if __name__ == "__main__":
	img = cv.LoadImage("stained2.jpg")
	npimg = imgutil.cv2array(img)
	pts = harris(npimg[:,:,1])
	print pts
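The fragment above starts from precomputed Ixx, Iyy, Ixy. These are commonly the Gaussian-smoothed products of the image gradients; a sketch of that setup (an assumption about the missing part of harris, using the same scipy.ndimage filters module the fragment relies on):

from scipy.ndimage import filters

def harris_products_sketch(img, sigma=1.5):
    # image gradients, their pairwise products, then Gaussian smoothing
    Ix = filters.sobel(img, axis=1)
    Iy = filters.sobel(img, axis=0)
    Ixx = filters.gaussian_filter(Ix * Ix, sigma)
    Iyy = filters.gaussian_filter(Iy * Iy, sigma)
    Ixy = filters.gaussian_filter(Ix * Iy, sigma)
    return Ixx, Iyy, Ixy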
Example No. 25
import imgutil

#first import all images and the target image
all=batchRead.main()
#then import the image (already grayscaled)
target=ImgIO.readFile('target.jpg')

(x,y)=target.shape
##build a new 4d array
#t=np.ndarray([x,y,50,50], dtype=np.uint8)

#build a new picture:

#this is for grayscale:
newConvertedImage = cv.CreateImage ((x*50, y*50), cv.IPL_DEPTH_8U, 1)
image=imgutil.cv2array(newConvertedImage)

#map=intensity.toMap(all)

map=intensity.toMap(all)

for i in range(len(target)):
    for j in range(len(target[i])):
        #print (i,j)
        targetedPixel=target[i][j]
        tile=intensity.mapFind(targetedPixel, map)
        #pixelwise copying:
        for m in range(len(tile)):
            for n in range(len(tile[m])):
                print 'processing pixel:' + str((i*50+m, j*50+n))
                #print tile[m][n]
Example No. 26
    # compute Harris feature strength, avoiding divide by zero
    imgH = (Ixx * Iyy - Ixy**2) / (Ixx + Iyy + 1e-8)

    # exclude points near the image border
    imgH[:16, :] = 0
    imgH[-16:, :] = 0
    imgH[:, :16] = 0
    imgH[:, -16:] = 0

    # non-maximum suppression in 5x5 regions
    maxH = filters.maximum_filter(imgH, (5, 5))
    imgH = imgH * (imgH == maxH)

    # sort points by strength and find their positions
    sortIdx = numpy.argsort(imgH.flatten())[::-1]
    sortIdx = sortIdx[:count]
    yy = sortIdx / w
    xx = sortIdx % w

    # concatenate positions and values
    xyv = numpy.vstack((xx, yy, imgH.flatten()[sortIdx])).transpose()

    return xyv


if __name__ == "__main__":
    img = cv.LoadImage("stained2.jpg")
    npimg = imgutil.cv2array(img)
    pts = harris(npimg[:, :, 1])
    print pts
Example No. 27
    def panorama(self, sigma):
        """Creates a panorama with alpha stitching and displays"""
        # print "\n--------------------------------"
        # print "Panorama "
        # print "--------------------------------\n"
        # list to hold the homographies
        inlier_list = []
        homography = [numpy.matrix(numpy.identity(3))]

        # find the homography between each set of pictures
        for i in range(len(self.files) - 1):
            # get everything for image 1
            img1 = cv.LoadImage(self.files[i])
            npimg1 = imgutil.cv2array(img1)
            npimg1 = self.grayscale(npimg1)
            pts1 = feature.harris(npimg1, count=512)
            desc1 = self.extract(npimg1, pts1)

            # get everything for image 2
            img2 = cv.LoadImage(self.files[i + 1])
            npimg2 = imgutil.cv2array(img2)
            npimg2 = self.grayscale(npimg2)
            pts2 = feature.harris(npimg2, count=512)
            desc2 = self.extract(npimg2, pts2)

            matches = self.matching(desc1, desc2)
            self.show_harris(img1, pts1[matches[:, 0]])
            self.show_harris(img2, pts2[matches[:, 1]])

            # montagePts = feature.harris(npimg1,count=20)
            # montageDesc = self.extract(npimg1, montagePts)
            # montage = self.montage(montageDesc, numCols=5)
            # imgutil.imageShow(montage, "montage")

            imgutil.image_show(img1, "image1")
            imgutil.image_show(img2, "image2")

            matches1 = pts1[matches[:, 0], 0:2]
            matches2 = pts2[matches[:, 1], 0:2]
            data = numpy.hstack((matches1, matches2))

            h = self.ransac(data, 0.5)
            self.show_matches(img1, img2, data[h[1]][:, 0, 0:2], data[h[1]][:, 0, 2:])

            homography.append(numpy.linalg.inv(h[0]))
            inlier_list.append(h[1])

        # print "List of homographies: "
        # print homography

        mid_homography_list = []
        # map all the homographies to image 1
        for i in range(1, len(homography)):
            homography[i] = homography[i - 1] * homography[i]

        middle = len(self.files) / 2
        for i in range(len(homography)):
            # warp mid,  Him = Hm0^-1 * Hi0 where m is middle image
            inverse = numpy.linalg.inv(homography[middle])
            midHomography = inverse * homography[i]
            mid_homography_list.append(midHomography)

        # find bounds of global extent and original picture
        warpedL = []
        output_range = self.corners(mid_homography_list)[0]
        midCorners = self.corners(mid_homography_list)[1]

        # warp the images
        for i in range(len(self.files)):
            # convert the file
            cvimg = cv.LoadImage(self.files[i])
            npimg = imgutil.cv2array(cvimg)

            # compute the gaussian weight
            h = npimg.shape[0]
            w = npimg.shape[1]
            yy, xx = numpy.mgrid[0:h, 0:w]
            dist = (yy - h / 2) ** 2 + (xx - w / 2) ** 2
            gwt = numpy.exp(-dist / (2.0 * sigma ** 2))

            # add the gaussian weight as the 4th channel
            npimg = numpy.dstack((npimg, gwt))

            # append the warped image to the list
            warpedImg = transform.transform_image(npimg, mid_homography_list[i], output_range)
            warpedL.append(warpedImg)

            imgutil.image_show(warpedImg, "test")

        # stitch the images
        top = numpy.zeros(warpedL[0].shape, dtype=float)
        bot = numpy.zeros(warpedL[0].shape, dtype=float)
        bot[:, :, 3] = 1.0
        for i in range(len(warpedL)):
            top[:, :, 0] += warpedL[i][:, :, 3] * warpedL[i][:, :, 0]
            top[:, :, 1] += warpedL[i][:, :, 3] * warpedL[i][:, :, 1]
            top[:, :, 2] += warpedL[i][:, :, 3] * warpedL[i][:, :, 2]
            top[:, :, 3] += warpedL[i][:, :, 3]
            bot[:, :, 0] += warpedL[i][:, :, 3]
            bot[:, :, 1] += warpedL[i][:, :, 3]
            bot[:, :, 2] += warpedL[i][:, :, 3]

        bot[bot == 0] = 1

        output = top / bot

        # autoCrop if it is on
        if self.auto_crop:
            output = self.crop(output, output_range, midCorners[0:2, ...])

        # show the panorama
        print "showing panorama"
        imgutil.image_show(output, "final")
        cv.WaitKey(0)
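Every example on this page funnels a legacy cv IplImage through imgutil.cv2array. The imgutil module itself is not shown; a minimal sketch of the classic conversion recipe it presumably wraps (an assumption, not the project's actual code):

import numpy
import cv   # legacy OpenCV python bindings (cv / cv2.cv)

def cv2array_sketch(cvimg):
    # convert an IplImage to a numpy array shaped (height, width, channels)
    depth2dtype = {
        cv.IPL_DEPTH_8U: 'uint8',
        cv.IPL_DEPTH_8S: 'int8',
        cv.IPL_DEPTH_16U: 'uint16',
        cv.IPL_DEPTH_16S: 'int16',
        cv.IPL_DEPTH_32S: 'int32',
        cv.IPL_DEPTH_32F: 'float32',
        cv.IPL_DEPTH_64F: 'float64',
    }
    arr = numpy.fromstring(cvimg.tostring(),
                           dtype=depth2dtype[cvimg.depth],
                           count=cvimg.width * cvimg.height * cvimg.nChannels)
    return arr.reshape((cvimg.height, cvimg.width, cvimg.nChannels))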