def testRotate():
    import cv
    import optparse
    import imgutil

    # handle command line arguments
    parser = optparse.OptionParser()
    parser.add_option("-f", "--filename", help="input image file")
    parser.add_option("-a", "--angle", help="rotation angle in degrees",
                      default=0, type="float")
    options, remain = parser.parse_args()
    if options.filename is None:
        parser.print_help()
        exit(0)

    # load image
    cvimg = cv.LoadImage(options.filename)
    npimg = imgutil.cv2array(cvimg)

    # rotate image about its center
    h, w = npimg.shape[0:2]
    print h, w
    A = makeCenteredRotation(options.angle, (w/2.0, h/2.0))
    nprot = transformImage(npimg, A, "auto")

    imgutil.imageShow(npimg, "original")
    imgutil.imageShow(nprot, "rotate")
    cv.WaitKey(0)
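# makeCenteredRotation and transformImage are defined elsewhere; below is a
# minimal sketch of what makeCenteredRotation plausibly computes, assuming
# 3x3 homogeneous coordinates: translate the center to the origin, rotate,
# then translate back, i.e. T(c) * R(theta) * T(-c). The name and call
# signature are taken from the usage in testRotate above; the body is an
# assumption, not the original implementation.
def makeCenteredRotation(degrees, center):
    import numpy
    theta = numpy.radians(degrees)
    cx, cy = center
    # T moves the rotation center to the origin
    T = numpy.matrix([[1.0, 0.0, -cx],
                      [0.0, 1.0, -cy],
                      [0.0, 0.0, 1.0]])
    # R rotates about the origin
    R = numpy.matrix([[numpy.cos(theta), -numpy.sin(theta), 0.0],
                      [numpy.sin(theta),  numpy.cos(theta), 0.0],
                      [0.0, 0.0, 1.0]])
    # inv(T) moves the origin back to the rotation center
    return numpy.linalg.inv(T) * R * T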
def montage(self, descriptors, numCols=32):
    '''
    Displays all of the descriptors in a montage.
    @descriptors: a numpy array of feature descriptors with size w x w x N
    @numCols: the number of columns in the montage
    @return: a numpy array (the montage)
    '''
    numPatches = descriptors.shape[2]
    # round up so a partial final row is not dropped
    numRows = (numPatches + numCols - 1) / numCols
    sideLength = descriptors.shape[0]

    # resize descriptors into a montage
    montage = numpy.zeros((sideLength*numRows, sideLength*numCols))

    # place the descriptors on a canvas
    count = 0
    for i in range(numRows):
        for j in range(numCols):
            if count >= numPatches:
                break
            montage[i*sideLength:(i+1)*sideLength,
                    j*sideLength:(j+1)*sideLength] = descriptors[..., count]
            count += 1

    # show the image
    imgutil.imageShow(montage, title="montage", norm=True)
    #cv.WaitKey(0)
    return montage
def displayDescriptors(self, descriptorArray):
    '''
    Displays the first twenty-five prominent features in a 5x5 grid
    from the descriptorArray.
    Returns the image.
    '''
    wAndH = descriptorArray.shape[0]
    numDesc = descriptorArray.shape[2]

    # set up the numpy image where the descriptors will be placed
    img = numpy.zeros((5*wAndH, 5*wAndH))
    for j in range(5):
        for i in range(5):
            # make sure we do not go past the number of descriptors
            if i*5 + j < numDesc:
                startY = i * wAndH
                endY = (i + 1) * wAndH
                startX = j * wAndH
                endX = (j + 1) * wAndH
                img[startY:endY, startX:endX] = descriptorArray[:, :, i*5+j]
    imgutil.imageShow(img, "descriptor")
    return img
def settleAutoExposure(self, numFrames=30, display=True):
    '''
    Run image acquisition for a while in auto exposure mode.
    '''
    self.setAutoExposure(True)
    self.startTransmission()
    for i in range(numFrames):
        npimg = self.acquireFrame()
        if display:
            imgutil.imageShow(npimg, "Auto", False, wait=10)
    self.setAutoExposure(False)
def testStart():
    # connect to firewire camera, select frame format
    fwcam = FirewireVideo(0, DC1394_ISO_SPEED_800)
    fwcam.setVideoMode(DC1394_VIDEO_MODE_1024x768_RGB8, DC1394_FRAMERATE_15)

    # set up camera parameters
    fwcam.setExposureAbsolute(brightness=0, gamma=1.0)
    fwcam.setAutoExposure(True)
    fwcam.setColorAbsolute(whiteBlue=1023, whiteRed=276)

    # start grabbing video frames
    fwcam.startTransmission()

    # display frames until escape is pressed
    index = 0
    poll = 100
    t0 = time.time()
    key = None
    while key != 27:
        # grab a frame
        frame = fwcam.acquireFrame()
        key = imgutil.imageShow(frame, "fwvideo", False, 10)
        index += 1
        t1 = time.time()
        if index % poll == 0:
            fwcam.printFeatures(False)
            print "{0:8}: {1:8.3f} fps".format(index, float(poll) / (t1 - t0))
            t0 = t1

    # disconnect from camera
    fwcam.setAutoExposure(False)
    fwcam.stopTransmission()
    fwcam.closeVideoDevice()
def testStart():
    # connect to firewire camera, select frame format
    fwcam = FirewireVideo(0, DC1394_ISO_SPEED_800)
    fwcam.setVideoMode(DC1394_VIDEO_MODE_640x480_RGB8, DC1394_FRAMERATE_30)

    # set up camera parameters
    fwcam.setExposureAbsolute(brightness=0, gamma=1.0)
    fwcam.setAutoExposure(True)
    fwcam.setColorAbsolute(whiteBlue=1023, whiteRed=276)

    # start grabbing video frames
    fwcam.startTransmission()

    # display frames until escape is pressed
    index = 0
    poll = 100
    t0 = time.time()
    key = None
    while key != 27:
        # grab a frame
        frame = fwcam.acquireFrame()
        key = imgutil.imageShow(frame, "fwvideo", False, 10)
        index += 1
        t1 = time.time()
        if index % poll == 0:
            fwcam.printFeatures(False)
            print "{0:8}: {1:8.3f} fps".format(index, float(poll) / (t1 - t0))
            t0 = t1

    # disconnect from camera
    fwcam.setAutoExposure(False)
    fwcam.stopTransmission()
    fwcam.closeVideoDevice()
def testRectifying(self):
    '''
    Tests the rectification of an image. The four corner points are
    manipulated manually, then mapped to a chosen target rectangle.
    '''
    cv.NamedWindow("grab", cv.CV_WINDOW_NORMAL)
    grab = ginput.Grab("grab", 4)

    key = None
    while key != 27:
        # grab frame
        cvimg = cv.LoadImage("../perspective.jpg")

        # draw points
        pts = grab.getPoints()
        for pxy in pts:
            cv.Rectangle(cvimg, (pxy[0]-15, pxy[1]-15),
                         (pxy[0]+15, pxy[1]+15), (0, 0, 255))

        # display
        cv.ShowImage("grab", cvimg)

        # handle keys
        key = cv.WaitKey(100)
        if key >= 0 and chr(key) == 'c':
            grab.clear(4)

    if pts.shape[0] != 4:
        return

    npimg = imgutil.cv2array(cvimg)
    # the target points that the chosen points are mapped to
    squarePts = numpy.array([[0, 0], [100, 0], [0, 200], [100, 200]])
    # find the homography from the point correspondences
    homography = transform.homography(pts, squarePts)
    output = transform.transformImage(npimg, homography, "auto")
    imgutil.imageShow(output, "Homography")

    # handle keys
    cv.WaitKey(0)
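# transform.homography is not defined in this snippet; below is a minimal
# sketch of a standard 4-point DLT (direct linear transform) estimate,
# under the assumption that it maps src points onto dst points. Each
# correspondence (x, y) -> (u, v) contributes two rows to A in A h = 0,
# and h is recovered as the last right singular vector of A. The name
# homographyDLT is hypothetical, chosen for this sketch.
def homographyDLT(src, dst):
    import numpy
    A = []
    for (x, y), (u, v) in zip(src, dst):
        A.append([-x, -y, -1, 0, 0, 0, u*x, u*y, u])
        A.append([0, 0, 0, -x, -y, -1, v*x, v*y, v])
    A = numpy.array(A, dtype=float)
    # the null-space direction of A is the last row of Vt
    U, S, Vt = numpy.linalg.svd(A)
    H = Vt[-1].reshape(3, 3)
    return H / H[2, 2]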
def createPanorama(self, display=False):
    '''
    Builds the entire panorama.
    '''
    # add the identity matrix as the first matrix
    self.homographies.append(self.identity)
    self.constPanObjects()
    if not self.pick:
        self.setHomographies()
    else:
        self.pickPoints()

    if len(self.panImages) == 0:
        print "You don't have any images"
        exit()

    # set the numpy image of the last picture
    self.panImages[-1].setNpImg(imgutil.cv2array(self.panImages[-1].getImg()))
    self.removeSquares()
    self.panImages[0].setHomography(self.identity)
    self.MapHomography()

    # get all the corners of the panorama
    corners = self.panCorners()

    # transform all the images based on their homographies
    for panImg in self.panImages:
        npimg = panImg.getNpImg()
        hom = panImg.getHomography()
        output = transform.transformImage(npimg, hom, corners)
        panImg.setOutput(output)

    if display:
        for panImg in self.panImages:
            imgutil.imageShow(panImg.getOutput(), "Panorama")

    average = self.combineImage()
    imgutil.imageShow(average)
    cv.WaitKey(0)
def showMatches(self, img1, img2, matches1, matches2):
    '''
    Draws lines between matching features.
    @img1, @img2: cv images
    @matches1, @matches2: arrays of matching point coordinates, one row per match
    '''
    npimg1 = imgutil.cv2array(img1)
    npimg2 = imgutil.cv2array(img2)

    # create a combined canvas with the two images side by side
    combined = numpy.zeros((max(npimg1.shape[0], npimg2.shape[0]),
                            npimg1.shape[1] + npimg2.shape[1], 3))
    combined[0:npimg1.shape[0], 0:npimg1.shape[1], ...] = npimg1
    combined[0:npimg2.shape[0],
             npimg1.shape[1]:npimg1.shape[1]+npimg2.shape[1], ...] = npimg2
    combined = imgutil.array2cv(combined)

    # draw lines between the matches
    for i in range(matches1.shape[0]):
        cv.Line(combined,
                (int(matches1[i, 0]), int(matches1[i, 1])),
                (int(matches2[i, 0] + npimg1.shape[1]), int(matches2[i, 1])),
                (0, 255, 0))
    combined = imgutil.cv2array(combined)

    # show the image
    imgutil.imageShow(combined, "combined")
def displayMatches(self, matchPts, inliers):
    '''
    Shows a display of the matches that connect the two images.
    '''
    matchOne = matchPts[:, :2]
    matchTwo = matchPts[:, 2:]
    self.drawSquare(self.imageOne, matchOne, self.matchColor)
    self.drawSquare(self.imageTwo, matchTwo, self.matchColor)

    inlierMatchOne = matchOne[inliers]
    inlierMatchTwo = matchTwo[inliers]
    self.drawSquare(self.imageOne, inlierMatchOne, self.inliersColor)
    self.drawSquare(self.imageTwo, inlierMatchTwo, self.inliersColor)

    npimg = imgutil.cv2array(self.imageOne)
    npimg2 = imgutil.cv2array(self.imageTwo)
    displayPic = numpy.hstack((npimg, npimg2))

    # add the width of image one to image two
    #inlierMatchTwo[:,0] = inlierMatchTwo[:,0] + npimg.shape[1]
    cvimg = imgutil.array2cv(displayPic)
    for i in range(inlierMatchOne.shape[0]):
        x1, y1 = int(inlierMatchOne[i, 0]), int(inlierMatchOne[i, 1])
        x2, y2 = int(inlierMatchTwo[i, 0] + npimg.shape[1]), int(inlierMatchTwo[i, 1])
        cv.Line(cvimg, (x1, y1), (x2, y2), self.inliersColor)
    imgutil.imageShow(cvimg, "matchImg")
    cv.WaitKey(0)
def testScale():
    # pipeSource = source.CameraFW(0,
    #     FirewireVideo.DC1394_VIDEO_MODE_640x480_RGB8,
    #     FirewireVideo.DC1394_FRAMERATE_15)
    # pipeSource.getCamera().setColorAbsolute(whiteBlue=1023, whiteRed=276)
    pipeSource = source.CameraCV()
    #pipeSource = source.FileReader("../hw3/leukemia_40x.png")
    gainBias = GainBias(pipeSource.getOutput())

    key = None
    frame = 0
    t0 = time.time()
    span = 30
    while key != 27:
        pipeSource.updatePlayMode()
        gainBias.update()
        key = imgutil.imageShow(gainBias.getOutput().getData(), "GB", False, 10)
        if key >= 0:
            key &= 255  # convert Unicode key to ASCII
            char = chr(key)
            print "Key: ", key, char
            if char == "-":
                gainBias.setScale(gainBias.getScale() * 0.95)
            elif char == "=":
                gainBias.setScale(gainBias.getScale() * 1.05)
            elif char == "[":
                gainBias.setOffset(gainBias.getOffset() - 5.0)
            elif char == "]":
                gainBias.setOffset(gainBias.getOffset() + 5.0)
            elif char == "r":
                gainBias.setScale(1.0)
                gainBias.setOffset(0.0)
            elif char == "a":
                gainBias.setAutoContrast(not gainBias.getAutoContrast())
            elif char == "p":
                gainBias.setPreserveIntensity(not gainBias.getPreserveIntensity())
        frame += 1
        if frame % span == 0:
            t1 = time.time()
            print "{0:8.5f} fps".format(span / (t1 - t0))
            t0 = t1
def testCameraFW():
    # acquire images from the firewire camera
    camfw = CameraFW(0, FirewireVideo.DC1394_VIDEO_MODE_800x600_RGB8,
                     FirewireVideo.DC1394_FRAMERATE_15)

    # cycle through images until escape is pressed
    key = None
    i = 0
    while key != 27:
        camfw.updatePlayMode()
        camfw.update()
        key = imgutil.imageShow(camfw.getOutput(0).getData(), "pipeline", False, 10)
        i += 1
        if i % 20 == 0:
            print "{0}: {1:8.3f} fps".format(i, camfw.getCamera().computeFramerate())
def test_scale_offset(self):
    '''
    Displays video and tests the scale_offset operator.
    '''
    # assumed: add_operator returns the operator instance it wires in,
    # so the key handlers below can adjust it
    scale_offset = self.add_operator(Scale_Offset)

    #TODO: might want to have display code separate from this function
    # to avoid duplication (definitely want something like this though)
    key = None
    frame = 0
    t0 = time.time()
    span = 30

    # while we're not pressing escape...
    while key != 27:
        # update the pipeline's source and the operator
        self.source.updatePlayMode()
        self.update_pipeline()
        key = imgutil.imageShow(self.last_operator.getOutput().getData(),
                                "pipeline", False, 10)
        if key >= 0:
            char = chr(key)
            print "Key: ", key, char
            if char == "-":
                scale_offset.setScale(scale_offset.getScale() * 0.95)
            elif char == "=":
                scale_offset.setScale(scale_offset.getScale() * 1.05)
            elif char == "[":
                scale_offset.setOffset(scale_offset.getOffset() - 5.0)
            elif char == "]":
                scale_offset.setOffset(scale_offset.getOffset() + 5.0)
            elif char == "r":
                scale_offset.setScale(1.0)
                scale_offset.setOffset(0.0)
        frame += 1
        if frame % span == 0:
            t1 = time.time()
            print "{0:8.5f} fps".format(span / (t1 - t0))
            t0 = t1
def testScale():
    # pipeSource = source.CameraFW(0,
    #     FirewireVideo.DC1394_VIDEO_MODE_640x480_RGB8,
    #     FirewireVideo.DC1394_FRAMERATE_15)
    # pipeSource.getCamera().setColorAbsolute(whiteBlue=1023, whiteRed=276)
    pipeSource = source.CameraCV()
    scale = Scale(pipeSource.getOutput())
    offset = Offset(scale.getOutput())

    key = None
    frame = 0
    t0 = time.time()
    span = 30
    while key != 27:
        pipeSource.updatePlayMode()
        offset.update()
        key = imgutil.imageShow(offset.getOutput().getData(), "pipeline", False, 10)
        if key >= 0:
            char = chr(key)
            print "Key: ", key, char
            if char == "-":
                scale.setScale(scale.getScale() * 0.95)
            elif char == "=":
                scale.setScale(scale.getScale() * 1.05)
            elif char == "[":
                offset.setOffset(offset.getOffset() - 5.0)
            elif char == "]":
                offset.setOffset(offset.getOffset() + 5.0)
            elif char == "r":
                scale.setScale(1.0)
                offset.setOffset(0.0)
        frame += 1
        if frame % span == 0:
            t1 = time.time()
            print "{0:8.5f} fps".format(span / (t1 - t0))
            t0 = t1
def panorama(self, sigma):
    '''
    Creates a panorama with alpha stitching and displays it.
    '''
    #print "\n--------------------------------"
    #print "Panorama "
    #print "--------------------------------\n"

    # lists to hold the inliers and homographies
    inlierL = []
    homography = [numpy.matrix(numpy.identity(3))]

    # find the homography between each pair of consecutive pictures
    for i in range(len(self.files)-1):
        # get everything for image 1
        img1 = cv.LoadImage(self.files[i])
        npimg1 = imgutil.cv2array(img1)
        npimg1 = self.grayscale(npimg1)
        pts1 = feature.harris(npimg1, count=512)
        desc1 = self.extract(npimg1, pts1)

        # get everything for image 2
        img2 = cv.LoadImage(self.files[i+1])
        npimg2 = imgutil.cv2array(img2)
        npimg2 = self.grayscale(npimg2)
        pts2 = feature.harris(npimg2, count=512)
        desc2 = self.extract(npimg2, pts2)

        matches = self.matching(desc1, desc2)
        self.showHarris(img1, pts1[matches[:, 0]])
        self.showHarris(img2, pts2[matches[:, 1]])

        """
        montagePts = feature.harris(npimg1, count=20)
        montageDesc = self.extract(npimg1, montagePts)
        montage = self.montage(montageDesc, numCols=5)
        imgutil.imageShow(montage, "montage")
        """

        imgutil.imageShow(img1, "image1")
        imgutil.imageShow(img2, "image2")
        #cv.WaitKey(0)

        matches1 = pts1[matches[:, 0], 0:2]
        matches2 = pts2[matches[:, 1], 0:2]
        data = numpy.hstack((matches1, matches2))
        h = self.ransac(data, 0.5)
        self.showMatches(img1, img2, data[h[1]][:, 0, 0:2], data[h[1]][:, 0, 2:])
        homography.append(numpy.linalg.inv(h[0]))
        inlierL.append(h[1])

    #print "List of homographies: "
    #print homography

    midHomographyL = []
    # map all the homographies to image 1
    for i in range(1, len(homography)):
        homography[i] = homography[i-1] * homography[i]

    middle = len(self.files)/2
    for i in range(len(homography)):
        # warp to the middle image: Him = Hm0^-1 * Hi0, where m is the middle image
        inverse = numpy.linalg.inv(homography[middle])
        midHomography = inverse * homography[i]
        midHomographyL.append(midHomography)

    # find bounds of the global extent and the original picture
    warpedL = []
    output_range = self.corners(midHomographyL)[0]
    midCorners = self.corners(midHomographyL)[1]

    # warp the images
    for i in range(len(self.files)):
        # convert the file
        cvimg = cv.LoadImage(self.files[i])
        npimg = imgutil.cv2array(cvimg)

        # compute the Gaussian weight, largest at the image center
        h = npimg.shape[0]
        w = npimg.shape[1]
        yy, xx = numpy.mgrid[0:h, 0:w]
        dist = (yy - h/2)**2 + (xx - w/2)**2
        gwt = numpy.exp(-dist/(2.0*sigma**2))

        # add the Gaussian weight as the 4th channel
        npimg = numpy.dstack((npimg, gwt))

        # append the warped image to the list
        warpedImg = transform.transformImage(npimg, midHomographyL[i], output_range)
        warpedL.append(warpedImg)
        imgutil.imageShow(warpedImg, "test")

    # stitch the images: weighted average of the warps
    top = numpy.zeros(warpedL[0].shape, dtype=float)
    bot = numpy.zeros(warpedL[0].shape, dtype=float)
    bot[:, :, 3] = 1.0
    for i in range(len(warpedL)):
        top[:, :, 0] += warpedL[i][:, :, 3] * warpedL[i][:, :, 0]
        top[:, :, 1] += warpedL[i][:, :, 3] * warpedL[i][:, :, 1]
        top[:, :, 2] += warpedL[i][:, :, 3] * warpedL[i][:, :, 2]
        top[:, :, 3] += warpedL[i][:, :, 3]
        bot[:, :, 0] += warpedL[i][:, :, 3]
        bot[:, :, 1] += warpedL[i][:, :, 3]
        bot[:, :, 2] += warpedL[i][:, :, 3]
    bot[bot == 0] = 1
    output = top/bot

    # autocrop if it is on
    if self.autoCrop:
        output = self.crop(output, output_range, midCorners[0:2, ...])

    # show the panorama
    print "showing panorama"
    imgutil.imageShow(output, "final")
    cv.WaitKey(0)
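# self.ransac is not defined in this snippet; below is a minimal sketch of
# the usual RANSAC loop, under the assumption that each row of data is a
# correspondence [x1, y1, x2, y2] and that the threshold is a reprojection
# distance in pixels. It reuses the hypothetical homographyDLT sketch above
# and returns (bestH, inlierIndices), matching how h[0] and h[1] are used
# in panorama(). This is a sketch of the technique, not the original code.
def ransacHomography(data, threshold, iterations=500):
    import numpy
    data = numpy.asarray(data, dtype=float)
    bestH = None
    bestInliers = numpy.array([], dtype=int)
    for _ in range(iterations):
        # fit a candidate homography to a random minimal sample of 4 matches
        idx = numpy.random.permutation(len(data))[:4]
        H = homographyDLT(data[idx, 0:2], data[idx, 2:4])
        # map all first-image points through H and measure reprojection error
        pts = numpy.column_stack((data[:, 0:2], numpy.ones(len(data))))
        proj = numpy.dot(pts, numpy.asarray(H).T)
        proj = proj[:, 0:2] / proj[:, 2:3]
        err = numpy.sqrt(((proj - data[:, 2:4]) ** 2).sum(axis=1))
        # keep the candidate that explains the most matches
        inliers = numpy.nonzero(err < threshold)[0]
        if len(inliers) > len(bestInliers):
            bestH, bestInliers = H, inliers
    return bestH, bestInliers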
def main():
    # set up command line parameters
    parser = optparse.OptionParser()
    parser.add_option("-p", "--path", help="the path of the image folder",
                      default="./bacteria/")
    parser.add_option("-r", "--rect", help="rectangle to crop",
                      default="95,925,40,1315")
    parser.add_option("-n", "--num", help="number of background frames",
                      default=60)
    parser.add_option("-f", "--ff", help="flatfield file to write/read from",
                      default=None)
    parser.add_option("-w", "--write", help="write to ff and bg file",
                      default=False, action="store_true")
    parser.add_option("-b", "--bg", help="bg file to write/read from",
                      default=None)
    parser.add_option("-a", "--alpha",
                      help="number of standard deviations to use in segmentation",
                      default=4)
    parser.add_option("-s", "--save", help="save displays to disk",
                      default=False, action="store_true")
    options, remain = parser.parse_args()

    # parse command line parameters
    rawFlat = options.path + "/flat*.raw"
    rawBact = options.path + "/bact*.raw"
    bgFrames = int(options.num)
    coords = options.rect
    coords = coords.strip("()")  # strip returns a new string
    coords = tuple(int(x) for x in coords.split(","))

    # check if fffile exists
    try:
        if options.ff:
            fffile = open(options.ff, "w" if options.write else "r")
        else:
            fffile = None
    except IOError:
        print "Can't read from ff file"
        return 0

    # check if bgfile exists
    try:
        if options.bg:
            bgfile = open(options.bg, "w" if options.write else "r")
        else:
            bgfile = None
    except IOError:
        print "Can't read from bg file"
        return 0

    # crop and convert the images to the proper type
    ic = ImageConversion(None, numpy.float64, coords)

    # determine if the fffile needs to be created or read from file
    if fffile and not options.write:
        ffimg = numpy.load(fffile)
    else:
        # acquire flat field images from file
        files = glob.glob(rawFlat)
        ffs = source.FileStackReader(files)

        # convert images and apply flat field correction
        ic.setInput(ffs.getOutput())
        flatField = FlatFieldAcquisition(ic.getOutput(), len(files))
        for _ in range(len(files)):
            ffs.increment()
            flatField.update()
        ffimg = flatField.getFlatField()

        # write the image if flagged
        if fffile and options.write:
            numpy.save(fffile, ffimg)

    # grab the bacteria images from the directory and sort them
    files = glob.glob(rawBact)
    files.sort()

    # calibrate the converted images
    calibrated = FlatFieldCalibration(ic.getOutput(), ff=ffimg)

    # determine if the bgfile needs to be created or read from file
    if bgfile and not options.write:
        bgImg, stdDevImg = pickle.load(bgfile)
    else:
        # grab background images
        ffs = source.FileStackReader(files[:bgFrames])

        # convert images to the correct size and type
        ic.setInput(ffs.getOutput())
        calibrated = FlatFieldCalibration(ic.getOutput(), ff=ffimg)
        bgMean = StdDevImageAcquisition(calibrated.getOutput(), bgFrames)
        for _ in range(bgFrames):
            ffs.increment()
            bgMean.update()

        # calculate the background and standard deviation images
        bgImg = bgMean.getMean()
        stdDevImg = bgMean.getStdDev()
        if bgfile and options.write:
            pickle.dump((bgImg, stdDevImg), bgfile)

    # grab the bacteria frames
    ffs = source.FileStackReader(files[bgFrames:])
    ffs.setLoop(True)
    ic.setInput(ffs.getOutput())
    #segmented = BinarySegmentationOld(calibrated.getOutput(), bgImg, -5)
    segmented = BinarySegmentation(calibrated.getOutput(), bgImg, stdDevImg,
                                   alpha=float(options.alpha))

    # open -> accumulate -> open
    opened = Opening(segmented.getOutput(), iterations=1)
    acc = OrAccumulating(opened.getOutput())
    opened2 = Opening(acc.getOutput(), iterations=6)

    # find labels
    labels = ConnectedComponents(opened2.getOutput())

    # gather information
    rp = RegionProperties(labels.getOutput())

    wk = 0
    count = 0
    while wk != 27:
        count += 1
        ffs.increment()
        rp.update()
        print ffs.getIndex()

        b = str("b-%03d.png" % (count))
        s = str("s-%03d.png" % (count))
        l = str("l-%03d.png" % (count))
        p = str("p-%03d.png" % (count))

        # display windows (the save flag controls writing each frame to disk)
        wk = imgutil.imageShow(segmented.getOutput().getData(), wait=10,
                               title="Binary Segmentation", file=b,
                               flag=options.save) & 255
        wk = imgutil.imageShow(ic.getOutput().getData(), wait=10,
                               title="Source", file=s, flag=options.save) & 255
        wk = imgutil.imageShow(labels.getOutput().getData(), wait=10,
                               title="Labels", file=l, flag=options.save) & 255
        wk = imgutil.imageShow(rp.getOutput().getData(), wait=10,
                               title="Perimeter", file=p, flag=options.save) & 255

        # waits for the user to reset the image loop
        if ffs.getIndex() == ffs.getLength() - 1:
            pickle.dump((labels.getOutput().getData(), rp.com, rp.area,
                         rp.circularity), file("data.p", "w"))
            growth(rp.com, rp.area, rp.circularity)
            raw_input("Press enter to reset")
            acc.reset()
            rp.reset()

        # keys for controlling the number of standard deviations
        # up arrow: increases standard deviations
        # down arrow: decreases standard deviations
        if wk == 82:
            segmented.setAlpha(segmented.getAlpha() + 0.05)
            print segmented.getAlpha()
            acc.reset()
            rp.reset()
        if wk == 84:
            print segmented.getAlpha()
            segmented.setAlpha(segmented.getAlpha() - 0.05)
            acc.reset()
            rp.reset()
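# FlatFieldCalibration and BinarySegmentation are pipeline stages defined
# elsewhere; below is a minimal sketch of the math they presumably apply,
# assuming standard flat-field correction and a z-test style comparison
# against the background statistics. Function names here are hypothetical.
def flatFieldCorrect(img, ff):
    # scale each pixel by mean(ff)/ff so uneven illumination flattens out
    import numpy
    return img * (ff.mean() / ff)

def segmentForeground(img, bgMean, bgStd, alpha):
    # mark pixels more than alpha standard deviations from the background;
    # the small floor on bgStd avoids dividing attention to dead pixels
    import numpy
    return numpy.abs(img - bgMean) > alpha * numpy.maximum(bgStd, 1e-6)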
def display_avg(self):
    avg_image = self.avg.get_avg_image()
    # use "is not None": "!=" compares numpy arrays elementwise
    if avg_image is not None:
        imgutil.imageShow(avg_image, "Averaged image", False, 10)
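# Why the "is not None" fix above matters: in newer numpy versions,
# comparing an array to None with "!=" yields an elementwise boolean
# array, which is ambiguous when used as an if-condition:
#
#   import numpy
#   a = numpy.zeros((2, 2))
#   a != None        # elementwise boolean array, not a single bool
#   if a != None:    # raises "truth value of an array ... is ambiguous"
#       pass
#   a is not None    # a single bool, always safe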