Example #1
def avgstd_image_list(images):
    meanresult = None
    stdresult = None
    if len(images) > 0:
        scale = 1. / len(images)
        mean = cv.CreateImage(cv.GetSize(images[0]), cv.IPL_DEPTH_32F,
                              images[0].channels)
        std = cv.CreateImage(cv.GetSize(images[0]), cv.IPL_DEPTH_32F,
                             images[0].channels)
        buf = cv.CreateImage(cv.GetSize(images[0]), cv.IPL_DEPTH_32F,
                             images[0].channels)
        # cv.CreateImage does not clear the buffers, so zero the accumulators
        # before summing into them.
        cv.Zero(mean)
        cv.Zero(std)
        for image in images:
            cv.Add(image, mean, mean)
            cv.Mul(image, image, buf)
            cv.Add(buf, std, std)
        cv.ConvertScale(mean, mean, scale)
        cv.ConvertScale(std, std, scale)
        cv.Mul(mean, mean, buf)
        cv.Sub(std, buf, std)
        cv.Pow(std, std, 0.5)

        meanresult = cv.CreateImage(cv.GetSize(images[0]), images[0].depth,
                                    images[0].channels)
        stdresult = cv.CreateImage(cv.GetSize(images[0]), images[0].depth,
                                   images[0].channels)
        cv.ConvertScale(mean, meanresult)
        cv.ConvertScale(std, stdresult)
        del buf
        del std
        del mean
    return (meanresult, stdresult)
def createModelsfromStats():
    cv.ConvertScale(IavgF, IavgF, float(1.0 / Icount))
    cv.ConvertScale(IdiffF, IdiffF, float(1.0 / Icount))

    cv.AddS(IdiffF, cv.Scalar(1.0, 1.0, 1.0), IdiffF)
    setHighThresh(10.0)
    setLowThresh(10.0)
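For comparison, here is a minimal NumPy sketch of the mean/standard-deviation accumulation used in avgstd_image_list above, with the standard deviation taken as sqrt(E[X^2] - E[X]^2). The helper name and the assumption that the inputs are equally sized arrays are mine, not part of the example.

import numpy as np

def avgstd_arrays(arrays):
    # Stack the frames and keep the same two statistics the cv version
    # accumulates: the mean and the mean of squares.
    stack = np.stack([np.asarray(a, dtype=np.float32) for a in arrays])
    mean = stack.mean(axis=0)
    std = np.sqrt((stack ** 2).mean(axis=0) - mean ** 2)
    return mean, std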
Example #3
def scale_32f_image(image):
    '''
    Scales the given cv.IPL_DEPTH_32F type image to an 8 bit image so that the
    smallest value maps to 0 and the largest maps to 255.  Used for displaying
    debugging images.

    Processes each channel separately, which can produce some useful, but
    esoteric results.

    '''
    if image.depth != cv.IPL_DEPTH_32F:
        return image
    result = cv.CreateImage(cv.GetSize(image), 8, image.channels)
    channel_image = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
    channel_scaled = cv.CreateImage(cv.GetSize(image), 8, 1)
    for channel_num in xrange(1, image.channels + 1):

        cv.SetImageCOI(image, channel_num)
        cv.Copy(image, channel_image)
        minmaxloc = cv.MinMaxLoc(channel_image)
        minimum = minmaxloc[0]
        maximum = minmaxloc[1]
        if maximum - minimum > 0:
            cv.ConvertScale(channel_image, channel_scaled,
                            255 / (maximum - minimum),
                            -255 / (maximum - minimum) * minimum)
        else:
            cv.ConvertScale(channel_image, channel_scaled, 0, -255 / minimum)

        cv.SetImageCOI(result, channel_num)
        cv.Copy(channel_scaled, result)

    cv.SetImageCOI(image, 0)
    cv.SetImageCOI(result, 0)
    return result
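A rough modern equivalent of scale_32f_image using NumPy only (a sketch, not the original project's code): each channel is stretched independently so that its minimum maps to 0 and its maximum to 255, mirroring the per-channel COI loop above; constant channels are left at 0.

import numpy as np

def scale_32f_array(image):
    image = np.atleast_3d(np.asarray(image, dtype=np.float32))
    out = np.zeros(image.shape, dtype=np.uint8)
    for c in range(image.shape[2]):
        channel = image[:, :, c]
        lo, hi = channel.min(), channel.max()
        if hi > lo:
            # linear stretch of [lo, hi] onto [0, 255]
            out[:, :, c] = ((channel - lo) * (255.0 / (hi - lo))).astype(np.uint8)
    return out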
Example #4
    def run(self):
        while True:
            frame = cv.QueryFrame(self.capture)

            # Run the cam-shift (hist and backproject are set up earlier in
            # the loop, as in the fuller variant of this demo below)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram
            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)

            cv.ShowImage("Output", frame)
            cv.WaitKey(0)
Example #5
def get_normalized_rgb_planes(r, g, b):
    size = cv.GetSize(r)
    #    r,g,b = get_three_planes(img)

    nr_plane = cv.CreateImage(size, 8, 1)
    ng_plane = cv.CreateImage(size, 8, 1)
    nb_plane = cv.CreateImage(size, 8, 1)

    r32 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    g32 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    b32 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    sum = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    cv.Zero(sum)
    cv.Convert(r, r32)
    cv.Convert(g, g32)
    cv.Convert(b, b32)

    cv.Add(r32, g32, sum)
    cv.Add(b32, sum, sum)

    tmp = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    cv.Div(r32, sum, tmp)
    cv.ConvertScale(tmp, nr_plane, scale=255)
    cv.Div(g32, sum, tmp)
    cv.ConvertScale(tmp, ng_plane, scale=255)
    cv.Div(b32, sum, tmp)
    cv.ConvertScale(tmp, nb_plane, scale=255)

    #    res = image_empty_clone(img)
    #    cv.Merge(nr_plane,ng_plane,nb_plane,None,res)
    return nr_plane, ng_plane, nb_plane
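The same normalized-RGB idea, sketched with NumPy under the assumption that the three input planes are equally sized uint8 arrays; the zero-sum guard is an addition here to avoid dividing by zero on black pixels.

import numpy as np

def normalized_rgb(r, g, b):
    planes = np.stack([r, g, b]).astype(np.float32)
    total = planes.sum(axis=0)
    total[total == 0] = 1.0  # avoid division by zero on black pixels
    return [(p / total * 255).astype(np.uint8) for p in planes]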
Example #6
    def createModelsfromStats(self):
        cv.ConvertScale(self.IavgF, self.IavgF, float(1.0 / self.Icount))
        cv.ConvertScale(self.IdiffF, self.IdiffF, float(1.0 / self.Icount))

        cv.AddS(self.IdiffF, cv.Scalar(1.0, 1.0, 1.0), self.IdiffF)
        self.setHighThresh(200.0)
        self.setLowThresh(200.0)
    def processFrames(self):
        self.vidcap = cv2.VideoCapture(self.path)

        count = 0

        success, image = self.vidcap.read()
        print success

        self.createWindows()

        while True:
            success, image = self.vidcap.read()

            if not success:
                return

            spare = cv.fromarray(image)

            size = (spare.width / 2, spare.height / 2)

            cv.Smooth(spare, spare, cv.CV_GAUSSIAN, BLUR_SIZE, BLUR_SIZE)

            out = cv.CreateImage(size, 8, 3)
            cv.PyrDown(spare, out)

            yuv = cv.CreateImage(size, 8, 3)
            gray = cv.CreateImage(size, 8, 1)
            canny = cv.CreateImage(size, 8, 1)
            sobel = cv.CreateImage(size, 8, 1)
            harris = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)

            cv.CvtColor(out, yuv, cv.CV_BGR2YCrCb)
            cv.Split(yuv, gray, None, None, None)

            cv.Canny(gray, canny, 50, 200, 3)
            cv.CornerHarris(gray, harris, 3)
            cv.Sobel(gray, sobel, 1, 0, 3)

            cv.ConvertScale(canny, canny, -1, 255)
            cv.ConvertScale(sobel, sobel, -1, 255)

            for y in range(0, out.height):
                for x in range(0, out.width):
                    harr = cv.Get2D(sobel, y, x)
                    if harr[0] < 10e-06:
                        cv.Circle(out, (x, y), 2, cv.RGB(155, 0, 25))

            #cv2.imwrite("frame%d.jpg" % count, np.asarray(canny[:,:]))

            cv.ShowImage('canny', canny)
            #cv.ShowImage(   'harris'   , harris  )
            cv.ShowImage('sobel', sobel)
            cv.ShowImage('output', out)

            if cv2.waitKey(1) == 27:
                break
            count += 1

        return
Example #8
    def loadImages(self, left_file, right_file, im1, im2):
        im1_int = cv.LoadImageM(self.left_folder + '/' + left_file, 0)
        im2_int = cv.LoadImageM(self.right_folder + '/' + right_file, 0)

        cv.ConvertScale(im1_int, im1, 1. / 255)
        cv.ConvertScale(im2_int, im2, 1. / 255)

        self.rig.stereoRectify1(im1, im1)
        self.rig.stereoRectify2(im2, im2)
Example #9
    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = False
        while True:
            frame = cv.QueryFrame(self.capture)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            print(self.hue)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject([self.hue], backproject, hist)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)

            if not backproject_mode:
                #frame=cv.Flip(frame)
                cv.ShowImage("CamShiftDemo", frame)
            else:
                cv.ShowImage("CamShiftDemo", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7)
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
Example #10
    def do_camshift(self, cv_image):
        """ Get the image size """
        image_size = cv.GetSize(cv_image)
        image_width = image_size[0]
        image_height = image_size[1]
        
        """ Convert to HSV and keep the hue """
        hsv = cv.CreateImage(image_size, 8, 3)
        cv.CvtColor(cv_image, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(image_size, 8, 1)
        cv.Split(hsv, self.hue, None, None, None)

        """ Compute back projection """
        backproject = cv.CreateImage(image_size, 8, 1)

        """ Run the cam-shift algorithm """
        cv.CalcArrBackProject( [self.hue], backproject, self.hist )
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
            self.track_window = rect
     
        """ If mouse is pressed, highlight the current selected rectangle
            and recompute the histogram """

        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(cv_image, self.selection)
            save = cv.CloneMat(sub)
            cv.ConvertScale(cv_image, cv_image, 0.5)
            cv.Copy(save, sub)
            x,y,w,h = self.selection
            cv.Rectangle(cv_image, (x,y), (x+w,y+h), (255,255,255))

            sel = cv.GetSubRect(self.hue, self.selection )
            cv.CalcArrHist( [sel], self.hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(self.hist)
            if max_val != 0:
                cv.ConvertScale(self.hist.bins, self.hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            cv.EllipseBox( cv_image, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )
            
            roi = RegionOfInterest()
            roi.x_offset = int(min(image_width, max(0, track_box[0][0] - track_box[1][0] / 2)))
            roi.y_offset = int(min(image_height, max(0, track_box[0][1] - track_box[1][1] / 2)))
            roi.width = int(track_box[1][0])
            roi.height = int(track_box[1][1])
            self.ROI.publish(roi)

        cv.ShowImage("Histogram", self.hue_histogram_as_image(self.hist))
        
        if not self.backproject_mode:
            return cv_image
        else:
            return backproject
Example #11
def cam_measurebulk(nframes=100,
                    interactive=True,
                    show=True,
                    norm=False,
                    verb=0):
    """
	Take **nframes** frames and average these. If **norm** is set, set the 
	average of the summed frame to unity, otherwise it is divided by the 
	number of frames.

	This routine is intended to measure flat and dark frames. Flat frames 
	might be normalized such that dividing by these does not affect the 
	average intensity of the input frame. Dark frames should never be 
	normalized.

	The flatfield is stored in CAM_CFG['flat'] and is used automatically 
	from then on.

	@param [in] nframes Number of frames to average
	@param [in] show Show flat field + one correct image when done
	@param [in] verb Verbosity
	@return Summed and scaled frame.
	"""

    if (verb & VERB_M > L_INFO):
        print "Measuring bulk (n=%d)..." % (nframes)

    if (interactive):
        print "Will measure bulk now, press c to continue..."
        while (True):
            cam_getimage(show=True, waitkey=0)
            if (chr(cv.WaitKey(1) & 255) == "c"):
                print "ok!"
                break

    bulkimg = cam_getimage(show=False, dfcorr=False, raw=True)

    for dummy in xrange(nframes - 1):
        cv.Add(bulkimg, cam_getimage(show=False, dfcorr=False, raw=True),
               bulkimg)

    if (norm):
        cv.ConvertScale(bulkimg, bulkimg, scale=1.0 / cv.Avg(bulkimg)[0])
    else:
        cv.ConvertScale(bulkimg, bulkimg, scale=1.0 / nframes)

    if (show):
        cv.NamedWindow("cam_bulkimg", cv.CV_WINDOW_AUTOSIZE)
        cv.ShowImage('cam_bulkimg', bulkimg)
        c = cv.WaitKey(20)

    return bulkimg
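The bulk-averaging step above reduces to a running sum divided by the frame count, or scaled so the result has unit mean when norm is set. A hedged NumPy sketch, assuming the caller supplies a frame-grabbing callable (get_frame below is hypothetical):

import numpy as np

def measure_bulk(get_frame, nframes=100, norm=False):
    acc = np.asarray(get_frame(), dtype=np.float64)
    for _ in range(nframes - 1):
        acc += get_frame()
    if norm:
        return acc / acc.mean()   # average of the result becomes 1.0
    return acc / nframes          # plain per-pixel average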
Example #12
def average_image_list(images):
    result = None
    if len(images) > 0:
        scale = 1. / len(images)
        mean = cv.CreateImage(cv.GetSize(images[0]), cv.IPL_DEPTH_32F,
                              images[0].channels)
        result = cv.CreateImage(cv.GetSize(images[0]), images[0].depth,
                                images[0].channels)
        # Zero the accumulator; cv.CreateImage leaves the buffer uninitialized.
        cv.Zero(mean)
        for image in images:
            cv.Add(image, mean, mean)
        cv.ConvertScale(mean, mean, scale)
        cv.ConvertScale(mean, result)
        del mean
    return result
    def get_image(self):
        """
        Retrieve an image of the correct type from the Kinect, depending on the
        type that was passed to the constructor.

        Since the classes share an OpenNI camera instance, only obtain the image
        at the set update frequency.
        """
        global NI_grabtime
        global NI_camera

        if time.time() > NI_grabtime + self.grab_interval:
            cv.GrabFrame(NI_camera)
            NI_grabtime = time.time()

        if self.img_type == "depth":
            depth = cv.RetrieveFrame(NI_camera, cv.CV_CAP_OPENNI_DEPTH_MAP)
            temp = cv.CreateImage(cv.GetSize(depth), cv.IPL_DEPTH_8U, 1)
            cv.ConvertScale(depth, temp, 0.0625, 0.0)
            # temp = doUsefulConvert8(cv2array(depth))
        elif self.img_type == "rgb":
            temp = cv.RetrieveFrame(NI_camera, cv.CV_CAP_OPENNI_BGR_IMAGE)
        elif self.img_type == "pcl":
            temp = cv.RetrieveFrame(NI_camera,
                                    cv.CV_CAP_OPENNI_POINT_CLOUD_MAP)

        if temp is None:
            raise Exception("Unable to start Kinect, check connection")
        return temp
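In the depth branch above, the 0.0625 factor is a divide-by-16 that squeezes the OpenNI depth map into the 8-bit display range. A one-function NumPy sketch of that conversion, assuming a uint16 depth frame (the helper name is hypothetical):

import numpy as np

def depth_to_8bit(depth16):
    # divide by 16 (scale 0.0625) and saturate to the 8-bit range
    return np.clip(np.asarray(depth16, dtype=np.float32) * 0.0625,
                   0, 255).astype(np.uint8)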
Example #14
 def cropFrame(self, frame, lastMarkerLocationX, lastMarkerLocationY):
     if (not self.trackerIsInitialized):
         self.markerTracker.allocateSpaceGivenFirstFrame(self.originalImage)
         self.reducedImage = cv.CreateImage(
             (self.windowWidth, self.windowHeight), frame.depth, 3)
     xCornerPos = lastMarkerLocationX - self.windowWidth / 2
     yCornerPos = lastMarkerLocationY - self.windowHeight / 2
     # Ensure that extracted window is inside the original image.
     if (xCornerPos < 1):
         xCornerPos = 1
     if (yCornerPos < 1):
         yCornerPos = 1
     if (xCornerPos > frame.width - self.windowWidth):
         xCornerPos = frame.width - self.windowWidth
     if (yCornerPos > frame.height - self.windowHeight):
         yCornerPos = frame.height - self.windowHeight
     try:
         self.subImagePosition = (xCornerPos, yCornerPos, self.windowWidth,
                                  self.windowHeight)
         self.reducedImage = cv.GetSubRect(frame, self.subImagePosition)
         cv.ConvertScale(self.reducedImage, self.originalImage)
         cv.CvtColor(self.originalImage, self.frameGray, cv.CV_RGB2GRAY)
     except:
         print("frame: ", frame.depth)
         print("originalImage: ", self.originalImage.height,
               self.originalImage.width, self.originalImage)
         print("frameGray: ", self.frameGray.height, self.frameGray.width,
               self.frameGray.depth)
         print "Unexpected error:", sys.exc_info()[0]
         #quit(0)
         pass
Example #15
def getData():
  for i in range(0, classes):
    for j in range(0, train_samples):
      if j < 10:
        fichero = "OCR/" + str(i) + "/" + str(i) + "0" + str(j) + ".pbm"
      else:
        fichero = "OCR/" + str(i) + "/" + str(i) + str(j) + ".pbm"
      src_image = cv.LoadImage(fichero, 0)
      prs_image = preprocessing(src_image, size, size)

      row = cv.GetRow(trainClasses, i * train_samples + j)
      cv.Set(row, cv.RealScalar(i))
      row = cv.GetRow(trainData, i * train_samples + j)

      img = cv.CreateImage((size, size), cv.IPL_DEPTH_32F, 1)
      # 0.0039215 ~= 1/255: rescale the 8-bit sample into [0, 1]
      cv.ConvertScale(prs_image, img, 0.0039215, 0)

      data = cv.GetSubRect(img, (0, 0, size, size))
      row1 = cv.Reshape(data, 0, 1)
      cv.Copy(row1, row)
Example #16
def classify(img):
  nearest = cv.CreateMat(1, K, cv.CV_32FC1)
  prs_image = preprocessing(img, size, size)

  img32 = cv.CreateImage((size, size), cv.IPL_DEPTH_32F, 1)
  cv.ConvertScale(prs_image, img32, 0.0039215, 0)
  data = cv.GetSubRect(img32, (0, 0, size, size))
  row1 = cv.Reshape(data, 0, 1)
  result = knn.find_nearest(nearest, row1, K, 0, 0, 0)
  result = 0

  indices = cv.Mat(N, K, cv.CV_32S)
  dists = cv.Mat(N, K, cv.CV_32F)

  flann.knnSearch(m_object, indices, dists, K, cv.SearchParams(250))

  accuracy = 0
  for i in range(0, K):
    #  print nearest
    # if nearest.data.fl[i] == result:
    accuracy += 1
    pre = 100 * (float(accuracy) / float(K))

  #print "r: ", result, " pre ", pre, " accu ", accuracy, " K ", K
  return result
Example #17
 def normalize(self, image):
     """ scale image to max of 255 """
     minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(image)
     if maxVal > 0:
         scaler = 255 / maxVal
         cv.ConvertScale(image, image, scale=scaler, shift=0.0)
     return image
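The same max-to-255 normalization in plain NumPy, assuming a floating-point array (a sketch; the in-place cv.ConvertScale call above is what the original uses):

import numpy as np

def normalize_to_255(arr):
    peak = float(arr.max())
    return arr * (255.0 / peak) if peak > 0 else arr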
Example #18
def main():
    while True:
        cv.NamedWindow('a_window', cv.CV_WINDOW_AUTOSIZE)
        #logfiles = sorted([ f for f in os.listdir(report_dirName) if f.startswith('image')])
        #logfiles=GetLatestArchive('image*.jpg')
        latest_folder = report_dirName + latest_file(name_start='Z',
                                                     name_end='') + '\\'
        image = cv.LoadImage(
            latest_folder +
            latest_file(path=latest_folder, name_start='Z', name_end='.tif'),
            cv.CV_LOAD_IMAGE_COLOR)  # .jpg images are about 4x smaller
        #img = cv2.imread(latest_folder+latest_file(path=latest_folder, name_start='', name_end='.tif'))
        #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        #img2=cv2.equalizeHist(gray)
        #cvmat_img2=cv.fromarray(img2)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
        newFrameImage8U = cv.CreateImage((image.width, image.height),
                                         cv.IPL_DEPTH_8U,
                                         3)  # optional convert to 8U
        cv.ConvertScale(image, newFrameImage8U)  # optional
        image = newFrameImage8U  # optional
        cv.PutText(image, "Counter:", (x, y), font, 255)  #Draw the text
        #cv.PutText(cvmat_img2,"Counter:", (x,y),font, 255)
        cv.ShowImage('a_window', image)  #Show the image
        #cv.Waitkey(10000)
        # open the latest xml-file in this folder and get the stage coordinates (x,y,z)
        (stage_x, stage_y, stage_z) = return_xyz_coordinates(
            latest_folder +
            latest_file(path=latest_folder, name_start='', name_end='.xml'))
        print 'stage coordinates x,y,z:', stage_x, stage_y, stage_z
        if cv.WaitKey(10) == 27:
            break
    cv.DestroyWindow("a_window")
Example #19
def show_threshold(filename):
    '''threshold an image'''
    global threshold, imgf
    pgm = util.PGM(filename)

    cv.ConvertScale(pgm.img, imgf, scale=1.0 / 65536)
    return change_threshold(threshold)
Example #20
def main():
    if len(sys.argv) < 2:
        print "Come on, give me some files to play with"
        return

    print "Reading image " + sys.argv[1]

    incoming = cv.LoadImageM(sys.argv[1])
    w, h = (incoming.cols, incoming.rows)
    nw, nh = (int(w * 1.5 + 0.5), int(h * 1.5 + 0.5))
    img = cv.CreateImage(cv.GetSize(incoming), cv.IPL_DEPTH_32F, 3)
    cv.Convert(incoming, img)

    n = 0
    for f in sys.argv[1:]:
        incoming = cv.LoadImageM(f)
        w, h = (incoming.cols, incoming.rows)
        nw, nh = (int(w * 1.5 + 0.5), int(h * 1.5 + 0.5))
        new = cv.CreateImage(cv.GetSize(incoming), cv.IPL_DEPTH_32F, 3)
        cv.Convert(incoming, new)

        n += 1
        print "Read in image [%04d] [%s]" % (n, f)
        img = imageBlend(img, new, 1.0 / n)

        del (new)

    out = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_16U, 3)
    cv.ConvertScale(img, out, 256.)
    cv.SaveImage("out-16-up.png", out)
    print "Written out-16-up.png"
Example #21
 def display_scanline_associations(self, associations):
     display_image = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_8UC1)
     cv.ConvertScale(associations, display_image,
                     255.0 / self.number_of_scanlines)
     cv.NamedWindow("associations", flags=0)
     cv.ShowImage("associations", display_image)
     cv.WaitKey(800)
Example #22
    def run(self):

        self.objects[0] = list()

        while True:
            frame = cv.QueryFrame(self.capture)
            if frame is None:
                break
            self.frameNumber = self.frameNumber + 1

            if self.frameNumber % 10 == 0:
                self.save()

            #copy last frame objects
            self.objects[self.frameNumber] = deepcopy(
                self.objects[self.frameNumber - 1][:])

            while True:
                frameSelection = cv.CloneImage(frame)
                for object in self.objects[self.frameNumber]:
                    x, y, w, h = object.getRectangle()
                    cv.Rectangle(frameSelection, (x, y), (x + w, y + h),
                                 (255, 255, 255))

                # If mouse is pressed, highlight the current selected rectangle
                # and recompute the histogram
                if self.drag_start and is_rect_nonzero(self.selection):

                    sub = cv.GetSubRect(frameSelection, self.selection)
                    save = cv.CloneMat(sub)
                    cv.ConvertScale(frameSelection, frameSelection, 0.5)
                    cv.Copy(save, sub)
                    x, y, w, h = self.selection
                    cv.Rectangle(frameSelection, (x, y), (x + w, y + h),
                                 (255, 255, 255))

                cv.ShowImage("Output", frameSelection)
                key = cv.WaitKey(1000 / 25)
                if key == ord("n"):
                    break
                if key == ord("w"):
                    self.save()
                if key == ord("s"):
                    self.startFrame = self.frameNumber
                    print "Set Starting frame: " + self.frameNumber
                if key == ord("c"):
                    self.objects[self.frameNumber].extend(
                        self.objects[self.frameNumber - 1][:])
                if key == ord("d"):
                    self.objects[self.frameNumber] = list()
                elif key == 65364:
                    moveObjects(self.objects[self.frameNumber], (0, 1))
                elif key == 65362:
                    moveObjects(self.objects[self.frameNumber], (0, -1))
                elif key == 65363:
                    moveObjects(self.objects[self.frameNumber], (1, 0))
                elif key == 65361:
                    moveObjects(self.objects[self.frameNumber], (-1, 0))
Example #23
 def draw_mouse_drag_area(self, frame):
     """ Highlight the current selected rectangle
     """
     sub = cv.GetSubRect(frame, self.selection)
     save = cv.CloneMat(sub)
     cv.ConvertScale(frame, frame, 0.5)
     cv.Copy(save, sub)
     x, y, w, h = self.selection
     cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
Example #24
 def redisplay_mosaic(self):
     '''re-display whole mosaic page'''
     self.mosaic = cv.CreateImage((self.height, self.width), 8, 3)
     cuav_util.zero_image(self.mosaic)
     for ridx in range(len(self.regions)):
         self.display_mosaic_region(ridx)
     if self.brightness != 1.0:
         cv.ConvertScale(self.mosaic, self.mosaic, scale=self.brightness)
     self.image_mosaic.set_image(self.mosaic, bgr=True)
Example #25
    def scanline_numbers_to_planes(self, scanline_numbers):
        rows = scanline_numbers.height
        cols = scanline_numbers.width
        normal_vectors_x = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_x, -1)
        normal_vectors_y = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_y, 0)
        normal_vectors_z = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Copy(scanline_numbers, normal_vectors_z)

        cv.ConvertScale(normal_vectors_z,
                        normal_vectors_z,
                        scale=self.pixels_per_scanline)
        cv.AddS(normal_vectors_z, -self.center_pixel, normal_vectors_z)
        cv.ConvertScale(normal_vectors_z,
                        normal_vectors_z,
                        scale=1.0 / self.projector_model.fx())

        normal_vectors = cv.CreateMat(rows, cols, cv.CV_32FC3)
        cv.Merge(normal_vectors_x, normal_vectors_y, normal_vectors_z, None,
                 normal_vectors)

        # Bring the normal vectors into camera coordinates
        cv.Transform(normal_vectors, normal_vectors,
                     self.projector_to_camera_rotation_matrix)

        normal_vectors_split = [None] * 3
        for i in range(3):
            normal_vectors_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Split(normal_vectors, normal_vectors_split[0],
                 normal_vectors_split[1], normal_vectors_split[2], None)

        n_dot_p = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.SetZero(n_dot_p)
        for i in range(3):
            cv.ScaleAdd(normal_vectors_split[i],
                        self.projector_to_camera_translation_vector[i],
                        n_dot_p, n_dot_p)

        planes = cv.CreateMat(rows, cols, cv.CV_32FC4)
        cv.Merge(normal_vectors_split[0], normal_vectors_split[1],
                 normal_vectors_split[2], n_dot_p, planes)

        return planes
Example #26
    def __init__(self, left_filter, right_filter, left_rect, right_rect):
        '''
        @param left_filter: is in the Fourier domain where the left eye 
                corresponds to the real output and the right eye corresponds to 
                the imaginary output
        '''
        # Check the input to this function
        r, c = left_filter.rows, left_filter.cols

        assert left_filter.width == right_filter.width
        assert left_filter.height == right_filter.height
        assert left_filter.channels == 1
        assert right_filter.channels == 1

        # Create the arrays needed for the computation
        self.left_filter = cv.CreateMat(r, c, cv.CV_32F)
        self.right_filter = cv.CreateMat(r, c, cv.CV_32F)
        self.left_filter_dft = cv.CreateMat(r, c, cv.CV_32F)
        self.right_filter_dft = cv.CreateMat(r, c, cv.CV_32F)
        self.image = cv.CreateMat(r, c, cv.CV_32F)
        self.left_corr = cv.CreateMat(r, c, cv.CV_32F)
        self.right_corr = cv.CreateMat(r, c, cv.CV_32F)

        # Populate the spatial filters
        cv.ConvertScale(left_filter, self.left_filter)
        cv.ConvertScale(right_filter, self.right_filter)

        # Compute the filters in the Fourier domain
        cv.DFT(self.left_filter, self.left_filter_dft, cv.CV_DXT_FORWARD)
        cv.DFT(self.right_filter, self.right_filter_dft, cv.CV_DXT_FORWARD)

        # Set up correlation region of interest
        self.left_rect = left_rect
        self.right_rect = right_rect

        self.left_roi = cv.GetSubRect(self.left_corr, self.left_rect)
        self.right_roi = cv.GetSubRect(self.right_corr, self.right_rect)

        # Create the look up table for the log transform
        self.lut = cv.CreateMat(256, 1, cv.CV_32F)

        for i in range(256):
            self.lut[i, 0] = math.log(i + 1)
Example #27
 def mkgray(self, msg):
     """
     Convert a message into an 8-bit, 1-channel monochrome OpenCV image
     """
     # as cv_bridge automatically scales, we need to remove that behavior
     if msg.encoding.endswith('16'):
         mono16 = self.br.imgmsg_to_cv(msg, "mono16")
         mono8 = cv.CreateMat(mono16.rows, mono16.cols, cv.CV_8UC1)
         cv.ConvertScale(mono16, mono8)
         return mono8
     elif 'FC1' in msg.encoding:
         # floating point image handling
         img = self.br.imgmsg_to_cv(msg, "passthrough")
         mono_img = cv.CreateMat(img.rows, img.cols, cv.CV_8UC1)
         _, max_val, _, _ = cv.MinMaxLoc(img)
         scale = 255.0 / max_val if max_val > 0 else 1.0
         cv.ConvertScale(img, mono_img, scale)
         return mono_img
     else:
         return self.br.imgmsg_to_cv(msg, "mono8")
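A cv_bridge-free sketch of the same mono conversions with NumPy, under the assumption that the image is already a 2-D array: 16-bit input is saturated at 255 (matching cv.ConvertScale with the default scale), and floating-point input is stretched by its maximum.

import numpy as np

def to_mono8(img):
    img = np.asarray(img)
    if img.dtype == np.uint16:
        return np.clip(img, 0, 255).astype(np.uint8)   # saturating cast
    if img.dtype.kind == 'f':
        peak = float(img.max())
        scale = 255.0 / peak if peak > 0 else 1.0
        return np.clip(img * scale, 0, 255).astype(np.uint8)
    return img.astype(np.uint8)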
Example #28
    def set_hist(self, frame, selection):
        sub = cv.GetSubRect(frame, selection)
        save = cv.CloneMat(sub)

        cv.ConvertScale(frame, frame, 0.5)
        cv.Copy(save, sub)
        x, y, w, h = selection

        # rectangular piece of frame
        cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

        sel = cv.GetSubRect(self.hue, selection)
        cv.CalcArrHist([sel], self.hist, 0)

        # get the most prevalent color in the histogram
        (_, max_val, _, _) = cv.GetMinMaxHistValue(self.hist)

        if max_val != 0:
            cv.ConvertScale(self.hist.bins, self.hist.bins, 255. / max_val)
            print "Val set to " + str(max_val)
Example #29
def show_edges(filename):
    '''show edges in an image'''
    pgm = util.PGM(filename)

    # convert to 8 bit
    img8 = cv.CreateImage((1280, 960), 8, 1)
    cv.ConvertScale(pgm.img, img8, scale=1.0 / 256)

    edge1 = cv.CreateImage((1280, 960), 8, 1)
    cv.Canny(img8, edge1, 250, 255, 5)

    edgecolor = cv.CreateImage((1280, 960), 8, 3)
    edgecolor16 = cv.CreateImage((1280, 960), 16, 3)
    cv.CvtColor(edge1, edgecolor, cv.CV_GRAY2RGB)
    cv.ConvertScale(edgecolor, edgecolor16, scale=256)

    color_img = cv.CreateImage((1280, 960), 16, 3)
    cv.CvtColor(pgm.img, color_img, cv.CV_GRAY2RGB)

    cv.AddWeighted(color_img, 1.0, edgecolor16, 1.0, 0.5, color_img)
Example #30
 def redisplay_mosaic(self):
     '''re-display whole mosaic page'''
     width = (self.width // self.thumb_size) * self.thumb_size
     height = (self.height // self.thumb_size) * self.thumb_size
     self.mosaic = cv.CreateImage((width,height),8,3)
     cuav_util.zero_image(self.mosaic)
     for ridx in range(len(self.regions_sorted)):
         self.display_mosaic_region(ridx)
     if self.brightness != 1.0:
         cv.ConvertScale(self.mosaic, self.mosaic, scale=self.brightness)
     self.image_mosaic.set_image(self.mosaic, bgr=True)