Code Example #1
    def __init__(self, slipmap,
                 grid_width=20, grid_height=20, thumb_size=35, C=CameraParams(),
                 camera_settings = None,
                 image_settings = None,
                 start_menu=False,
                 classify=None):
        self.thumb_size = thumb_size
        self.width = grid_width * thumb_size
        self.height = grid_height * thumb_size
        self.mosaic = cv.CreateImage((self.width, self.height), 8, 3)
        cuav_util.zero_image(self.mosaic)
        self.display_regions = grid_width*grid_height
        self.regions = []
        self.regions_sorted = []
        self.regions_hidden = set()
        self.mouse_region = None
        self.ridx_by_frame_time = {}
        self.page = 0
        self.sort_type = 'Time'
        self.images = []
        self.current_view = 0
        self.last_view_latlon = None
        self.view_filename = None
        self.full_res = False
        self.boundary = []
        self.displayed_image = None
        self.last_click_position = None
        self.c_params = C
        self.camera_settings = camera_settings
        self.image_settings = image_settings
        self.start_menu = start_menu
        self.classify = classify
        self.has_started = not start_menu
        import wx
        self.image_mosaic = mp_image.MPImage(title='Mosaic', 
                                             mouse_events=True,
                                             key_events=True,
                                             auto_size=False,
                                             report_size_changes=True)
        self.slipmap = slipmap
        self.selected_region = 0

        self.view_image = None
        self.brightness = 1

        # dictionary of image requests, contains True if fullres image is wanted
        self.image_requests = {}

        self.slipmap.add_callback(functools.partial(self.map_callback))

        if classify:
            import lxml.objectify, lxml.etree
            with open(classify) as f:
                categories = lxml.objectify.fromstring(f.read())
                cat_names = set()
                self.categories = []
                try:
                    for c in categories.category:
                        self.categories.append((c.get('shortcut') or '', c.text))
                        if c.text in cat_names:
                            print 'WARNING: category name',c.text,'used more than once'
                        else:
                            cat_names.add(c.text)
                except AttributeError as ex:
                    print ex
                    print 'failed to load any categories for classification'
            self.region_class = lxml.objectify.E.regions()

        self.add_menus()
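For reference, the classify file parsed above is an XML document whose root holds category children with an optional shortcut attribute; a minimal sketch of a plausible file (the root tag and category names here are invented, only the structure is taken from the parsing code):

<categories>
  <category shortcut="c">car</category>
  <category shortcut="p">person</category>
  <category>other</category>
</categories>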
Code Example #2
def callback(data):
    global frame
    global frame_size
    global gray_frame
    global canny_result
    global forrun
    global old_target
    bridge = CvBridge()
    try:
        cv_image = bridge.imgmsg_to_cv(data, "bgr8")
    except CvBridgeError, e:
        print e
    frame = cv_image
    frame_size = cv.GetSize(frame)
    gray_frame = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    canny_result = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    cv.CreateTrackbar("canny_init", 'camera', get_canny_init(), 1000,
                      on_init_change)
    cv.CreateTrackbar("canny_link", 'camera', get_canny_link(), 1000,
                      on_link_change)
    forrun = frame_size[1] / 2
    old_target = [frame_size[0] / 2, forrun]
    los()


def main(args):
    global canny_init
    global canny_link
    global frame_on
    global canny_on
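This callback is a fragment of a ROS node; a minimal sketch of the wiring it assumes (the node and topic names are hypothetical, not from the original):

import rospy
from sensor_msgs.msg import Image

rospy.init_node('line_tracker')                         # hypothetical node name
cv.NamedWindow('camera')                                # the trackbars above attach to this window
rospy.Subscriber('/camera/image_raw', Image, callback)  # hypothetical topic name
rospy.spin()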
Code Example #3
            moments = cv.Moments(largest_contour, 1)
            positions_x.append(
                cv.GetSpatialMoment(moments, 1, 0) /
                cv.GetSpatialMoment(moments, 0, 0))
            positions_y.append(
                cv.GetSpatialMoment(moments, 0, 1) /
                cv.GetSpatialMoment(moments, 0, 0))
            # discard all but the last N positions
            positions_x = positions_x[-SMOOTHNESS:]
            positions_y = positions_y[-SMOOTHNESS:]

    #
    # object_indicator will be the new image which shows where the identified
    # blob has been located.
    #
    object_indicator = cv.CreateImage(cv.GetSize(image), image.depth, 3)
    cv.Zero(object_indicator)  # CreateImage leaves pixel data uninitialized

    #
    # the average location of the identified blob
    #
    pos_x = (sum(positions_x) / len(positions_x))
    pos_y = (sum(positions_y) / len(positions_y))
    object_position = (int(pos_x), int(pos_y))

    cv.Circle(object_indicator, object_position, 12, (0, 0, 255), 4)

    if FOLLOW and blobContour:
        # draw a line to the desiredPosition
        desiredPosition = cv.GetSize(image)
        desiredPosition = tuple(map(operator.mul, desiredPosition, (0.5, 0.5)))
        desiredPosition = tuple(map(int, desiredPosition))
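The positions above are blob centroids, i.e. ratios of spatial moments; as a standalone sketch of that computation (contour stands for any sequence returned by cv.FindContours):

moments = cv.Moments(contour, 1)               # binary=1: count nonzero pixels as 1
m00 = cv.GetSpatialMoment(moments, 0, 0)       # zeroth moment = area
cx = cv.GetSpatialMoment(moments, 1, 0) / m00  # x centroid = M10 / M00
cy = cv.GetSpatialMoment(moments, 0, 1) / m00  # y centroid = M01 / M00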
Code Example #4
File: harp2.py Project: tawanjairew/openlase
    dots.append((-x, off))

olt.dots = dots

time.sleep(1)
camera = cv.CreateCameraCapture(0)
cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_WIDTH, WIDTH)
cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_HEIGHT, HEIGHT)
cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_BRIGHTNESS, 0)

# discard the first few frames while the camera settles
for i in range(4):
    image = cv.QueryFrame(camera)

grey = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
grey2 = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)

olt.start()
print "Thread running"
time.sleep(1)

try:
    for i in range(10):
        image = cv.QueryFrame(camera)
    refpoints = getpoints(image)
    refpoints.sort(key=lambda x: x[0])

    print len(refpoints), n_end - n_start + 1

    frameno = 1
Code Example #5
    def __init__(self,
                 frame,
                 rect,
                 update_rate = 1.0/16.0, 
                 min_psr = 8.0,
                 max_psr=None,
                 tile_size=(64,64), 
                 reg = 0.1, 
                 type = "MOSSE", 
                 sigma=2.0, 
                 init_time = 8,
                 strategy=INIT_NINE_TRANSFORM,
                 dscale=0.05,
                 drotate=0.1, 
                 update_location=True,
                 subpixel=False,
                 color="red", 
                 **kwargs):
        '''
        This function creates and initializes the filter based tracking.  On
        creation the tracker requires two arguments: the first frame for
        tracking, and a rectangle specifying the tracked object.
        
        @param frame: A source video frame to _initialize from.
        @type  frame: pv.Image
        @param rect: A rectangle defining the object to be tracked in "frame".
        @type  rect: pv.Rect         
        @keyword update_rate: The adaptation rate of the filter.
        @type  update_rate: 0.0 <= float <= 1.0
        @keyword tile_size: The down sampled size for the track window. 
        @type  tile_size: (width,height)
        @keyword sigma: The radius of the Gaussian used to generate the filter output.
        @type  sigma: float
        @keyword reg: A regularization parameter to improve the stability of the filter.
        @type  reg: float
        @keyword type: The type of filter used for tracking.
        @type  type: "MOSSE","ASEF","UMACE", or "MEAN"
        @keyword strategy: a strategy used to create the first filter used by the tracker.
        @type  strategy: INIT_ONE_TRANSFORM, INIT_NINE_TRANSFORM
        @keyword dscale: The size of the scale changes for initialization.
        @type  dscale: float
        @keyword drotate: The size of the rotation change for initialization (in radians).
        @type  drotate: float
        @keyword min_psr: Minimum PSR needed for tracking.  The threshold for occlusion detection.
        @type  min_psr: float
        @keyword max_psr: The threshold for turning off adaptation.  This may save computation time when the tracker is performing well.  Set to a large value, probably greater than 30.
        @type  max_psr: float
        @keyword init_time: The minimum number of frames before the min_psr threshold applies.  Provides a short initialization time.
        @type  init_time: int
        @keyword update_location: If False, disables location updates for the track.  Good for things like measuring camera motion.
        @type  update_location: Boolean
        @keyword subpixel: Allow subpixel estimation of track center.  This may increase instability.
        @type  subpixel: boolean
        @keyword color: A color used for annotateFrame.
        @type  color: "red" or "#FF0000"
        @param kwargs: Additional keyword parameters passed to the filter.
        @type  kwargs: dict
        '''
        # Set Defaults

        # Setup translation filters for fast peak creation
        ocof.translationFilter.initSize(tile_size)

        self.rect = None
        self.psr_cache = None
        self.corr = None
        self.min_psr = min_psr
        self.max_psr = max_psr
        self.init_time = init_time
        self.life = 0
        
        
        self.tile_size = tile_size
        self.filter = _TrackingFilter(tile_size,type=type,sigma=sigma,reg=reg,norm_meanunit=True,**kwargs)
        self.resampled = cv.CreateImage(self.tile_size,cv.IPL_DEPTH_8U,3)
        self.prevRect = None
        self.update_rate = update_rate

        self.frame = -1
        self.update_time = 0.0
        
        self.strategy = strategy
        self.dscale = dscale
        self.drotate = drotate
        
        self.update_location = update_location
        self.subpixel = subpixel

        self.best_estimate = None
        
        self._initialize(frame,rect)
        
        self.in_bounds = True
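A hedged construction sketch based on the docstring (the class name MOSSETrack is an assumption; frame and rect follow the documented pv.Image / pv.Rect types, and the file name and rectangle values are placeholders):

import pyvision as pv

frame = pv.Image("first_frame.jpg")  # hypothetical first video frame
rect = pv.Rect(120, 80, 64, 64)      # hypothetical rectangle around the object
track = MOSSETrack(frame, rect, type="MOSSE", min_psr=8.0)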
Code Example #6
def findBlobs(img, min_color, max_color, window, renderedwindow, location,
              area):
    blobs = cvblob.Blobs()  #initializes the blobs class
    size = cv.GetSize(img)  #gets size of image

    hsv = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)  # new HSV image for later
    thresh = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)  # new gray image for later
    labelImg = cv.CreateImage(size, cvblob.IPL_DEPTH_LABEL, 1)  # new blob label image for later

    cv.CvtColor(img, hsv, cv.CV_BGR2HSV)  #converts image to hsv image
    cv.InRangeS(hsv, min_color, max_color, thresh)  #thresholds it

    #Corrections to remove false positives
    cv.Smooth(thresh, thresh, cv.CV_BLUR)
    cv.Dilate(thresh, thresh)
    cv.Erode(thresh, thresh)

    result = cvblob.Label(thresh, labelImg, blobs)  # extracts blobs from a greyscale image

    numblobs = len(blobs.keys())  # number of blobs, based on the length of the blobs dictionary

    #if there are blobs found
    if (numblobs > 0):
        avgsize = int(result / numblobs)
        print str(numblobs) + " blobs found covering " + str(result) + "px"

        #Removes blobs that are smaller than a certain size based off of average size
        remv = []
        for x in blobs:
            if (blobs[x].area < avgsize / 3):
                remv.append(x)
        for x in remv:
            del blobs[x]

        numblobs = len(blobs.keys())  # gets the number of blobs again after removing some
        print str(numblobs) + " blobs remaining"

        filtered = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
        cvblob.FilterLabels(labelImg, filtered, blobs)  # creates a binary image with the remaining blobs (imgIn, imgOut, blobs)

        #Centroid, area, and circle for all blobs
        for blob in blobs:
            centroid = cvblob.Centroid(blobs[blob])
            location.append(centroid)
            area.append(blobs[blob].area)
            radius = int(math.sqrt(int(blobs[blob].area) / 3.14)) + 25
            cv.Circle(img, (int(centroid[0]), int(centroid[1])), radius,
                      cv.Scalar(0, 0, 0))

        imgOut = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
        cv.Zero(imgOut)

        cvblob.RenderBlobs(
            labelImg, blobs, img, imgOut,
            cvblob.CV_BLOB_RENDER_COLOR | cvblob.CV_BLOB_RENDER_CENTROID
            | cvblob.CV_BLOB_RENDER_BOUNDING_BOX | cvblob.CV_BLOB_RENDER_ANGLE,
            1.0)  #Marks up the blobs image to put bounding boxes, etc on it

        cv.ShowImage("Window", img)  #shows the orininalimage
        cv.ShowImage("Rendered", imgOut)  #shows the blobs image

        return blobs  #returns the list of blobs

    else:
        print " ...Zero blobs found. \nRedifine color range for better results"  #if no blobs were found print an error message

        cv.ShowImage("Window", img)  #show the original image
Code Example #7
def getThresholdImage(imgHSV):
    imgThresh = cv.CreateImage(cv.GetSize(imgHSV), cv.IPL_DEPTH_8U, 1)
    cv.InRangeS(imgHSV, cv.Scalar(lowerH, lowerS, lowerV),
                cv.Scalar(upperH, upperS, upperV), imgThresh)

    return imgThresh
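The fragment relies on module-level threshold globals; a minimal sketch of that missing context (the values are hypothetical and need tuning per target color):

lowerH, lowerS, lowerV = 0, 135, 110   # hypothetical lower HSV bounds
upperH, upperS, upperV = 10, 255, 255  # hypothetical upper HSV bounds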
Code Example #8
File: lscan.py Project: vck/pylatscan
    def show(self):
        """ Process and show the current frame """
        source = cv.LoadImage(self.files[self.index])
        width, height = cv.GetSize(source)

        center = (width / 2) + self.offset

        cv.Line(source, (center, 0), (center, height), (0, 255, 0), 1)

        mask = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)

        if self.roi:
            red = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)

            cv.Zero(mask)
            cv.Split(source, None, None, red, None)
            cv.InRangeS(red, self.thresholdMin, self.thresholdMax, red)
            cv.Copy(red, mask)

            x, y, x1, y1 = self.roi

            width = x1 - x
            height = y1 - y

            cv.Rectangle(source, (int(x), int(y)), (int(x1), int(y1)),
                         (255.0, 255, 255, 0))

            cv.SetImageROI(source, (x, y, width, height))
            cv.SetImageROI(red, (x, y, width, height))

            tmp = cv.CreateImage(cv.GetSize(red), cv.IPL_DEPTH_8U, 1)
            cv.Zero(tmp)
            cv.Copy(red, tmp)

            line = []
            points = []

            #cv.ShowImage('red',tmp);

            step = 1

            for i in range(0, height - 1):
                row = cv.GetRow(tmp, i)
                minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(row)

                y = i
                x = maxLoc[0]  # x position of the brightest pixel in the row
                point = (0, 0, 0)

                if x > 0:
                    line.append((x, y))

                    s = x / sin(self.camAngle)
                    x = s * cos(self.angles[self.index])
                    z = y / 2.0
                    y = s * sin(self.angles[self.index])
                    point = (x, y, z)

                points.append(point)

            cv.PolyLine(source, [line], False, (255, 0, 0), 2, 8)

            cv.ResetImageROI(red)
            cv.ResetImageROI(source)

        if self.mode == 'mask':
            cv.ShowImage('preview', mask)
            return

        if self.mode == 'record' and self.roi:
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 1)
            cv.PutText(source, "recording %d" % self.index, (20, 20), font,
                       (0, 0, 255))
            self.points.extend(points)
            #self.colors.extend(colors);

        cv.ShowImage('preview', source)
Code Example #9
        i += 1

    cv.WaitKey(1000)


j = 0
while j < 5:
    # capture frame from webcam
    rawimage = cv.QueryFrame(capture)

    DisplayWriteFrame(rawimage)

    # convert frame to greyscale
    depth = 8
    channels = 1
    greyscaleimage = cv.CreateImage((width, height), depth, channels)
    cv.CvtColor(rawimage, greyscaleimage, cv.CV_BGR2GRAY)

    DisplayWriteFrame(greyscaleimage)

    # perform canny edge detection on frame
    depth = 8
    channels = 1
    lowerthreshold = 10
    upperthreshold = 100
    aperture = 3
    edgeimage = cv.CreateImage((width, height), depth, channels)
    cv.Canny(greyscaleimage, edgeimage, lowerthreshold, upperthreshold,
             aperture)

    DisplayWriteFrame(edgeimage)
Code Example #10
    def show( self ):
        """ Process and show the current frame """
        source  = cv.LoadImage( self.files[self.index] )
        width, height = cv.GetSize(source)
        
        center = (width/2) + self.offset;
        
        cv.Line( source, (center,0), (center,height), (0,255,0), 1)


        if self.roi:
            x,y,a,b = self.roi;
            
            width, height = ((a - x), (b - y))
            mask = cv.CreateImage( (width, height), cv.IPL_DEPTH_8U, 1)
        
            cv.SetImageROI( source, (x, y, width, height))           
            cv.Split( source, None, None, mask, None );
            
            gray = cv.CloneImage( mask );
        
            cv.InRangeS( mask, self.thresholdMin, self.thresholdMax, mask );
            cv.And( mask, gray, gray );            
        
            line    = [];
            points  = []; 
        
            for i in range(0,height-1):
                point   = (0, 0, 0)
                row     = cv.GetRow( gray, i)
                
                minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(row)

                y = i
                x = maxLoc[0]
                
                if x > 0:
                    line.append((x,y));
        
                    s = x / sin(radians(self.camAngle))
                    x = s * cos(self.angles[self.index])                    
                    z = height - y
                    y = s * sin(self.angles[self.index])
                    
                    point = (round(x,2),round(y,2),z);
                
                points.append(point)
        
        
            cv.PolyLine( source, [line], False, (255,0,0), 2, 8)
            cv.ResetImageROI( source )
            x,y,a,b = self.roi;
            cv.Rectangle( source, (int(x), int(y)), (int(a), int(b)), (255.0, 255, 255, 0) );

        if self.roi:
            x,y,a,b = self.roi;
            
            width, height = ((a - x), (b - y))
            mask = cv.CreateImage( (width, height), cv.IPL_DEPTH_8U, 1)
        
            cv.SetImageROI( source, (x-width, y, width, height)) # moves roi to the left
            cv.Split( source, None, None, mask, None );
            
            gray = cv.CloneImage( mask );
        
            cv.InRangeS( mask, self.thresholdMin, self.thresholdMax, mask );
            cv.And( mask, gray, gray );            
        
            line    = [];
            #points  = []; 
        
            for i in range(0,height-1):
                point   = (0, 0, 0)
                row     = cv.GetRow( gray, i)
                
                minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(row)

                y = i
                x = maxLoc[0]
                
                if x > 0:
                    line.append((x,y));
                    
                    x = width - x; # left to the x-axis
        
                    s = x / sin(radians(self.camAngle))
                                        
                    x = s * cos(self.angles[self.index])                    
                    z = height - y  # 500 higher than the other
                    y = s * sin(self.angles[self.index])


                    #x' = x*cos q - y*sin q
                    #y' = x*sin q + y*cos q
                    #z' = z
                    
                    a = radians(300)
                    
                    nx = ( cos(a) * x ) - ( sin(a) * y )
                    ny = ( sin(a) * x ) + ( cos(a) * y )
                    
                    point = (nx,ny,z);
                
                #if point[0] != 0:
                #    points[i] = point;
                
                points.append(point)
                
            cv.PolyLine( source, [line], False, (255,0,0), 2, 8)
            cv.ResetImageROI( source )
            x,y,a,b = self.roi;
            cv.Rectangle( source, (int(x), int(y)), (int(a), int(b)), (255.0, 255, 255, 0) );


        if self.mode == 'mask':
            cv.ShowImage( 'preview', mask )
            return

        if self.mode == 'record' and self.roi:
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX,0.5,0.5,1)
            cv.PutText( source, "recording %d" % self.index, (20,20), font, (0,0,255))
            self.points.extend(points);
            #self.colors.extend(colors);



        cv.ShowImage( 'preview', source )
Code Example #11
File: cuav_benchmark.py Project: laetic/cuav
def process(filename):
    '''process one file'''
    pgm = cuav_util.PGM(filename)
    img_full_grey = pgm.array

    im_full = numpy.zeros((960, 1280, 3), dtype='uint8')
    im_640 = numpy.zeros((480, 640, 3), dtype='uint8')

    t0 = time.time()
    for i in range(opts.repeat):
        scanner.debayer(img_full_grey, im_640)
    t1 = time.time()
    print('debayer: %.1f fps' % (opts.repeat / (t1 - t0)))

    t0 = time.time()
    for i in range(opts.repeat):
        scanner.debayer_full(img_full_grey, im_full)
    t1 = time.time()
    print('debayer_full: %.1f fps' % (opts.repeat / (t1 - t0)))

    t0 = time.time()
    im_full2 = cv.CreateImage((1280, 960), 8, 3)
    img_full_grey2 = cv.GetImage(cv.fromarray(img_full_grey))
    for i in range(opts.repeat):
        cv.CvtColor(img_full_grey2, im_full2, cv.CV_BayerBG2BGR)
    t1 = time.time()
    print('debayer_cv_full: %.1f fps' % (opts.repeat / (t1 - t0)))

    t0 = time.time()
    for i in range(opts.repeat):
        img = cv.GetImage(cv.fromarray(im_full))
        cv.CvtColor(img, img, cv.CV_RGB2HSV)
    t1 = time.time()
    print('RGB2HSV_full: %.1f fps' % (opts.repeat / (t1 - t0)))

    t0 = time.time()
    for i in range(opts.repeat):
        img = cv.GetImage(cv.fromarray(im_640))
        cv.CvtColor(img, img, cv.CV_RGB2HSV)
    t1 = time.time()
    print('RGB2HSV_640: %.1f fps' % (opts.repeat / (t1 - t0)))

    t0 = time.time()
    for i in range(opts.repeat):
        thumb = numpy.empty((100, 100, 3), dtype='uint8')
        scanner.rect_extract(im_full, thumb, 120, 125)
    t1 = time.time()
    print('rect_extract: %.1f fps' % (opts.repeat / (t1 - t0)))

    t0 = time.time()
    for i in range(opts.repeat):
        thumb = cuav_util.SubImage(cv.GetImage(cv.fromarray(im_full)),
                                   (120, 125, 100, 100))
    t1 = time.time()
    print('SubImage: %.1f fps' % (opts.repeat / (t1 - t0)))

    t0 = time.time()
    for i in range(opts.repeat):
        scanner.downsample(im_full, im_640)
    t1 = time.time()
    print('downsample: %.1f fps' % (opts.repeat / (t1 - t0)))

    t0 = time.time()
    for i in range(opts.repeat):
        scanner.scan(im_640)
    t1 = time.time()
    print('scan: %.1f fps' % (opts.repeat / (t1 - t0)))

    t0 = time.time()
    for i in range(opts.repeat):
        scanner.scan_full(im_full)
    t1 = time.time()
    print('scan_full: %.1f fps' % (opts.repeat / (t1 - t0)))

    if not hasattr(scanner, 'jpeg_compress'):
        return

    for quality in [30, 40, 50, 60, 70, 80, 90, 95]:
        t0 = time.time()
        for i in range(opts.repeat):
            jpeg = cPickle.dumps(ImagePacket(
                time.time(), scanner.jpeg_compress(im_full, quality)),
                                 protocol=cPickle.HIGHEST_PROTOCOL)
        t1 = time.time()
        print('jpeg full quality %u: %.1f fps  %u bytes' %
              (quality, opts.repeat / (t1 - t0), len(bytes(jpeg))))

    for quality in [30, 40, 50, 60, 70, 80, 90, 95]:
        t0 = time.time()
        for i in range(opts.repeat):
            img2 = cv.fromarray(im_full)
            jpeg = cPickle.dumps(ImagePacket(
                time.time(),
                cv.EncodeImage(
                    '.jpeg', img2,
                    [cv.CV_IMWRITE_JPEG_QUALITY, quality]).tostring()),
                                 protocol=cPickle.HIGHEST_PROTOCOL)
        t1 = time.time()
        print('EncodeImage full quality %u: %.1f fps  %u bytes' %
              (quality, opts.repeat / (t1 - t0), len(bytes(jpeg))))

    for quality in [30, 40, 50, 60, 70, 80, 90, 95]:
        t0 = time.time()
        for i in range(opts.repeat):
            jpeg = cPickle.dumps(ImagePacket(
                time.time(), scanner.jpeg_compress(im_640, quality)),
                                 protocol=cPickle.HIGHEST_PROTOCOL)
        t1 = time.time()
        print('jpeg 640 quality %u: %.1f fps  %u bytes' %
              (quality, opts.repeat / (t1 - t0), len(bytes(jpeg))))

    for thumb_size in [10, 20, 40, 60, 80, 100]:
        thumb = numpy.zeros((thumb_size, thumb_size, 3), dtype='uint8')
        t0 = time.time()
        for i in range(opts.repeat):
            scanner.rect_extract(im_full, thumb, 0, 0)
            jpeg = cPickle.dumps(ImagePacket(time.time(),
                                             scanner.jpeg_compress(thumb, 85)),
                                 protocol=cPickle.HIGHEST_PROTOCOL)
        t1 = time.time()
        print('thumb %u quality 85: %.1f fps  %u bytes' %
              (thumb_size, opts.repeat / (t1 - t0), len(bytes(jpeg))))
Code Example #12
# coding: utf-8

import cv, commands, sys, os

FOLDER_PATH = "/Users/satokazuki/Desktop/zikken5/G1_kirinuki/"
OUTPUT_PATH = "/Users/satokazuki/Desktop/zikken5/G1_database/"

img_list = os.listdir(FOLDER_PATH)
print img_list
count = 0

for img_name in img_list:
    angle = (0, 90, 180, 270)
    item = img_name.split(".")

    if item[1] == "png":
        im_in = cv.LoadImage(FOLDER_PATH + img_name)
        im_ro = cv.CreateImage(cv.GetSize(im_in), cv.IPL_DEPTH_8U, 3)
        rotate_mat = cv.CreateMat(2, 3, cv.CV_32FC1)
        count += 1

        for i in range(0, 4):
            cv.GetRotationMatrix2D((im_in.width / 2, im_in.height / 2),
                                   angle[i], 1, rotate_mat)
            cv.WarpAffine(im_in, im_ro, rotate_mat)
            cv.SaveImage(OUTPUT_PATH + item[0] + "_" + str(i) + ".png", im_ro)
            count += 1

print count
Code Example #13
def rotate(img):
    timg = cv.CreateImage((img.height, img.width), img.depth, img.channels)
    cv.Transpose(img, timg)
    cv.Flip(timg, timg, flipMode=1)
    return timg
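Transpose followed by a flip around the vertical axis (flipMode=1) amounts to a 90-degree clockwise rotation; a quick usage sketch (file names are placeholders):

img = cv.LoadImage("input.png")
cv.SaveImage("rotated.png", rotate(img))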
Code Example #14
font_h = .75
#font_w = 2
font_w = .75
#thickness = 2
thickness = 1
line_type = 8  # 8 (or omitted): 8-connected line
               # 4: 4-connected line
               # cv.CV_AA: antialiased line
# create font
font = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN,font_h,font_w,0,thickness,line_type)
color = cv.CV_RGB(255,255,255)
# PutText draws a single line of text; it does not interpret "\n"
cv.PutText(image, "This image was taken using the Raspberry Pi Camera Module", (50, h - 50), font, color)
cv.SaveImage("original.jpg",image)
print "Saving image with overlayed text as 'original.jpg'"
cv.ShowImage("ORIGINAL",image)
print "converting to greyscale"

# Convert image to greyscale
grey = cv.CreateImage((w,h),8,1)
cv.CvtColor(image,grey,cv.CV_RGB2GRAY)
cv.ShowImage("grey", grey)
cv.SaveImage('greyscale.jpg',grey)
print "Image saved as 'greyscale.jpg'"
# capture video for default 5s
#vid = cam.piVideo()
#capture video for 10 seconds
#more_vid = cam.piVideo(10000)

print "Press any Key to continue..."
cv.WaitKey(20000)
Code Example #15
def cropImage(img, croparea):
    inbetween = cv.CreateImage(cv.GetSize(img), img.depth, img.nChannels)
    cv.Copy(img, inbetween)
    cv.SetImageROI(inbetween, croparea)
    return inbetween
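Note that this returns a full-size copy whose ROI is set rather than a smaller image; subsequent cv calls honour that ROI. A usage sketch (coordinates and file names are placeholders):

img = cv.LoadImage("photo.png")
cropped = cropImage(img, (10, 20, 200, 100))  # croparea is (x, y, width, height)
cv.SaveImage("crop.png", cropped)             # writes just the ROI region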
Code Example #16
    def publish_stitched_image(self):
        """
        Checks to see if the sequences to images buffer contains all required frames and if so 
        produces and publishes a stitched image.
        """
        # Check to see if we have all images for a given sequence number
        for seq, image_dict in sorted(self.seq_to_images.items()): 

            # If we have all images for the sequence - stitch into merged view
            if len(image_dict) == len(self.camera_list):

                t0 = rospy.Time.now() 

                for i, camera in enumerate(self.camera_list):

                    # Convert ros image to opencv image
                    ros_image = image_dict[camera] 
                    cv_image = self.bridge.imgmsg_to_cv(
                            ros_image,
                            desired_encoding="passthrough"
                            )
                    ipl_image = cv.GetImage(cv_image)

                    # Create stitched image if it doesn't exist yet
                    if self.stitched_image is None:
                        self.stitched_image = cv.CreateImage(
                                self.image_size,
                                ipl_image.depth,
                                ipl_image.channels
                                )
                    if i==0:
                        cv.Zero(self.stitched_image)

                    # Set image warping flags - on the first write, fill the rest of the image with zeros
                    if self.is_first_write:
                        warp_flags = self.warp_flags | cv.CV_WARP_FILL_OUTLIERS
                        self.is_first_write = False
                    else:
                        warp_flags = self.warp_flags

                    # Set ROI so the warp lands in this camera's region of the stitched image
                    cv.SetImageROI(self.stitched_image, self.tf_data[camera]['roi'])

                    # Warp the camera image into the stitched image
                    cv.WarpPerspective(
                            ipl_image, 
                            self.stitched_image, 
                            self.tf_data[camera]['matrix'],
                            flags=warp_flags,
                            )
                    cv.ResetImageROI(self.stitched_image)

                    # Get max and min time stamps for stamp_info
                    stamp = ros_image.header.stamp
                    if i == 0:
                        stamp_max = stamp 
                        stamp_min = stamp
                    else:
                        stamp_max = stamp if stamp.to_sec() > stamp_max.to_sec() else stamp_max 
                        stamp_min = stamp if stamp.to_sec() < stamp_min.to_sec() else stamp_min

                # Convert stitched image to ros image
                stitched_ros_image = self.bridge.cv_to_imgmsg(
                        self.stitched_image,
                        encoding="passthrough"
                        )
                stitched_ros_image.header.seq = seq
                self.image_pub.publish(stitched_ros_image)
                self.image_and_seq_pub.publish(seq, stitched_ros_image)
                self.seq_pub.publish(seq)

                # Compute time stamp spread
                stamp_diff = stamp_max.to_sec() - stamp_min.to_sec()
                self.stamp_pub.publish(stamp_max, stamp_min, stamp_diff)
                
                t1 = rospy.Time.now()
                dt = t1.to_sec() - t0.to_sec()
                self.processing_dt_pub.publish(dt,1.0/dt)

                # Remove data from buffer
                del self.seq_to_images[seq]

            # Throw away any stale data in seq to images buffer
            seq_age = self.get_seq_age(seq)
            if seq_age > self.max_seq_age:
                try:
                    del self.seq_to_images[seq]
                except KeyError:
                    pass
Code Example #17
def resize(img, width, height):
    smallImage = cv.CreateImage((width, height), img.depth, img.nChannels)
    cv.Resize(img, smallImage, interpolation=cv.CV_INTER_CUBIC)
    return smallImage
Code Example #18
def spectrum():
    array = numpy.zeros((480, 640, 3), dtype='uint8')
    for y in range(480):
        for x in range(640):
            h = int((255 * x) / 640)
            s = 255
            v = int((255 * y) / 480)
            array[y, x] = (h, s, v)
    hsv = cv.GetImage(cv.fromarray(array))
    return hsv


if len(args) == 0:
    hsv = spectrum()
    rgb = cv.CreateImage(cv.GetSize(hsv), 8, 3)
    cv.CvtColor(hsv, rgb, cv.CV_HSV2RGB)
else:
    img = cuav_util.LoadImage(args[0])
    (w, h) = cv.GetSize(img)
    img2 = cv.CreateImage((w / 2, h / 2), 8, 3)
    cv.Resize(img, img2)
    hsv = cv.CreateImage((w / 2, h / 2), 8, 3)
    cv.CvtColor(img2, hsv, cv.CV_RGB2HSV)
    rgb = cv.CreateImage(cv.GetSize(hsv), 8, 3)
    cv.CvtColor(hsv, rgb, cv.CV_HSV2RGB)

cv.ShowImage('HSV', rgb)
cv.SetMouseCallback('HSV', mouse_event, (rgb, hsv))
cuav_util.cv_wait_quit()
Code Example #19

def getThresholdImage(imgHSV):
    imgThresh = cv.CreateImage(cv.GetSize(imgHSV), cv.IPL_DEPTH_8U, 1)
    cv.InRangeS(imgHSV, cv.Scalar(lowerH, lowerS, lowerV),
                cv.Scalar(upperH, upperS, upperV), imgThresh)

    return imgThresh


def setWindows():
    cv.NamedWindow("Image")

    cv.CreateTrackbar("LowerH", "Image", lowerH, 180, None)
    cv.CreateTrackbar("UpperH", "Image", upperH, 180, None)
    cv.CreateTrackbar("LowerS", "Image", lowerS, 256, None)
    cv.CreateTrackbar("UpperS", "Image", upperS, 256, None)
    cv.CreateTrackbar("LowerV", "Image", lowerV, 256, None)
    cv.CreateTrackbar("UpperV", "Image", upperV, 256, None)


if __name__ == "__main__":

    setWindows()
    img = cv.LoadImage("test.png")
    imgHSV = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
    cv.CvtColor(img, imgHSV, cv.CV_BGR2HSV)

    imgThresh = getThresholdImage(imgHSV)
    cv.ShowImage("img", imgThresh)
Code Example #20
def extractCards(fileName=None):
    """
    Given an image, this will extract the cards from it.

    This takes a filename as an optional argument
    This filename should be the name of an image file.

    This returns a dictionary of the form:
        (x, y) : Card image
    It is likely that the output from this will go to the
    getMeaningFromCards() function.
    """

    if fileName is None:
        mat = takeScreenCapture()
    else:
        mat = cv.LoadImage(fileName)

    # First crop the image: but only crop out the bottom.
    # It is useful to have all dimensions accurate to the screen
    # because otherwise they will throw off the mouse moving and clicking.
    # Cropping out the bottom does not change anything in terms of the mouse.
    unnec_top_distance = 130
    unnec_bottom_distance = 40
    margin = 50
    submat = cv.GetSubRect(
        mat, (0, 0, mat.width, mat.height - unnec_bottom_distance))
    subImg = cv.CreateImageHeader((submat.width, submat.height), 8, 3)
    cv.SetData(subImg, submat.tostring())

    gray = cv.CreateImage((submat.width, submat.height), 8, 1)
    cv.CvtColor(submat, gray, cv.CV_RGB2GRAY)

    thresh = 250
    max_value = 255
    cv.Threshold(gray, gray, thresh, max_value, cv.CV_THRESH_BINARY)

    cv.Not(gray, gray)
    #cv.ShowImage("sub", submat)
    #cv.WaitKey(0)

    storage = cv.CreateMemStorage(0)

    cpy = cv.CloneImage(gray)
    contours = cv.FindContours(cpy, storage, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
    #contours = cv.ApproxPoly(contours, cv.CreateMemStorage(), cv.CV_POLY_APPROX_DP, 3, 1)

    bboxes = []

    if contours:
        while (contours):
            area = cv.ContourArea(contours)
            # It turns out that all the cards are about 44000 in area...
            # It would definitely be nice to have a better way to do this:
            # ie, find the size of the card programmatically and use it then
            if (area > 44000 and area < submat.width * submat.height * 2 / 3):
                bb = cv.BoundingRect(contours)
                bboxes.append(bb)
            contours = contours.h_next()

    #drawBoundingBoxes(bboxes, submat)

    # cards is a dictionary of the form:
    #    (x, y) : card
    cards = {}

    for box in bboxes:
        card = cv.GetSubRect(subImg, box)
        #cv.ShowImage("card", card)
        #cv.WaitKey(0)
        cards[(box[0], box[1])] = card

    return cards
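Per the docstring, the output feeds getMeaningFromCards() (shown in a later example); a minimal sketch of the chain (the screenshot filename is a placeholder):

cards = extractCards("screenshot.png")  # {(x, y): card image}
meanings = getMeaningFromCards(cards)   # {(x, y): (number, suit)}
for (x, y), (num, suit) in meanings.items():
    print x, y, num, suit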
Code Example #21
    def process_frame(self, frame):
        found_path = False
        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # use RGB color finder
        binary = libvision.cmodules.target_color_rgb.find_target_color_rgb(frame, 250, 125, 0, 1500, 500, .3)

        if self.debug:
            color_filtered = cv.CloneImage(binary)

        # Get Edges
        cv.Canny(binary, binary, 30, 40)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
                               rho=1,
                               theta=math.pi / 180,
                               threshold=self.hough_threshold,
                               param1=0,
                               param2=0
                               )
        lines = lines[:self.lines_to_consider]  # Limit number of lines

        # If there are at least 2 lines and they are close to parallel...
        # There's a path!
        if len(lines) >= 2:

            # Find: min, max, average
            theta_max = lines[0][1]
            theta_min = lines[0][1]
            total_theta = 0
            for rho, theta in lines:
                total_theta += theta
                if theta_max < theta:
                    theta_max = theta
                if theta_min > theta:
                    theta_min = theta

            theta_range = theta_max - theta_min
            # Near vertical angles will wrap around from pi to 0.  If the range
            # crosses this vertical line, the range will be way too large.  To
            # correct for this, we always take the smallest angle between the
            # min and max.
            if theta_range > math.pi / 2:
                theta_range = math.pi - theta_range

            if theta_range < self.theta_threshold:
                found_path = True

                angles = map(lambda line: line[1], lines)
                self.theta = circular_average(angles, math.pi)

        if found_path:
            self.seen_in_a_row += 1
        else:
            self.seen_in_a_row = 0

        # stores whether or not we are confident about the path's presence
        object_present = False

        if self.seen_in_a_row >= self.seen_in_a_row_threshold:
            object_present = True
            self.image_coordinate_center = self.find_centroid(binary)
            # Move the origin to the center of the image
            self.center = (
                self.image_coordinate_center[0] - frame.width / 2,
                self.image_coordinate_center[1] * -1 + frame.height / 2
            )

        if self.debug:

            # Show color filtered
            color_filtered_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(color_filtered, color_filtered_rgb, cv.CV_GRAY2RGB)
            cv.SubS(color_filtered_rgb, (255, 0, 0), color_filtered_rgb)
            cv.Sub(frame, color_filtered_rgb, frame)

            # Show edges
            binary_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(binary, binary_rgb, cv.CV_GRAY2RGB)
            cv.Add(frame, binary_rgb, frame)  # Add white to edge pixels
            cv.SubS(binary_rgb, (0, 0, 255), binary_rgb)
            cv.Sub(frame, binary_rgb, frame)  # Remove all but Red

            # Show lines
            if self.seen_in_a_row >= self.seen_in_a_row_threshold:
                rounded_center = (
                    int(round(self.image_coordinate_center[0])),
                    int(round(self.image_coordinate_center[1])),
                )
                cv.Circle(frame, rounded_center, 5, (0, 255, 0))
                libvision.misc.draw_lines(frame, [(frame.width / 2, self.theta)])
            else:
                libvision.misc.draw_lines(frame, lines)

            #cv.ShowImage("Path", frame)
            svr.debug("Path", frame)

        # populate self.output with infos
        self.output.found = object_present
        self.output.theta = self.theta

        if self.center:
            # scale center coordinates of path based on frame size
            self.output.x = self.center[0] / (frame.width / 2)
            self.output.y = self.center[1] / (frame.height / 2)
            libvision.misc.draw_linesC(frame, [(frame.width / 2, self.output.theta)], [255, 0, 255])
            print "Output Returned!!! ", self.output.theta
        else:
            self.output.x = None
            self.output.y = None
            print "No output..."

        if self.output.found and self.center:
            print self.output

        self.return_output()
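circular_average is defined elsewhere; a plausible sketch of an average of angles with period pi, matching how it is called above (this is an assumption about its contract, not the original implementation):

import math

def circular_average(angles, period):
    # map each angle onto the unit circle with the given period, average
    # the vectors, then map the mean direction back into [0, period)
    s = sum(math.sin(2 * math.pi * a / period) for a in angles)
    c = sum(math.cos(2 * math.pi * a / period) for a in angles)
    return (math.atan2(s, c) * period / (2 * math.pi)) % period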
Code Example #22
def getMeaningFromCards(cards):
    """
    This takes a dictionary of the form:
         (x, y) : Card image
    and returns a dictionary of the form:
         (x, y) : (number, suit)

    (x, y) are the coordinates of the top left of the card
    """

    imgdir = "LibraryImages"
    templatesNums = os.listdir(os.path.join(imgdir, "Numbers"))
    templatesSuits = os.listdir(os.path.join(imgdir, "Suits"))

    #templates = filter(lambda s: s[-4:] == ".png", templates)
    templatesNums = map(lambda s: os.path.join(imgdir, "Numbers", s),
                        templatesNums)
    templatesSuits = map(lambda s: os.path.join(imgdir, "Suits", s),
                         templatesSuits)

    for k in cards.keys():
        card = cards[k]

        cardImg = cv.CreateImageHeader((card.width, card.height), 8, 3)
        cv.SetData(cardImg, card.tostring())

        numAndSuit3 = cv.GetSubRect(cardImg, (0, 0, 30, 80))

        numAndSuit1 = cv.CreateImage((numAndSuit3.width, numAndSuit3.height),
                                     8, 1)
        cv.CvtColor(numAndSuit3, numAndSuit1, cv.CV_RGB2GRAY)
        # Convert the 1 channel grayscale to 3 channel grayscale
        # (GRAY2RGB doesn't actually introduce color)
        cv.CvtColor(numAndSuit1, numAndSuit3, cv.CV_GRAY2RGB)

        num = findBestTemplateMatch(templatesNums, numAndSuit3)
        suit = findBestTemplateMatch(templatesSuits, numAndSuit3)
        #print num, suit

        # If this image was recognized as a card, but didn't match
        # any template, it shouldn't be in the list in the first place
        if num is None or suit is None:
            del cards[k]
            continue

        num = string.split(os.path.basename(num), '.')[0]
        suit = string.split(os.path.basename(suit), '.')[0]

        # The alternate file names have underscores
        # after their names
        if num[-1] == '_':
            num = num[:-1]

        if suit[-1] == '_':
            suit = suit[:-1]

        cards[k] = (num, suit)

        #cv.ShowImage("NumandSuit", numAndSuit)
        #cv.WaitKey(0)

    print cards
    return cards
Code Example #23
File: py_viewer.py Project: weihli/cuav
lost = 0
while True:
    try:
        frame_time, frame_counter, shutter = chameleon.capture(h, 300, im)
    except chameleon.error, msg:
        lost += 1
        continue
    if frame_time < last_frame_time:
        base_time += 128
    save_queue.put((frame_time + base_time, im))
    last_frame_time = frame_time
    img_colour = numpy.zeros((960, 1280, 3), dtype='uint8')
    scanner.debayer(im, img_colour)
    img_colour = cv.GetImage(cv.fromarray(img_colour))
    if opts.half:
        img_640 = cv.CreateImage((640, 480), 8, 3)
        cv.Resize(img_colour, img_640)
        img_colour = img_640

    cv.ConvertScale(img_colour, img_colour, scale=opts.brightness)
    cv.ShowImage('Viewer', img_colour)
    key = cv.WaitKey(1)
    i += 1

    if i % 10 == 0:
        tdiff = time.time() - tstart
        print("captured %u  lost %u  %.1f fps shutter=%f" %
              (i, lost, 10 / tdiff, shutter))
        tstart = time.time()

    key = cv.WaitKey(1)
Code Example #24
        riro = int(ro - ri)
        t_n = int(cv_img_width * 1)
        p_s = log(d_c) / p_n  # p
        t_s = 2.0 * pi / (t_n)  # fi

        print "i_0: %f" % i_0
        print "j_0: %f" % j_0
        print "d_c: %f" % d_c
        print "p_n: %f" % p_n
        print "riro: %f" % riro
        print "t_n: %f" % t_n
        print "p_s: %f" % p_s
        print "t_s: %f" % t_s
        print "magic: %f" % magic

        transformed = cv.CreateImage((t_n, riro), cv_img_input.depth,
                                     cv_img_input.nChannels)

        for p in range(0, p_n):
            p_exp = exp((p + ri) * p_s)

            if p_exp >= img_insideR[im] and p_exp <= img_outsideR[im]:
                for t in range(0, t_n):

                    t_rad = t * t_s
                    i = int(i_0 + p_exp * sin(t_rad))
                    j = int(j_0 + p_exp * cos(t_rad))
                    #print ""
                    #print p
                    #print t_n-1-t
                    #print p_exp
                    if 0 <= i < cv_img_width and 0 <= j < cv_img_height:
Code Example #25
#!/usr/bin/python

import cv
tolerancia = 30

imagen = cv.LoadImage('5.png')
cv.ShowImage('Prueba',imagen)

rojo      = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)
verde     = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)
azul      = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)
amarillo  = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)

pixel_rojo     = [36,28,237]
pixel_verde    = [76,177,34]
pixel_azul     = [232,162,0]
pixel_amarillo = [164,73,163] 

cv.InRangeS(imagen,(pixel_rojo[0]-tolerancia,pixel_rojo[1]-tolerancia,pixel_rojo[2]-tolerancia),(pixel_rojo[0]+tolerancia,pixel_rojo[1]+tolerancia,pixel_rojo[2]+tolerancia),rojo)

cv.InRangeS(imagen,(pixel_verde[0]-tolerancia,pixel_verde[1]-tolerancia,pixel_verde[2]-tolerancia),(pixel_verde[0]+tolerancia,pixel_verde[1]+tolerancia,pixel_verde[2]+tolerancia),verde)

cv.InRangeS(imagen,(pixel_azul[0]-tolerancia,pixel_azul[1]-tolerancia,pixel_azul[2]-tolerancia),(pixel_azul[0]+tolerancia,pixel_azul[1]+tolerancia,pixel_azul[2]+tolerancia),azul)

cv.InRangeS(imagen,(pixel_amarillo[0]-tolerancia,pixel_amarillo[1]-tolerancia,pixel_amarillo[2]-tolerancia),(pixel_amarillo[0]+tolerancia,pixel_amarillo[1]+tolerancia,pixel_amarillo[2]+tolerancia),amarillo)

cv.ShowImage('Color Rojo'     ,rojo)
cv.ShowImage('Color Verde'    ,verde)
cv.ShowImage('Color Azul'     ,azul)
cv.ShowImage('Color Amarillo' ,amarillo)
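The four InRangeS calls repeat one pattern; a small helper (hypothetical name, same behaviour) makes the tolerance window explicit:

def filtrar(imagen, pixel, tolerancia, destino):
    # build lower/upper BGR bounds around the reference pixel
    lower = tuple(c - tolerancia for c in pixel)
    upper = tuple(c + tolerancia for c in pixel)
    cv.InRangeS(imagen, lower, upper, destino)

filtrar(imagen, pixel_rojo, tolerancia, rojo)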
Code Example #26
    if input_name.isdigit():
        capture = cv.CreateCameraCapture(int(input_name))
    else:
        capture = None

    cv.NamedWindow("result", 1)

    if capture:
        frame_copy = None
        while True:
            frame = cv.QueryFrame(capture)
            if not frame:
                cv.WaitKey(0)
                break
            if not frame_copy:
                frame_copy = cv.CreateImage((frame.width, frame.height),
                                            cv.IPL_DEPTH_8U, frame.nChannels)
            if frame.origin == cv.IPL_ORIGIN_TL:
                cv.Copy(frame, frame_copy)
            else:
                cv.Flip(frame, frame_copy, 0)

            detect_and_draw(frame_copy, cascade)

            if cv.WaitKey(10) >= 0:
                break
    else:
        image = cv.LoadImage(input_name, 1)
        detect_and_draw(image, cascade)
        cv.WaitKey(0)

    cv.DestroyWindow("result")
Code Example #27
def los():
    global frame
    global frame_size
    global gray_frame
    global canny_result
    global forrun
    global old_target
    global canny_init
    global canny_link
    global frame_on
    global canny_on
    global init_on
    global border_on
    global points_on
    global color_on
    global corridor
    global epsilon
    global angle
    global font
    global font_dist
    global subject
    global pub
    global mmat

    color_dst = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 3)

    if frame is None:
        return

    cv.CvtColor(frame, gray_frame, cv.CV_BGR2GRAY)

    ## 3. line detection in dst
    ##        canny_init - initial edge detection
    ##        canny_link - responsible for edge links
    cv.Canny(gray_frame, canny_result, get_canny_init(), get_canny_link(), 3)
    ## Look for all points detected
    # the left and right edges introduced by the perspective correction are of
    # no interest, so drop some pixels on the left and right
    left_border = 20
    right_border = 620

    if init_on == True:
        points = numpy.nonzero(
            canny_result[(forrun - corridor / 2):(forrun + corridor / 2),
                         left_border:right_border])
        if points:
            for i in range(0, len(points[1])):
                points[1][i] = points[1][i] + left_border
    else:
        left = old_target[0] - epsilon
        if left < 0:
            left = 0
        right = old_target[0] + epsilon
        if right >= right_border:
            #if right>=frame_size[0]:
            #right=frame_size[0]-1
            right = right_border

        points = numpy.nonzero(
            canny_result[(forrun - corridor / 2):(forrun + corridor / 2),
                         left:right])
        if points:
            for i in range(0, len(points[1])):
                points[1][i] = points[1][i] + left

    cv.CvtColor(canny_result, color_dst, cv.CV_GRAY2BGR)

    if frame_on == True:
        frame = color_dst
    else:
        #cv.CvtColor( gray_frame, frame, cv.CV_GRAY2BGR );
        if color_on == False:
            cv.CvtColor(gray_frame, frame, cv.CV_GRAY2BGR)

    if border_on == True:
        start = (0, frame_size[1] / 2)
        end = (frame_size[0], frame_size[1] / 2)
        cv.Line(frame, start, end, cv.CV_RGB(0, 255, 0), 3, 8)
        start = (0, frame_size[1] / 2 + corridor / 2)
        end = (frame_size[0], frame_size[1] / 2 + corridor / 2)
        cv.Line(frame, start, end, cv.CV_RGB(0, 255, 0), 1, 8)
        start = (0, frame_size[1] / 2 - corridor / 2)
        end = (frame_size[0], frame_size[1] / 2 - corridor / 2)
        cv.Line(frame, start, end, cv.CV_RGB(0, 255, 0), 1, 8)

        start = (old_target[0] - epsilon, frame_size[1] / 2 - corridor / 2)
        end = (old_target[0] - epsilon, frame_size[1] / 2 + corridor / 2)
        cv.Line(frame, start, end, cv.CV_RGB(0, 255, 0), 1, 8)

        start = (old_target[0] + epsilon, frame_size[1] / 2 - corridor / 2)
        end = (old_target[0] + epsilon, frame_size[1] / 2 + corridor / 2)
        cv.Line(frame, start, end, cv.CV_RGB(0, 255, 0), 1, 8)

    # "Fadenkreuz" zum Ausrichten der Kamera
    start = (frame_size[0] / 2, (frame_size[1] / 2) + 30)
    end = (frame_size[0] / 2, (frame_size[1] / 2) - 30)
    cv.Line(frame, start, end, cv.CV_RGB(0, 255, 0), 1, 8)
    start = (frame_size[0] / 2 + 30, (frame_size[1] / 2))
    end = (frame_size[0] / 2 - 30, (frame_size[1] / 2))
    cv.Line(frame, start, end, cv.CV_RGB(0, 255, 0), 1, 8)
    if border_on == False:
        cv.Circle(frame, (frame_size[0] / 2, frame_size[1] / 2), 10,
                  cv.CV_RGB(0, 255, 0), 1)

    if len(points[1]) > 0:
        if points_on == True:
            for i in range(0, len(points[1])):
                cv.Circle(frame, (points[1][i], points[0][i] +
                                  (forrun - corridor / 2)), 5,
                          cv.CV_RGB(255, 255, 0), 2)
        left = numpy.max(points[1])
        right = numpy.min(points[1])
        center = int((left + right) / 2)
        old_target[0] = center

        if border_on == True:
            start = (center, (frame_size[1] / 2) + 30)
            end = (center, (frame_size[1] / 2) - 30)
            cv.Line(frame, start, end, cv.CV_RGB(0, 255, 0), 1, 8)

        left_points = []
        right_points = []
        left_points2 = []
        right_x_upper = -1
        right_x_lower = -1
        left_x_upper = -1
        left_x_lower = -1

        for i in range(0, len(points[1])):
            if points[1][i] < center:
                left_points.append(points[1][i])
                if left_x_upper == -1:
                    left_x_upper = points[1][i]
                left_x_lower = points[1][i]
            else:
                right_points.append(points[1][i])
                if right_x_upper == -1:
                    right_x_upper = points[1][i]
                right_x_lower = points[1][i]
        if len(left_points) > 0:
            diff_left = numpy.max(left_points) - numpy.min(left_points)
        if len(right_points) > 0:
            diff_right = numpy.max(right_points) - numpy.min(right_points)
        if len(left_points) > 0 and len(right_points) > 0:
            diff_mean = (diff_left + diff_right) / 2
        if len(left_points) > 0 and len(right_points) == 0:
            diff_mean = diff_left
        if len(left_points) == 0 and len(right_points) > 0:
            diff_mean = diff_right
        if diff_mean != 0:
            angle = math.atan2(corridor, diff_mean)
            if left_x_upper != -1 and right_x_upper != -1:
                if (left_x_upper < left_x_lower) and (right_x_upper <
                                                      right_x_lower):
                    angle = math.pi - angle
        else:
            angle = math.pi / 2
    cv.Circle(frame, (old_target[0], old_target[1]), 5, cv.CV_RGB(255, 0, 0),
              2)

    if len(points[1]) > 0:
        start = (old_target[0], old_target[1])
        end = (int(old_target[0] + math.cos(angle) * 30),
               int(old_target[1] - math.sin(angle) * 30))
        cv.Line(frame, start, end, cv.CV_RGB(255, 0, 0), 3, 8)

    lp = LinePos()
    lp.x = old_target[0]
    lp.y = old_target[1]
    lp.angle = int(((angle * 18000 / math.pi) - 9000) * (-1))
    pub.publish(lp)
Code Example #28
import cv

capture = cv.CaptureFromCAM(-1)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 960)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 720)
im = cv.QueryFrame(capture)
cv.SaveImage("/home/pi/dish/capture/camera-test.jpg", im)

edges = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1)
cv.CvtColor(im, edges, cv.CV_BGR2GRAY)
thresh = 100
cv.Canny(edges, edges, thresh, thresh / 2, 3)
cv.Smooth(edges, edges, cv.CV_GAUSSIAN, 3, 3)
storage = cv.CreateMat(640, 1, cv.CV_32FC3)
cv.HoughCircles(edges, storage, cv.CV_HOUGH_GRADIENT, 2, edges.width / 10,
                thresh, 350, 0, 0)

# f = open("/home/pi/dish/sink-empty.txt", "w")
# for i in range(storage.rows):
#     val = storage[i, 0]
#     radius = int(val[2])
#     center = (int(val[0]), int(val[1]))
#     f.write(str(center[0]) + "," + str(center[1]) + "," + str(radius) + "\n")
#     cv.Circle(im, center, radius, (0, 255, 0), thickness=2)
# cv.SaveImage("/home/pi/dish/capture/sink-empty.jpg", im)

dirty = False
f = open("/home/pi/dish/sink-empty.txt", "r")
drains = []
for line in f:
    val = line.split(",")
Code Example #29

def on_mouse(event, x, y, flags, param):
    if event == cv.CV_EVENT_LBUTTONDOWN:
        print "clicked ", x, ", ", y, ": ", cropped[y, x]
        #global centerX
        #global centerY
        #centerX = x
        #centerY = y


if __name__ == '__main__':
    capture = cv.CaptureFromCAM(0)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
    polar = cv.CreateImage((360, 360), 8, 3)
    cropped = cv.CreateImage((360, 40), 8, 3)
    #hsvcopy = cv.CreateImage((360, 40), 8, 3)
    img = cv.CreateImage((320, 240), 8, 3)

    cones = cv.CreateImage((360, 40), 8, 1)

    #arr = cv.CreateImage((360, 40), 8, 1)
    #gee = cv.CreateImage((360, 40), 8, 1)
    #bee = cv.CreateImage((360, 40), 8, 1)

    #hue = cv.CreateImage((360, 40), 8, 1)
    #sat = cv.CreateImage((360, 40), 8, 1)
    #val = cv.CreateImage((360, 40), 8, 1)

    cv.NamedWindow('cam')
Code Example #30
    def __init__(self, image, zoom_out, id):
        self.image = image
        self.view_image = cv.CreateImage((image.width, image.height), image.depth, 3)
        self.click_pt = None
        self.id = id
        ZoomWindow.__init__(self, "Window %d" % id, -1, zoom_out)