Example #1
File: geosearch.py  Project: MorS25/cuav
def process(args):
    '''process a set of files'''

    global slipmap, mosaic
    scan_count = 0
    files = []
    for a in args:
        if os.path.isdir(a):
            files.extend(file_list(a, ['jpg', 'pgm', 'png']))
        else:
            if a.find('*') != -1:
                files.extend(glob.glob(a))
            else:
                files.append(a)
    files.sort()
    num_files = len(files)
    print("num_files=%u" % num_files)
    region_count = 0

    slipmap = mp_slipmap.MPSlipMap(service=opts.service,
                                   elevation=True,
                                   title='Map')
    icon = slipmap.icon('redplane.png')
    slipmap.add_object(
        mp_slipmap.SlipIcon('plane', (0, 0),
                            icon,
                            layer=3,
                            rotation=0,
                            follow=True,
                            trail=mp_slipmap.SlipTrail()))

    for flag in opts.flag:
        a = flag.split(',')
        lat = a[0]
        lon = a[1]
        icon = 'flag.png'
        if len(a) > 2:
            icon = a[2] + '.png'
        icon = slipmap.icon(icon)
        slipmap.add_object(
            mp_slipmap.SlipIcon('icon - %s' % str(flag),
                                (float(lat), float(lon)),
                                icon,
                                layer=3,
                                rotation=0,
                                follow=False))

    if opts.mission:
        from pymavlink import mavwp
        wp = mavwp.MAVWPLoader()
        wp.load(opts.mission)
        plist = wp.polygon_list()
        if len(plist) > 0:
            for i in range(len(plist)):
                slipmap.add_object(
                    mp_slipmap.SlipPolygon('Mission-%s-%u' % (opts.mission, i),
                                           plist[i],
                                           layer='Mission',
                                           linewidth=2,
                                           colour=(255, 255, 255)))

    if opts.mavlog:
        mpos = mav_position.MavInterpolator()
        mpos.set_logfile(opts.mavlog)
    else:
        mpos = None

    if opts.gammalog is not None:
        gamma = parse_gamma_log(opts.gammalog)
    else:
        gamma = None

    if opts.kmzlog:
        kmzpos = mav_position.KmlPosition(opts.kmzlog)
    else:
        kmzpos = None

    if opts.triggerlog:
        triggerpos = mav_position.TriggerPosition(opts.triggerlog)
    else:
        triggerpos = None

    # create a simple lens model using the focal length
    C_params = cam_params.CameraParams(lens=opts.lens,
                                       sensorwidth=opts.sensorwidth)

    if opts.camera_params:
        C_params.load(opts.camera_params)

    if opts.target:
        target = opts.target.split(',')
    else:
        target = [0, 0, 0]

    camera_settings = MPSettings([
        MPSetting('roll_stabilised', bool, opts.roll_stabilised,
                  'Roll Stabilised'),
        MPSetting('altitude',
                  int,
                  opts.altitude,
                  'Altitude',
                  range=(0, 10000),
                  increment=1),
        MPSetting(
            'minalt', int, 30, 'MinAltitude', range=(0, 10000), increment=1),
        MPSetting('mpp100',
                  float,
                  0.0977,
                  'MPPat100m',
                  range=(0, 10000),
                  increment=0.001),
        MPSetting('rotate180', bool, opts.rotate_180, 'rotate180'),
        MPSetting('filter_type',
                  str,
                  'compactness',
                  'Filter Type',
                  choice=['simple', 'compactness']),
        MPSetting('target_lattitude',
                  float,
                  float(target[0]),
                  'target latitude',
                  increment=1.0e-7),
        MPSetting('target_longitude',
                  float,
                  float(target[1]),
                  'target longitude',
                  increment=1.0e-7),
        MPSetting('target_radius',
                  float,
                  float(target[2]),
                  'target radius',
                  increment=1),
        MPSetting('quality',
                  int,
                  75,
                  'Compression Quality',
                  range=(1, 100),
                  increment=1),
        MPSetting('thumbsize',
                  int,
                  opts.thumbsize,
                  'Thumbnail Size',
                  range=(10, 200),
                  increment=1),
        MPSetting('minscore',
                  int,
                  opts.minscore,
                  'Min Score',
                  range=(0, 1000),
                  increment=1,
                  tab='Scoring'),
        MPSetting('brightness',
                  float,
                  1.0,
                  'Display Brightness',
                  range=(0.1, 10),
                  increment=0.1,
                  digits=2,
                  tab='Display'),
    ],
                                 title='Camera Settings')

    image_settings = MPSettings([
        MPSetting('MinRegionArea',
                  float,
                  0.05,
                  range=(0, 100),
                  increment=0.05,
                  digits=2,
                  tab='Image Processing'),
        MPSetting('MaxRegionArea',
                  float,
                  4.0,
                  range=(0, 100),
                  increment=0.1,
                  digits=1),
        MPSetting('MinRegionSize',
                  float,
                  0.02,
                  range=(0, 100),
                  increment=0.05,
                  digits=2),
        MPSetting('MaxRegionSize',
                  float,
                  3.0,
                  range=(0, 100),
                  increment=0.1,
                  digits=1),
        MPSetting('MaxRarityPct',
                  float,
                  0.02,
                  range=(0, 100),
                  increment=0.01,
                  digits=2),
        MPSetting('RegionMergeSize',
                  float,
                  1.0,
                  range=(0, 100),
                  increment=0.1,
                  digits=1),
        MPSetting('BlueEmphasis', bool, opts.blue_emphasis),
        MPSetting('SaveIntermediate', bool, opts.debug)
    ],
                                title='Image Settings')

    mosaic = cuav_mosaic.Mosaic(slipmap,
                                C=C_params,
                                camera_settings=camera_settings,
                                image_settings=image_settings,
                                start_menu=True,
                                classify=opts.categories,
                                thumb_size=opts.mosaic_thumbsize)

    joelog = cuav_joe.JoeLog(None)

    if opts.view:
        viewer = mp_image.MPImage(title='Image', can_zoom=True, can_drag=True)

    for f in files:
        if not mosaic.started():
            print("Waiting for startup")
            while not mosaic.started():
                mosaic.check_events()
                time.sleep(0.01)

        if mpos:
            # get the position by interpolating telemetry data from the MAVLink log file
            # this assumes that the filename contains the timestamp
            if gamma is not None:
                frame_time = parse_gamma_time(f, gamma)
            else:
                frame_time = cuav_util.parse_frame_time(f)
            frame_time += opts.time_offset
            if camera_settings.roll_stabilised:
                roll = 0
            else:
                roll = None
            try:
                pos = mpos.position(frame_time, roll=roll)
            except Exception:
                print("No position available for %s" % frame_time)
                # skip this frame
                continue
        elif kmzpos is not None:
            pos = kmzpos.position(f)
        elif triggerpos is not None:
            pos = triggerpos.position(f)
        else:
            # get the position using EXIF data
            pos = mav_position.exif_position(f)
            pos.time += opts.time_offset

        # update the plane icon on the map
        if pos is not None:
            slipmap.set_position('plane', (pos.lat, pos.lon), rotation=pos.yaw)
            if camera_settings.altitude > 0:
                pos.altitude = camera_settings.altitude

        # check for any events from the map
        slipmap.check_events()
        mosaic.check_events()

        im_orig = cuav_util.LoadImage(f, rotate180=camera_settings.rotate180)
        if im_orig is None:
            continue
        (w, h) = cuav_util.image_shape(im_orig)

        if not opts.camera_params:
            C_params.set_resolution(w, h)

        im_full = im_orig

        im_640 = cv.CreateImage((640, 480), 8, 3)
        cv.Resize(im_full, im_640, cv.CV_INTER_NN)
        im_640 = numpy.ascontiguousarray(cv.GetMat(im_640))
        im_full = numpy.ascontiguousarray(cv.GetMat(im_full))

        count = 0
        total_time = 0

        t0 = time.time()
        img_scan = im_full

        scan_parms = {}
        for name in image_settings.list():
            scan_parms[name] = image_settings.get(name)
        scan_parms['SaveIntermediate'] = float(scan_parms['SaveIntermediate'])
        scan_parms['BlueEmphasis'] = float(scan_parms['BlueEmphasis'])

        if pos is not None:
            (sw, sh) = cuav_util.image_shape(img_scan)
            altitude = pos.altitude
            if altitude < camera_settings.minalt:
                altitude = camera_settings.minalt
            scan_parms['MetersPerPixel'] = camera_settings.mpp100 * altitude / 100.0

            regions = scanner.scan(img_scan, scan_parms)
        else:
            regions = scanner.scan(img_scan)
        regions = cuav_region.RegionsConvert(regions,
                                             cuav_util.image_shape(img_scan),
                                             cuav_util.image_shape(im_full))
        count += 1
        t1 = time.time()

        frame_time = pos.time

        if pos:
            for r in regions:
                r.latlon = cuav_util.gps_position_from_image_region(
                    r, pos, w, h, altitude=altitude)

            if camera_settings.target_radius > 0 and pos is not None:
                regions = cuav_region.filter_radius(
                    regions, (camera_settings.target_lattitude,
                              camera_settings.target_longitude),
                    camera_settings.target_radius)

        regions = cuav_region.filter_regions(
            im_full,
            regions,
            frame_time=frame_time,
            min_score=camera_settings.minscore,
            filter_type=camera_settings.filter_type)

        scan_count += 1

        if pos and len(regions) > 0:
            altitude = camera_settings.altitude
            if altitude <= 0:
                altitude = None
            joelog.add_regions(frame_time,
                               regions,
                               pos,
                               f,
                               width=w,
                               height=h,
                               altitude=altitude)

        mosaic.add_image(pos.time, f, pos)

        region_count += len(regions)

        if len(regions) > 0:
            composite = cuav_mosaic.CompositeThumbnail(
                cv.GetImage(cv.fromarray(im_full)), regions)
            thumbs = cuav_mosaic.ExtractThumbs(composite, len(regions))
            mosaic.add_regions(regions, thumbs, f, pos)

        if opts.view:
            img_view = img_scan
            (wview, hview) = cuav_util.image_shape(img_view)
            mat = cv.fromarray(img_view)
            for r in regions:
                r.draw_rectangle(mat, (255, 0, 0))
            cv.CvtColor(mat, mat, cv.CV_BGR2RGB)
            viewer.set_image(mat)
            viewer.set_title('Image: ' + os.path.basename(f))
            if opts.saveview:
                cv.CvtColor(mat, mat, cv.CV_RGB2BGR)
                cv.SaveImage('view-' + os.path.basename(f), mat)

        total_time += (t1 - t0)
        if t1 != t0:
            print('%s scan %.1f fps  %u regions [%u/%u]' %
                  (os.path.basename(f), count / total_time, region_count,
                   scan_count, num_files))
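
Note: the legacy cv calls used throughout this example (CreateImage, Resize, GetMat, CvtColor) were removed in OpenCV 3. A rough sketch of the equivalent resize-and-convert steps with the modern cv2 API, which operates directly on NumPy arrays (the file name is illustrative):

import cv2

im_full = cv2.imread('image.jpg')  # BGR ndarray
im_640 = cv2.resize(im_full, (640, 480), interpolation=cv2.INTER_NEAREST)
im_view = cv2.cvtColor(im_full, cv2.COLOR_BGR2RGB)  # same conversion as cv.CV_BGR2RGB above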
Example #2
from __future__ import print_function

import csv
import itertools
import sys

import cv

# Argument handling (reconstructed from the usage message below).
try:
    file_clr = sys.argv[1]
    file_tru = sys.argv[2]
    ker_size = int(sys.argv[3])
except ValueError:
    print('err: kernel width and height must be integers', file=sys.stderr)
    sys.exit(1)
except IndexError:
    print('err: incorrect number of arguments', file=sys.stderr)
    print('usage: label.py <image> <mask> <kernel size>', file=sys.stderr)
    sys.exit(1)

writer = csv.writer(sys.stdout)
img_rgb = cv.LoadImageM(file_clr, cv.CV_LOAD_IMAGE_COLOR)
img_tru = cv.LoadImageM(file_tru, cv.CV_LOAD_IMAGE_GRAYSCALE)

if ker_size > 0:
    cv.Smooth(img_rgb, img_rgb, cv.CV_GAUSSIAN, ker_size, ker_size)

img_hsv = cv.CreateMat(img_rgb.rows, img_rgb.cols, cv.CV_8UC3)
cv.CvtColor(img_rgb, img_hsv, cv.CV_BGR2HSV)

assert (img_rgb.rows == img_tru.rows)
assert (img_rgb.cols == img_tru.cols)

# Build a list of all the data in-memory.
pixels = itertools.product(range(0, img_rgb.rows), range(0, img_rgb.cols))
labels = {0: [], 1: []}

for (y, x) in pixels:
    rgb = list(map(int, img_rgb[y, x]))
    hsv = list(map(int, img_hsv[y, x]))
    label = int(int(img_tru[y, x]) > 127)
    labels[label].append(rgb + hsv + [label])

# Force both sets to be of equal size.
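
The snippet ends here; a minimal sketch of one way to finish the balancing step, assuming random undersampling of the larger class is acceptable:

import random

n = min(len(labels[0]), len(labels[1]))
random.shuffle(labels[0])
random.shuffle(labels[1])
for row in labels[0][:n] + labels[1][:n]:
    writer.writerow(row)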
Example #3
def hsv_image(image):
    hsv = cv.CreateImage(cv.GetSize(image), image.depth, 3)
    cv.CvtColor(image, hsv, cv.CV_BGR2HSV)
    return hsv
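
For reference, a usage sketch with the legacy cv bindings (the file names are illustrative):

img = cv.LoadImage('frame.png')  # 8-bit, 3-channel BGR by default
hsv = hsv_image(img)
cv.SaveImage('frame_hsv.png', hsv)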
Example #4
def cam_getimage(show=False,
                 dfcorr=True,
                 raw=False,
                 showhisto=True,
                 waitkey=25):
    """
	Get image from the camera, convert, scale, dark-flat correct,
	optionally show this and return as numpy.ndarray.

	If **raw* is set, return the (scaled/ROI'd) image as CvImage

	If CAM_CFG['flat'] or CAM_CFG['dark'] are set, use these to dark-flat 
	correct the image.

	@param [in] show Show image after acquisition
	@param [in] dfcorr Do dark-flat correction
	@param [in] raw Return raw IplImage (scaled and ROI'd, w/o DF correction)
	@param [in] showhisto Show histogram as well (only with **show**)
	@param [in] waitkey Wait time for cv.WaitKey() If 0, don't call. (only with **show**)
	@return Image data as numpy.ndarray
	"""

    if (not CAM_CFG['handle']): return

    rawframe = cv.CloneImage(cv.QueryFrame(CAM_CFG['handle']))

    # Downscale color images
    if (rawframe.channels > 1):
        rawsz = cv.GetSize(rawframe)
        if (not CAM_CFG.has_key('rawyuv') or not CAM_CFG['rawyuv']):
            CAM_CFG['rawyuv'] = cv.CreateImage(rawsz, rawframe.depth, 3)
            CAM_CFG['rawgray'] = cv.CreateImage(rawsz, rawframe.depth, 1)
        cv.CvtColor(rawframe, CAM_CFG['rawyuv'], cv.CV_BGR2YCrCb)
        cv.Split(CAM_CFG['rawyuv'], CAM_CFG['rawgray'], None, None, None)
        rawframe = CAM_CFG['rawgray']

    if (CAM_CFG['roi']):
        rawframe = cv.GetSubRect(rawframe, tuple(CAM_CFG['roi']))

    procf = CAM_CFG['frame']
    cv.ConvertScale(rawframe, procf, scale=1.0 / 256)

    if (raw):
        return cv.CloneImage(procf)

    if (CAM_CFG.has_key('dark') and dfcorr):
        cv.Sub(procf, CAM_CFG['dark'], procf)
    if (CAM_CFG.has_key('flat') and dfcorr):
        cv.Div(procf, CAM_CFG['flat'], procf)
    # We *don't* apply the aperture mask here because we might need the data

    if (show):
        cv.ShowImage(CAM_CFG['window'], procf)
        if (showhisto):
            global camhist, camhistimg
            camhist, camhistimg = calc_1dhisto(procf,
                                               hist=camhist,
                                               histimg=camhistimg)
            cv.ShowImage("cam_histogram", camhistimg)
        if (waitkey):
            cv.WaitKey(waitkey)

    depth2dtype = {
        cv.IPL_DEPTH_32F: 'float32',
        cv.IPL_DEPTH_64F: 'float64',
    }

    framearr = np.fromstring(procf.tostring(),
                             dtype=depth2dtype[procf.depth],
                             count=procf.width * procf.height *
                             procf.nChannels)
    framearr.shape = (procf.height, procf.width, procf.nChannels)

    return framearr[:, :, 0]
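
A usage sketch, assuming CAM_CFG has been initialized elsewhere with an open capture handle, ROI and display window:

frame = cam_getimage(show=True)  # dark/flat-corrected numpy.ndarray
if frame is not None:
    print('mean intensity: %.4f' % frame.mean())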
Example #5
File: TrackLine.py  Project: steup/FAMOUSO
subject = "WayPoint"
pub = publisher.PublisherEventChannel(subject)
pub.announce(subject)

while True:
  
    color_dst = cv.CreateImage( frame_size, cv.IPL_DEPTH_8U, 3 )
    
    ## 1. capture the current image
    frame = cv.QueryFrame(capture)
    if frame is None:
        break
      
    ## 2. copy current frame to a gray scale frame -> dst
    cv.CvtColor(frame, gray_frame, cv.CV_BGR2GRAY)

    ## 3. line detection in dst
    ##        canny_init - initial edge detection
    ##        canny_link - responsible for edge links
    cv.Canny(gray_frame, canny_result, canny_init, canny_link, 3)
    ## Look for all points detected
    if init_on:
        points = numpy.nonzero(canny_result[(forrun-corridor/2):(forrun+corridor/2), :])
    else:
        left = old_target[0] - epsilon
        if left < 0:
            left = 0
        right = old_target[0] + epsilon
        if right >= frame_size[0]:
            right = frame_size[0] - 1
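
The loop body continues beyond this excerpt. For reference, canny_init and canny_link above correspond to the two hysteresis thresholds of the modern API; a rough cv2 equivalent, assuming gray_frame is a NumPy array:

import cv2
canny_result = cv2.Canny(gray_frame, canny_init, canny_link, apertureSize=3)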
Example #6
    def run(self):
        self.frame_count = 0
        sms_count = 0
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        first = True

        while True:
            schedule.run_pending()
            color_image = cv.QueryFrame(self.capture)
            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)
            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)
            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)
            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            while contour:
                self.frame_count += 1
                sms_count += 1
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()
                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)

            if len(points):
                center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
                cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 100, 0), 1)

            cv.ShowImage("PyLocker", color_image)
            #send frame each x frames
            if self.frame_count >= self.treshold:
                self.frame_count = 0
                self.last_movement = time.time()

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
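
The running-average background subtraction in this loop translates almost line for line to cv2; a minimal sketch, assuming frames arrive as BGR NumPy arrays:

import cv2

avg = None

def motion_mask(frame):
    # Running-average background model, mirroring the loop above.
    global avg
    blur = cv2.GaussianBlur(frame, (3, 3), 0)
    if avg is None:
        avg = blur.astype('float32')
    cv2.accumulateWeighted(blur, avg, 0.020)
    diff = cv2.absdiff(blur, cv2.convertScaleAbs(avg))
    grey = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(grey, 70, 255, cv2.THRESH_BINARY)
    mask = cv2.dilate(mask, None, iterations=18)
    return cv2.erode(mask, None, iterations=10)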
Example #7
def main():
	project_root_dir = sys.argv[1]
	os.chdir(project_root_dir)
	os.chdir(os.path.join(OUTPUT_DIR_NAME, OUTPUT_DIR_NAME))
	
	output_img = cv.CreateImage((WIDTH, WIDTH), cv.IPL_DEPTH_8U, 3)
	
	print os.system("identify -format \"%k\" result.png")
	print "reducing colors to 10"
	os.system("convert result.png +dither -colors 10 result_quant.png")
	
	img_orig = cv.LoadImageM("result_quant.png")
	output_img = cv.CreateImage((WIDTH, WIDTH), cv.IPL_DEPTH_8U, 3)
	
	img_hls = cv.CreateImage(cv.GetSize(img_orig), cv.IPL_DEPTH_8U, 3)
	cv.CvtColor(img_orig, img_hls, cv.CV_BGR2HLS)
	
	pixels = numpy.asarray(cv.GetMat(img_hls))
	d = {}
	
	print "counting..."
	for line in pixels:
		for px in line:
			if tuple(px) in d:
				d[tuple(px)] += 1
			else:
				d[tuple(px)] = 1
	
	colors = d.keys()
	#print "%d pixels, %d colors" % (img_orig.width*img_orig.height, len(colors))
	
	print "sorting..."
	#colors.sort(hls_sort)
	colors = sort_by_distance(colors)
	
	px_count = img_orig.width * img_orig.height
	x_pos = 0
	
	print "building image..."
	for color in colors:
		l = d[color] / float(px_count)
		l = int(math.ceil( l*WIDTH ))
		
		for x in range(l):
			if x_pos+x >= WIDTH:
				break
			for y in range(WIDTH):
				cv.Set2D(output_img, y, x_pos+x, (int(color[0]), int(color[1]), int(color[2])))
		x_pos += l
	
	print "saving..."
	output_img_rgb = cv.CreateImage(cv.GetSize(output_img), cv.IPL_DEPTH_8U, 3)
	cv.CvtColor(output_img, output_img_rgb, cv.CV_HLS2BGR)
	cv.SaveImage("_RESULT.png", output_img_rgb)
	
	os.chdir( r"..\.." )
	f = open("colors.txt", "w")
	row = cv.GetRow(output_img_rgb, 0)
	
	counter = 0
	last_px = cv.Get1D(row, 0)
	for i in range(WIDTH):
		px = cv.Get1D(row, i)
		if px == last_px:
			counter += 1
			if i == WIDTH-1:
				f.write("%d, %d, %d, %d\n" % (int(last_px[2]), int(last_px[1]), int(last_px[0]), counter))
			continue
		else:
			f.write("%d, %d, %d, %d\n" % (int(last_px[2]), int(last_px[1]), int(last_px[0]), counter))
			counter = 1
			last_px = px
	f.close()
	
	return
Example #8
    def run(self):
        # Initialize
        #log_file_name = "tracker_output.log"
        #log_file = file( log_file_name, 'a' )

        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame),
                                               cv.IPL_DEPTH_32F, 3)
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)

        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)

        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)

        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []

        t0 = time.time()

        # For toggling display:
        image_list = ["camera", "difference", "threshold", "display", "faces"]
        image_index = 0  # Index into image_list

        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1,
                                cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        ###############################
        ### Face detection stuff
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        haar_cascade = cv.Load('haarcascades/haarcascade_frontalface_alt.xml')
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )

        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 3

        while True:

            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)

            frame_count += 1
            frame_t0 = time.time()

            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)

            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image,
                            running_average_in_display_color_depth, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth,
                       difference)

            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)
            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)

            grey_image_as_array = numpy.asarray(cv.GetMat(grey_image))
            non_black_coords_array = numpy.where(grey_image_as_array > 3)
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip(non_black_coords_array[1],
                                         non_black_coords_array[0])

            # Was using this to hold either pixel coords or polygon coords.
            points = []
            bounding_box_list = []

            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours(grey_image, mem_storage,
                                      cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            while contour:

                bounding_rect = cv.BoundingRect(list(contour))
                point1 = (bounding_rect[0], bounding_rect[1])
                point2 = (bounding_rect[0] + bounding_rect[2],
                          bounding_rect[1] + bounding_rect[3])

                bounding_box_list.append((point1, point2))
                polygon_points = cv.ApproxPoly(list(contour), mem_storage,
                                               cv.CV_POLY_APPROX_DP)

                # To track polygon points only (instead of every pixel):
                #points += list(polygon_points)

                # Draw the contours:
                ###cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly(grey_image, [
                    list(polygon_points),
                ], cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(display_image, [
                    polygon_points,
                ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                #cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)

                contour = contour.h_next()

            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are prolly just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
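            # left, right, top and bottom are corner-index constants defined
            # earlier in the original file (not included in this excerpt).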
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][0] - box[top][0]
                box_areas.append(box_width * box_height)

                #cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)

            average_box_area = 0.0
            if len(box_areas):
                average_box_area = float(sum(box_areas)) / len(box_areas)

            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][0] - box[top][0]

                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area * 0.1:
                    trimmed_box_list.append(box)

            # Draw the trimmed box list:
            #for box in trimmed_box_list:
            #	cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )

            bounding_box_list = merge_collided_bboxes(trimmed_box_list)

            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle(display_image, box[0], box[1],
                             cv.CV_RGB(0, 255, 0), 1)

            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len(bounding_box_list)

            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.

            if frame_t0 - last_target_change_t < .350:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1:
                    estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1:
                    estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0

            # Clip to the user-supplied maximum:
            estimated_target_count = min(estimated_target_count, max_targets)

            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.

            # Using the numpy values directly (treating all pixels as points):
            points = non_black_coords_array
            center_points = []

            if len(points):

                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                # Need at least one target to look for:
                k_or_guess = max(estimated_target_count, 1)
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook

                #points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans(array(points), k_or_guess)

                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = (int(center_point[0]), int(center_point[1]))
                    center_points.append(center_point)
                    #cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    #cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)

            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []

            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []

                for center_point in center_points:
                    if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
                     center_point[1] < box[bottom][1] and center_point[1] > box[top][1] :

                        # This point is within the box.
                        center_points_in_box.append(center_point)

                # Now see if there are more than one.  If so, merge them.
                if len(center_points_in_box) > 1:
                    # Merge them:
                    x_list = []
                    y_list = []
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])

                    average_x = int(float(sum(x_list)) / len(x_list))
                    average_y = int(float(sum(y_list)) / len(y_list))

                    trimmed_center_points.append((average_x, average_y))

                    # Record that they were removed:
                    removed_center_points += center_points_in_box

                if len(center_points_in_box) == 1:
                    # Just use it.
                    trimmed_center_points.append(center_points_in_box[0])

            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (not center_point in trimmed_center_points) and (
                        not center_point in removed_center_points):
                    trimmed_center_points.append(center_point)

            # Draw what we found:
            #for center_point in trimmed_center_points:
            #	center_point = ( int(center_point[0]), int(center_point[1]) )
            #	cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #	cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #	cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #	cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)

            # Determine if there are any new (or lost) targets:
            actual_target_count = len(trimmed_center_points)
            last_target_count = actual_target_count

            # Now build the list of physical entities (objects)
            this_frame_entity_list = []

            # An entity is list: [ name, color, last_time_seen, last_known_coords ]

            for target in trimmed_center_points:

                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}

                for entity in last_frame_entity_list:

                    entity_coords = entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]

                    distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2))
                    entity_distance_dict[distance] = entity

                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()

                for distance in distance_list:

                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[distance]

                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        #print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
                        continue

                    #print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[3] = target  # Update the new location
                    this_frame_entity_list.append(nearest_possible_entity)
                    #log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break

                if entity_found == False:
                    # It's a new entity.
                    color = (random.randint(0, 255), random.randint(0, 255),
                             random.randint(0, 255))
                    name = hashlib.md5(str(frame_t0) +
                                       str(color)).hexdigest()[:6]
                    last_time_seen = frame_t0

                    new_entity = [name, color, last_time_seen, target]
                    this_frame_entity_list.append(new_entity)
                    #log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.

            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    #log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append(entity)

            # For next frame:
            last_frame_entity_list = this_frame_entity_list

            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10,
                          cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point, 5,
                          cv.CV_RGB(c[0], c[1], c[2]), 3)

            #print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break

            # Toggle which image to show
            if chr(c) == 'd':
                image_index = (image_index + 1) % len(image_list)

            image_name = image_list[image_index]

            # Display frame to user
            if image_name == "camera":
                image = camera_image
                cv.PutText(image, "Camera (Normal)", text_coord, text_font,
                           text_color)
            elif image_name == "difference":
                image = difference
                cv.PutText(image, "Difference Image", text_coord, text_font,
                           text_color)
            elif image_name == "display":
                image = display_image
                cv.PutText(image, "Targets (w/AABBs and contours)", text_coord,
                           text_font, text_color)
            elif image_name == "threshold":
                # Convert the image to color.
                cv.CvtColor(grey_image, display_image, cv.CV_GRAY2RGB)
                image = display_image  # Re-use display image here
                cv.PutText(image, "Motion Mask", text_coord, text_font,
                           text_color)
            elif image_name == "faces":
                # Do face detection
                detect_faces(camera_image, haar_cascade, mem_storage)
                image = camera_image  # Re-use camera image here
                cv.PutText(image, "Face Detection", text_coord, text_font,
                           text_color)

            cv.ShowImage("Target", image)

            if self.writer:
                cv.WriteFrame(self.writer, image)

            #log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta,
                                                  processed_fps)
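
One detail worth isolating: SciPy's vq.kmeans accepts either a cluster count or an initial codebook of centroids, which is how the loop above reuses the previous frame's targets as the starting guess. A standalone sketch:

import numpy as np
from scipy.cluster import vq

pts = np.random.rand(200, 2) * 100  # stand-in for the motion pixels
codebook, distortion = vq.kmeans(pts, 3)         # k given as an int
codebook, distortion = vq.kmeans(pts, codebook)  # prior centroids as the guess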
Example #9
# Prepare additional object for grayscale version of the image
grayscale = cv.CreateImage(size, 8, 1)

# And the font for text drawing
font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX_SMALL, 2, 2, 0, 1, 8)

# Initialize QR decoder
decoder = quirc.Decoder(*size)

while True:
    # Query new frame
    frame = cv.QueryFrame(capture)

    # Make a grayscale copy
    cv.CvtColor(frame, grayscale, cv.CV_BGR2GRAY)

    for code in decoder.decode(grayscale.tostring()):

        # Draw the contours of each QR code
        # TODO: replace with a cv.PolyLine
        cv.Line(frame, (code.corners[0][0], code.corners[0][1]),
                (code.corners[1][0], code.corners[1][1]), (0, 255, 0))
        cv.Line(frame, (code.corners[1][0], code.corners[1][1]),
                (code.corners[2][0], code.corners[2][1]), (0, 255, 0))
        cv.Line(frame, (code.corners[2][0], code.corners[2][1]),
                (code.corners[3][0], code.corners[3][1]), (0, 255, 0))
        cv.Line(frame, (code.corners[3][0], code.corners[3][1]),
                (code.corners[0][0], code.corners[0][1]), (0, 255, 0))

        # And a decoded text for each one
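        # Hedged continuation (the excerpt stops here): draw the decoded
        # payload next to the first corner, assuming the decoder result
        # exposes it as code.data.
        cv.PutText(frame, code.data,
                   (code.corners[0][0], code.corners[0][1] - 5),
                   font, (0, 255, 0))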
Example #10
    def OnIdle( self, ):
        """Request refresh of the context whenever idle.
        track, get position, update camera, then redraw"""
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 )
        backproject_mode = False
        while True:
            frame = cv.QueryFrame(self.capture)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject( [self.hue], backproject, hist )
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x,y,w,h = self.selection
                cv.Rectangle(frame, (x,y), (x+w,y+h), (0,0,255))

                sel = cv.GetSubRect(self.hue, self.selection )
                cv.CalcArrHist( [sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue( hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )

                # find centroid coordinate (x,y) and area (z)
                selection_centroid = track_box[0]
                global xposition
                xposition = selection_centroid[0]
                global yposition
                yposition = selection_centroid[1]
                width_height = track_box[1]

                # writes output of coordinates to seed file if needed
                # with open('seed.txt', 'a') as f:
                #     value = (xposition, yposition)
                #     s = str(value) + '\n'
                #     f.write(s)
                #     # f.write('end_of_session')
                # f.close()

                # print outs
                print "x: " + str(xposition)
                print "y: " + str(yposition)
                selection_area = width_height[0]*width_height[1]
                # print "The width is: " + str(width_height[0]) + " The height is: " + str(width_height[1])
                # print "centroid is: " + str(selection_centroid)
                # return "centroid is: " + str(selection_centroid)
                print "area: " + str(selection_area)
                # return "area is: " + str(selection_area)

            if not backproject_mode:
                cv.ShowImage( "CamShiftDemo", frame )
            else:
                cv.ShowImage( "CamShiftDemo", backproject)
            cv.ShowImage( "Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(10)
            if c == 27: # escape key
                break
            elif c == ord("b"): # show backproject mode with "b" key
                backproject_mode = not backproject_mode

        self.triggerRedraw(1)        
        return 1
Example #11
    def runColor(self):
        self.tracking = False
        self.lasttrack = None
        self.hang_around_seconds = 5
        color_tracker_window = "Preston HackSpace 2013 BarCamp Project"
        cv.NamedWindow(color_tracker_window, 1)
        self.capture = cv.CaptureFromCAM(0)
        count = 0
        while True:

            img = cv.QueryFrame(self.capture)
            img2 = cv.QueryFrame(self.capture)
            #blur the source image to reduce color noise
            cv.Smooth(img, img, cv.CV_BLUR, 3)

            #convert the image to hsv (Hue, Saturation, Value) so it's
            #easier to determine the color to track (hue)

            hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
            cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

            #limit all pixels that don't match our criteria; in this case we are
            #looking for purple, but if you want you can adjust the first value in
            #both tuples, which is the hue range (120,140).  OpenCV uses 0-180 as
            #a hue range for the HSV color model

            #Orange  0-22
            #Yellow 22- 38
            #Green 38-75
            #Blue 75-130
            #Violet 130-160
            #Red 160-179

            thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            cv.InRangeS(hsv_img, (0, 120, 120), (15, 255, 255),
                        thresholded_img)
            #cv.InRangeS(hsv_img, (120, 80, 80), (140, 255, 255), thresholded_img)
            #determine the objects moments and check that the area is large
            #enough to be our object
            #moments = cv.Moments(thresholded_img, 0)
            moments = cv.Moments(cv.GetMat(thresholded_img), 0)
            area = cv.GetCentralMoment(moments, 0, 0)

            #there can be noise in the video so ignore objects with small areas
            if (area > 100000):
                self.tracking = True
                self.lasttrack = time.time()
                #determine the x and y coordinates of the center of the object
                #we are tracking by dividing the 1, 0 and 0, 1 moments by the area
                x = cv.GetSpatialMoment(moments, 1, 0) / area
                y = cv.GetSpatialMoment(moments, 0, 1) / area

                #Write the x,y coords to a file for the pyFirmata code to use for controlling the Arduino
                self.WriteXY(x, y)

                #create an overlay to mark the center of the tracked object
                overlay = cv.CreateImage(cv.GetSize(img), 8, 3)

                #cv.Circle(overlay, (x, y), 2, (255, 255, 255), 20)
                cv.Circle(img, (int(x), int(y)), 2, (255, 255, 255), 20)
                cv.Add(img, overlay, img)
                #add the thresholded image back to the img so we can see what was
                #left after it was applied
                cv.Merge(thresholded_img, None, None, None, img)
            else:
                if self.tracking == True:
                    #We have just lost track of the object we need to hang around for a bit
                    #to see if the object comes back.
                    self.WriteXY(-2, -2)
                    if time.time() >= self.lasttrack + self.hang_around_seconds:
                        self.tracking = False

                if self.tracking == False:
                    self.WriteXY(-1, -1)
            #display the image
            cv.ShowImage(color_tracker_window, img2)

            if cv.WaitKey(10) == 27:
                break
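
The InRangeS/Moments sequence above has a compact modern analogue; a minimal cv2 sketch, assuming img is a BGR NumPy array:

import cv2

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 120, 120), (15, 255, 255))
moments = cv2.moments(mask, binaryImage=True)
area = moments['m00']
if area > 100000:
    x = moments['m10'] / area
    y = moments['m01'] / area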
Example #12
def main():

    # create windows
    create_and_position_window('Thresholded_HSV_Image', 10, 10)
    create_and_position_window('RGB_VideoFrame', 10 + cam_width, 10)

    create_and_position_window('Hue', 10, 10 + cam_height)
    create_and_position_window('Saturation', 210, 10 + cam_height)
    create_and_position_window('Value', 410, 10 + cam_height)
    create_and_position_window('LaserPointer', 0, 0)

    capture = setup_camera_capture()

    # create images for the different channels
    h_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    s_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    v_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    laser_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    cv.SetZero(h_img)
    cv.SetZero(s_img)
    cv.SetZero(v_img)
    cv.SetZero(laser_img)

    while True:
        # 1. capture the current image
        frame = cv.QueryFrame(capture)
        if frame is None:
            # no image captured... end the processing
            break

        hsv_image = cv.CloneImage(frame)  # temporary copy of the frame
        cv.CvtColor(frame, hsv_image, cv.CV_BGR2HSV)  # convert to HSV

        # split the video frame into color channels
        cv.Split(hsv_image, h_img, s_img, v_img, None)

        # Threshold ranges of HSV components.
        cv.InRangeS(h_img, hmin, hmax, h_img)
        cv.InRangeS(s_img, smin, smax, s_img)
        cv.InRangeS(v_img, vmin, vmax, v_img)

        # Perform an AND on HSV components to identify the laser!
        cv.And(h_img, v_img, laser_img)
        # This actually Worked OK for me without using Saturation.
        #cv.cvAnd(laser_img, s_img,laser_img)

        # Merge the HSV components back together.
        cv.Merge(h_img, s_img, v_img, None, hsv_image)

        #-----------------------------------------------------
        # NOTE: default color space in OpenCV is BGR!!
        # we can now display the images
        cv.ShowImage('Thresholded_HSV_Image', hsv_image)
        cv.ShowImage('RGB_VideoFrame', frame)
        cv.ShowImage('Hue', h_img)
        cv.ShowImage('Saturation', s_img)
        cv.ShowImage('Value', v_img)
        cv.ShowImage('LaserPointer', laser_img)

        # handle events
        k = cv.WaitKey(10)

        if k in (27, ord('q')):
            # user has pressed the ESC or q key, so exit
            break
Example #13
def move():
 # Initialise hand points
 leftHand.isActive = False
 rightHand.isActive = False

 # Code from stack overflow (# TODO: find link)
 # Open the image
 image = cv.LoadImage('cam_image.jpg') 

 # Keep a YUV color-model version of the image
 yuv = cv.CreateImage(cv.GetSize(image),8,3)
	
 # Convert image to gray
 gray = cv.CreateImage(cv.GetSize(image),8,1)
 cv.CvtColor(image,yuv, cv.CV_BGR2YCrCb)
 cv.Split(yuv,gray, None,None,None)

 # Use canny algorithm for edge detection
 canny = cv.CreateImage(cv.GetSize(image),8,1)
 cv.Canny(gray,canny,50,200)
 
 # Draw guide lines in the image.
 # *** USER: change the color of the line.
 cv.Line(image, (image.width/4, 0), (image.width/4, image.height/3), cv.RGB(255,0, 0)) 
 cv.Line(image, (0, image.height/3), (image.width/4, image.height/3), cv.RGB(255,0, 0))
 
 cv.Line(image, (image.width*3/4, 0), (image.width*3/4, image.height/3), cv.RGB(255,0, 0)) 
 cv.Line(image, (image.width*3/4, image.height/3), (image.width, image.height/3), cv.RGB(255,0, 0)) 

 cv.Line(image, (image.width/4, image.height*2/3), (image.width/4, image.height), cv.RGB(255,0, 0)) 
 cv.Line(image, (0, image.height*2/3), (image.width/4, image.height*2/3), cv.RGB(255,0, 0)) 

 cv.Line(image, (image.width*3/4, image.height*2/3), (image.width*3/4, image.height), cv.RGB(255,0, 0)) 
 cv.Line(image, (image.width*3/4, image.height*2/3), (image.width, image.height*2/3), cv.RGB(255,0, 0)) 

 # Search for hands
 for x in range(0, image.height):
  for y in range(0, image.width):
   # get the value of the current pixel
   pixel = canny[x, y]

   # If white, check in which quarter it is located
   if pixel == 255.0:
    # If left quarter and not read yet, store state.
    if y < image.width/4 and not rightHand.isActive:
     if x < image.height/3:
      rightHand.currentState = 1
     elif x > image.height*2/3:
      rightHand.currentState = -1
     else:
      rightHand.currentState = 0
     rightHand.isActive = True
    # If right quarter and not read yet, store state.
    if y > image.width*3/4 and not leftHand.isActive:
     if x < image.height/3:
      leftHand.currentState = 1
     elif x > image.height*2/3:
      leftHand.currentState = -1
     else:
      leftHand.currentState = 0
     leftHand.isActive = True
    # If both hands have been read, stop both loops.
    if rightHand.isActive and leftHand.isActive:
     break
  if rightHand.isActive and leftHand.isActive:
   break

 # If at the end of the loop one of the hands hasn't been read,
 # its current state is reset to 0.
 if not rightHand.isActive:
  rightHand.currentState = 0
 if not leftHand.isActive:
  leftHand.currentState = 0

 # Keep commands
 commandLeft = commandRight = ""

 # Send a message if the current state is different to the previous state.
 if rightHand.currentState != rightHand.previousState:
  if rightHand.currentState == 1:
   commandRight = "1"
  elif rightHand.currentState == -1:
   commandRight = "-1"
  else:
   commandRight = "0"
  rightHand.previousState = rightHand.currentState
 else:
  commandRight = "Do nothing"

 if leftHand.currentState != leftHand.previousState:
  if leftHand.currentState == 1:
   commandLeft = "1"
  elif leftHand.currentState == -1:
   commandLeft = "-1"
  else:
   commandLeft = "0"
  leftHand.previousState = leftHand.currentState
 else:
  commandLeft = "Do nothing"

 # Return the command
 return commandRight + "_" + commandLeft
Example #14
#!/usr/bin/python

import cv
import cvblob

#load the blob image from the test folder
#img = cv.LoadImage("../../test/test.png", 1)
#img = cv.LoadImage("blob.jpeg", 1)
img = cv.LoadImage("original.jpg", 1)
#convert to a greyscale image, and set intensity thresholds
grey = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
cv.CvtColor(img, grey, cv.CV_BGR2GRAY)
cv.Threshold(grey, grey, 100, 255, cv.CV_THRESH_BINARY)

#build the label image
labelImg = cv.CreateImage(cv.GetSize(img), cvblob.IPL_DEPTH_LABEL, 1)

#initialize a blobs class, and extract blobs from the greyscale image
blobs = cvblob.Blobs()
#import inspect
#print inspect.getmembers(grey)
#print grey.__repr__()
result = cvblob.Label(grey, labelImg, blobs)
numblobs = len(blobs.keys())

print str(numblobs) + " blobs found covering " + str(result) + "px"

avgsize = int(result / numblobs)

import copy
aboveAvgBlobs = copy.copy(blobs)
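
The snippet ends just after copying the blob dictionary; a plausible continuation (a sketch, assuming the cvblob binding exposes each blob's area attribute) keeps only the blobs larger than average:

for label in blobs.keys():
    # drop blobs at or below the average area from the copied dict
    if aboveAvgBlobs[label].area <= avgsize:
        del aboveAvgBlobs[label]
print str(len(aboveAvgBlobs)) + " blobs above average size"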
Example #15
    def track_lk(self, cv_image, face):
        feature_box = None
        """ Initialize intermediate images if necessary """
        if not face.pyramid:
            face.grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.prev_grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.prev_pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.features = []
        """ Create a grey version of the image """
        cv.CvtColor(cv_image, face.grey, cv.CV_BGR2GRAY)
        """ Equalize the histogram to reduce lighting effects """
        cv.EqualizeHist(face.grey, face.grey)

        if face.track_box and face.features != []:
            """ We have feature points, so track and display them """
            """ Calculate the optical flow """
            face.features, status, track_error = cv.CalcOpticalFlowPyrLK(
                face.prev_grey, face.grey, face.prev_pyramid, face.pyramid,
                face.features, (self.win_size, self.win_size), 3,
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.01),
                self.flags)
            """ Keep only high status points """
            face.features = [p for (st, p) in zip(status, face.features) if st]

        elif face.track_box and self.is_rect_nonzero(face.track_box):
            """ Get the initial features to track """
            """ Create a mask image to be used to select the tracked points """
            mask = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            """ Begin with all black pixels """
            cv.Zero(mask)
            """ Get the coordinates and dimensions of the track box """
            try:
                x, y, w, h = face.track_box
            except:
                return None

            if self.auto_face_tracking:
                #                """ For faces, the detect box tends to extend beyond the actual object so shrink it slightly """
                #                x = int(0.97 * x)
                #                y = int(0.97 * y)
                #                w = int(1 * w)
                #                h = int(1 * h)
                """ Get the center of the track box (type CvRect) so we can create the
                    equivalent CvBox2D (rotated rectangle) required by EllipseBox below. """
                center_x = int(x + w / 2)
                center_y = int(y + h / 2)
                roi_box = ((center_x, center_y), (w, h), 0)
                """ Create a filled white ellipse within the track_box to define the ROI. """
                cv.EllipseBox(mask, roi_box, cv.CV_RGB(255, 255, 255),
                              cv.CV_FILLED)
            else:
                """ For manually selected regions, just use a rectangle """
                pt1 = (x, y)
                pt2 = (x + w, y + h)
                cv.Rectangle(mask, pt1, pt2, cv.CV_RGB(255, 255, 255),
                             cv.CV_FILLED)
            """ Create the temporary scratchpad images """
            eig = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
            temp = cv.CreateImage(cv.GetSize(self.grey), 32, 1)

            if self.feature_type == 0:
                """ Find keypoints to track using Good Features to Track """
                face.features = cv.GoodFeaturesToTrack(
                    face.grey,
                    eig,
                    temp,
                    self.max_count,
                    self.quality,
                    self.good_feature_distance,
                    mask=mask,
                    blockSize=self.block_size,
                    useHarris=self.use_harris,
                    k=0.04)

            elif self.feature_type == 1:
                """ Get the new features using SURF """
                (surf_features, descriptors) = cv.ExtractSURF(
                    face.grey, mask, cv.CreateMemStorage(0),
                    (0, self.surf_hessian_quality, 3, 1))
                for feature in surf_features:
                    face.features.append(feature[0])
            if self.auto_min_features:
                """ Since the detect box is larger than the actual face
                    or desired patch, shrink the number of features by 10% """
                face.min_features = int(len(face.features) * 0.9)
                face.abs_min_features = int(0.5 * face.min_features)
        """ Swapping the images """
        face.prev_grey, face.grey = face.grey, face.prev_grey
        face.prev_pyramid, face.pyramid = face.pyramid, face.prev_pyramid
        """ If we have some features... """
        if len(face.features) > 0:
            """ The FitEllipse2 function below requires us to convert the feature array
                into a CvMat matrix """
            try:
                self.feature_matrix = cv.CreateMat(1, len(face.features),
                                                   cv.CV_32SC2)
            except:
                pass
            """ Draw the points as green circles and add them to the features matrix """
            i = 0
            for the_point in face.features:
                if self.show_features:
                    cv.Circle(self.marker_image,
                              (int(the_point[0]), int(the_point[1])), 2,
                              (0, 255, 0, 0), cv.CV_FILLED, 8, 0)
                try:
                    cv.Set2D(self.feature_matrix, 0, i,
                             (int(the_point[0]), int(the_point[1])))
                except:
                    pass
                i = i + 1
            """ Draw the best fit ellipse around the feature points """
            if len(face.features) > 6:
                feature_box = cv.FitEllipse2(self.feature_matrix)
            else:
                feature_box = None
            """ Publish the ROI for the tracked object """
            # try:
            #     (roi_center, roi_size, roi_angle) = feature_box
            # except:
            #     logger.info("Patch box has shrunk to zeros...")
            #     feature_box = None

            # if feature_box and not self.drag_start and self.is_rect_nonzero(face.track_box):
            #     self.ROI = RegionOfInterest()
            #     self.ROI.x_offset = min(self.image_size[0], max(0, int(roi_center[0] - roi_size[0] / 2)))
            #     self.ROI.y_offset = min(self.image_size[1], max(0, int(roi_center[1] - roi_size[1] / 2)))
            #     self.ROI.width = min(self.image_size[0], int(roi_size[0]))
            #     self.ROI.height = min(self.image_size[1], int(roi_size[1]))

            # self.pubROI.publish(self.ROI)

        if feature_box is not None and len(face.features) > 0:
            return feature_box
        else:
            return None
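
track_lk() returns a CvBox2D tuple of ((center_x, center_y), (width, height), angle). A small helper, hypothetical and not part of the original class, converts it to an axis-aligned (x, y, w, h) rectangle for downstream ROI code:

def box2d_to_rect(box):
    # Ignores the rotation angle; good enough for a rough bounding box
    (center_x, center_y), (w, h), angle = box
    x = int(center_x - w / 2)
    y = int(center_y - h / 2)
    return (x, y, int(w), int(h))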
Example #16
width, height = mouse.screen_size()
capture = cv.CaptureFromCAM(-1)
image = cv.QueryFrame(capture)
X, Y = mouse.position()
click = False
#writer=cv.CreateVideoWriter("output.avi", 0, 15, cv.GetSize(image), 1)
count = 0
cv.NamedWindow("Image Window")
cv.CreateTrackbar("min-brightness", "Image Window", minV, 100,
                  changeMinBrightness)

while True:
    image = cv.QueryFrame(capture)
    cv.Flip(image, flipMode=1)
    grey = cv.CreateImage(cv.GetSize(image), 8, 1)
    cv.CvtColor(image, grey, cv.CV_BGR2GRAY)

    # Detect a face every 10th frame (note: count is never incremented in the
    # portion shown, so detection effectively runs on every frame)
    if count % 10 == 0:
        face = cv.HaarDetectObjects(grey, hc, cv.CreateMemStorage(), 1.2, 2,
                                    cv.CV_HAAR_DO_CANNY_PRUNING, (0, 0))

    #print face
    for [(x, y, w, h), k] in face:
        cv.Rectangle(image, (x, y), (x + w, y + h), (0, 255, 0))

    window_width, window_height = cv.GetSize(image)
    hsv_image = cv.CreateImage(cv.GetSize(image), 8, 3)
    hsv_mask = cv.CreateImage(cv.GetSize(image), 8, 1)
    hsv_edge = cv.CreateImage(cv.GetSize(image), 8, 1)
    hsv_min = cv.Scalar(0, 30, minV, 0)
Example #17
    def image_callback(self, data):
        """ Time this loop to get cycles per second """
        start = rospy.Time.now()
        """ Convert the raw image to OpenCV format using the convert_image() helper function """
        cv_image = self.convert_image(data)
        """ Some webcams invert the image """
        if self.flip_image:
            cv.Flip(cv_image)
        """ Create a few images we will use for display """
        if not self.image:
            self.image_size = cv.GetSize(cv_image)
            self.image = cv.CreateImage(self.image_size, 8, 3)
            self.marker_image = cv.CreateImage(self.image_size, 8, 3)
            self.display_image = cv.CreateImage(self.image_size, 8, 3)
            self.processed_image = cv.CreateImage(self.image_size, 8, 3)
            cv.Zero(self.marker_image)
        """ Copy the current frame to the global image in case we need it elsewhere"""
        cv.Copy(cv_image, self.image)

        if not self.keep_marker_history:
            cv.Zero(self.marker_image)
        """ Process the image to detect and track objects or features """
        processed_image = self.process_image(cv_image)
        """ If the result is a greyscale image, convert to 3-channel for display purposes """
        if processed_image.channels == 1:
            cv.CvtColor(processed_image, self.processed_image, cv.CV_GRAY2BGR)
        else:
            cv.Copy(processed_image, self.processed_image)
        """ Display the user-selection rectangle or point."""
        self.display_markers()

        if self.night_mode:
            """ Night mode: only display the markers """
            cv.SetZero(self.processed_image)
        """ Merge the processed image and the marker image """
        cv.Or(self.processed_image, self.marker_image, self.display_image)
        # TODO Draw the images on the rectangle
        # if self.track_box:
        #     if self.auto_face_tracking:
        #         cv.EllipseBox(self.display_image, self.track_box, cv.CV_RGB(255, 0, 0), 2)
        #     else:
        #         (center, size, angle) = self.track_box
        #         pt1 = (int(center[0] - size[0] / 2), int(center[1] - size[1] / 2))
        #         pt2 = (int(center[0] + size[0] / 2), int(center[1] + size[1] / 2))
        #
        #         cv.Rectangle(self.display_image, pt1, pt2, cv.RGB(255, 0, 0), 2, 8, 0)
        #
        # elif self.detect_box:
        #     (pt1_x, pt1_y, w, h) = self.detect_box
        #     cv.Rectangle(self.display_image, (pt1_x, pt1_y), (pt1_x + w, pt1_y + h), cv.RGB(255, 0, 0), 2, 8, 0)
        """ Handle keyboard events """
        self.keystroke = cv.WaitKey(5)

        duration = rospy.Time.now() - start
        duration = duration.to_sec()
        fps = int(1.0 / duration)
        self.cps_values.append(fps)
        if len(self.cps_values) > self.cps_n_values:
            self.cps_values.pop(0)
        self.cps = int(sum(self.cps_values) / len(self.cps_values))

        if self.show_text:
            hscale = 0.2 * self.image_size[0] / 160. + 0.1
            vscale = 0.2 * self.image_size[1] / 120. + 0.1
            text_font = cv.InitFont(cv.CV_FONT_VECTOR0, hscale, vscale, 0, 1,
                                    8)
            """ Print cycles per second (CPS) and resolution (RES) at top of the image """
            if self.image_size[0] >= 640:
                vstart = 25
                voffset = int(50 + self.image_size[1] / 120.)
            elif self.image_size[0] == 320:
                vstart = 15
                voffset = int(35 + self.image_size[1] / 120.)
            else:
                vstart = 10
                voffset = int(20 + self.image_size[1] / 120.)
            cv.PutText(self.display_image, "CPS: " + str(self.cps),
                       (10, vstart), text_font, cv.RGB(255, 255, 0))
            cv.PutText(
                self.display_image, "RES: " + str(self.image_size[0]) + "X" +
                str(self.image_size[1]), (10, voffset), text_font,
                cv.RGB(255, 255, 0))

        if not self.headless:
            # Now display the image.
            cv.ShowImage(self.cv_window_name, self.display_image)
        """ Publish the display image back to ROS """
        try:
            """ Convertion for cv2 is needed """
            cv2_image = numpy.asarray(self.display_image[:, :])
            self.output_image_pub.publish(
                self.bridge.cv2_to_imgmsg(cv2_image, "bgr8"))
        except CvBridgeError, e:
            logger.error(e)
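
convert_image() is referenced above but not shown; a minimal sketch (an assumption, consistent with the cv_bridge calls already used in this callback) would be:

    def convert_image(self, ros_image):
        # Convert the ROS image message to a cv2 array, then wrap it as an
        # old-style cv image for the cv.* calls above
        try:
            cv2_image = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
            return cv.fromarray(cv2_image)
        except CvBridgeError, e:
            logger.error(e)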
Example #18
 def set_image(self, img, bgr=False):
     if bgr:
         img = cv.CloneImage(img)
         cv.CvtColor(img, img, cv.CV_BGR2RGB)
     self.in_queue.put(((img.width, img.height), img.tostring()))
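
A hypothetical caller (the widget owning in_queue and its consumer are not shown): push a BGR webcam frame for display.

capture = cv.CaptureFromCAM(0)
frame = cv.QueryFrame(capture)  # frames arrive in BGR order
widget.set_image(frame, bgr=True)  # widget is assumed to own in_queue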
Example #19
import numpy as np, cv

# fn1 and fn2 are assumed to be image file paths defined earlier in the script
img1 = cv.LoadImage(fn1, 0)
img2 = cv.LoadImage(fn2, 0)

h1, w1 = img1.height, img1.width
h2, w2 = img2.height, img2.width
vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)
vis[:h1, :w1] = cv.GetMat(img1)
vis[:h2, w1:w1 + w2] = cv.GetMat(img2)
vis2 = cv.CreateMat(vis.shape[0], vis.shape[1], cv.CV_8UC3)
cv.CvtColor(cv.fromarray(vis), vis2, cv.CV_GRAY2BGR)

cv.ShowImage("test", vis2)
cv.WaitKey()
Example #20
File: first.py Project: arjun001/project
    def detect_motion(self, sensitivity='medium'):

        #Finding Video Size from the first frame
        frame = cv.QueryFrame(self.video_handle)
        frame_size = cv.GetSize(frame)
        '''Initializing image variables (used in motion detection) with the required types and sizes'''
        # Image containing the instantaneous moving rectangles
        color_image = cv.CreateImage(frame_size, 8, 3)
        # Resizing to window size
        color_output = cv.CreateImage(self.window_size, 8, 3)
        # Grey Image used for contour detection
        grey_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
        # Image storing background (moving pixels are averaged over small time window)
        moving_average = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 3)
        # Image for storing tracks resized to window size
        track_output = cv.CreateImage(self.window_size, cv.IPL_DEPTH_8U, 3)
        track_image, track_win = self.init_track_window(frame)

        # Recursively convert numpy arrays (e.g. box corner points) into
        # nested tuples, since cv.PolyLine expects plain tuples
        def totuple(a):
            try:
                return tuple(totuple(i) for i in a)
            except TypeError:
                return a

        first = True
        # Infinite loop for continuous detection of motion
        while True:
            '''########## Pixelwise Detection of Motion in a frame ###########'''
            # Capturing Frame
            color_image = cv.QueryFrame(self.video_handle)

            ##### Sensitivity Control 1 #####
            if (sensitivity == 'medium') or (sensitivity == 'low'):
                # Gaussian Smoothing
                cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, .020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)
            #cv.ShowImage("BG",difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            ##### Sensitivity Control 2 #####
            sens_thres = 90 if (sensitivity == 'low') or (self.opt
                                                          == 'cam') else 40
            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, sens_thres, 255,
                         cv.CV_THRESH_BINARY)
            '''### Blobing moved adjacent pixels, finding closed contours and bounding rectangles ###'''
            ##### Sensitivity Control 3 #####
            if (sensitivity == 'medium') or (sensitivity == 'low'):
                # Dilate and erode to get people blobs
                ker_size = 20 if self.opt == 'file' else 50
                cv.Dilate(grey_image, grey_image, None, ker_size)
                cv.Erode(grey_image, grey_image, None, 3)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)
            points = []
            while contour:
                bound_rect = cv.BoundingRect(list(contour))
                polygon_points = cv.ApproxPoly(list(contour), storage,
                                               cv.CV_POLY_APPROX_DP)

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])

                if (self.opt == 'file'):
                    points.append(pt1)
                    points.append(pt2)
                elif (bound_rect[0] - bound_rect[2] >
                      20) and (bound_rect[1] - bound_rect[3] > 20):
                    points.append(pt1)
                    points.append(pt2)

                box = cv.MinAreaRect2(polygon_points)
                box2 = cv.BoxPoints(box)
                box3 = np.int0(np.around(box2))
                box4 = totuple(box3)
                box5 = box4 + (box4[0], )

                # Filling the contours in the greyscale image (visual blobs instead of just contours)
                cv.FillPoly(grey_image, [
                    list(polygon_points),
                ], cv.CV_RGB(255, 255, 255), 0, 0)

                # Following line to draw detected contours as well
                #cv.PolyLine( color_image, [ polygon_points, ], 0, cv.CV_RGB(255,0,0), 1, 0, 0 )

                # Drawing Rectangle around the detected contour
                cv.PolyLine(color_image, [list(box5)], 0, (0, 255, 255), 2)

                if len(points):  # (self.opt == 'file') and
                    center1 = (pt1[0] + pt2[0]) / 2
                    center2 = (pt1[1] + pt2[1]) / 2
                    cv.Circle(color_image, (center1, center2), 5,
                              cv.CV_RGB(0, 255, 0), -1)
                    rad = 3 if self.opt == 'file' else 5
                    cv.Circle(track_image, (center1, center2), rad,
                              cv.CV_RGB(255, 128, 0), -1)

                contour = contour.h_next()

            # Uncomment to track centroid of all the moved boxes (only for WebCam)
            '''
            if (self.opt == 'cam') and len(points):
                center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
                cv.Circle(track_image, center_point, 15, cv.CV_RGB(255, 128, 0), -1)
            '''
            cv.Resize(color_image, color_output, cv.CV_INTER_AREA)
            cv.ShowImage("Original", color_output)

            cv.Resize(track_image, track_output, cv.CV_INTER_AREA)
            cv.ShowImage(track_win, track_output)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if (0xFF & c == 27):
                cv.SaveImage('Tracks_img_042_' + sensitivity + '.jpeg',
                             track_output)
                break
Example #21
File: hsv.py Project: nomad2012/fido
# Reconstructed from the truncated fragment: the mouse callback that the
# main loop registers below via cv.SetMouseCallback.
x_co = 0
y_co = 0


def on_mouse(event, x, y, flags, param):
    global x_co, y_co
    x_co = x
    y_co = y


cv.NamedWindow("camera", 1)
cv.MoveWindow("camera", 0, 0)
capture = cv.CaptureFromCAM(0)
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
mins = [255, 255, 255]
maxs = [0, 0, 0]
while True:
    src = cv.QueryFrame(capture)
    cv.Smooth(src, src, cv.CV_BLUR, 3)
    hsv = cv.CreateImage(cv.GetSize(src), 8, 3)
    thr = cv.CreateImage(cv.GetSize(src), 8, 1)
    cv.CvtColor(src, hsv, cv.CV_BGR2HSV)
    cv.SetMouseCallback("camera", on_mouse, 0)
    s = cv.Get2D(hsv, y_co, x_co)
    maxs = map(max, maxs, s)
    mins = map(min, mins, s)
    print "H:", s[0], "      S:", s[1], "       V:", s[2]
    print "min = {}, max = {}".format(mins, maxs)
    cv.PutText(src,
               str(s[0]) + "," + str(s[1]) + "," + str(s[2]), (x_co, y_co),
               font, (55, 25, 255))
    cv.ShowImage("camera", src)
    k = cv.WaitKey(10)
    if k != -1:
        print "key = {}".format(k)
    if k == ord('r'):
        maxs = [0, 0, 0]
Example #22
if __name__ == '__main__':
    if len(sys.argv) > 1:
        im = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/fruits.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)

    # create the output im
    col_edge = cv.CreateImage((im.width, im.height), 8, 3)

    # convert to grayscale
    gray = cv.CreateImage((im.width, im.height), 8, 1)
    edge = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, gray, cv.CV_BGR2GRAY)

    # create the window
    cv.NamedWindow(win_name, cv.CV_WINDOW_AUTOSIZE)

    # create the trackbar
    cv.CreateTrackbar(trackbar_name, win_name, 1, 100, on_trackbar)

    # show the im
    on_trackbar(0)

    # wait a key pressed to end
    cv.WaitKey(0)
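
on_trackbar(), win_name and trackbar_name are used above but not defined in this fragment; a sketch of the usual edge-demo callback (an assumption, based on the images the fragment creates):

win_name = "Edge"
trackbar_name = "Threshold"

def on_trackbar(position):
    # Smooth, invert, then run Canny with the slider value as the threshold
    cv.Smooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.Not(gray, edge)
    cv.Canny(gray, edge, position, position * 3, 3)
    # Copy the original image through the edge mask onto a black background
    cv.SetZero(col_edge)
    cv.Copy(im, col_edge, edge)
    cv.ShowImage(win_name, col_edge)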
Example #23
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        first = True

        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]

            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            while contour:
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255, 0, 0), 3)
                #print pt1, pt2

            if len(points):
                center_point = reduce(
                    lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2),
                    points)
                #cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                #cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                #cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 10,
                          cv.CV_RGB(255, 100, 0), 8)
                print center_point

                # contour center
                cx = ((bound_rect[2]) / 2) + bound_rect[0]
                cy = ((bound_rect[3]) / 2) + bound_rect[1]
                print cx, cy

            # servo motor control hook (left empty in the original)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
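
For comparison, a rough cv2 equivalent of the running-average background subtraction above (a sketch, not part of the original script):

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
running_avg = np.float32(frame)  # float accumulator for the background model
while ok:
    ok, frame = cap.read()
    if not ok:
        break
    frame = cv2.GaussianBlur(frame, (3, 3), 0)
    cv2.accumulateWeighted(frame, running_avg, 0.020)
    diff = cv2.absdiff(frame, cv2.convertScaleAbs(running_avg))
    grey = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, grey = cv2.threshold(grey, 70, 255, cv2.THRESH_BINARY)
    cv2.imshow("Target", grey)
    if cv2.waitKey(7) & 0xFF == 27:  # ESC to quit
        break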
Example #24
def watch_for_card(camera):
	has_moved = False
	been_to_base = False

	global captures
	global font
	captures = []

	font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0)
	img = cv.QueryFrame(camera)
	size = cv.GetSize(img)
	n_pixels = size[0]*size[1]

	grey = cv.CreateImage(size, 8,1)
	recent_frames = [cv.CloneImage(grey)]
	base = cv.CloneImage(grey)
	cv.CvtColor(img, base, cv.CV_RGB2GRAY)
	#cv.ShowImage('card', base)
	tmp = cv.CloneImage(grey)


	while True:
		img = cv.QueryFrame(camera)
		cv.CvtColor(img, grey, cv.CV_RGB2GRAY)

		biggest_diff = max(sum_squared(grey, frame) / n_pixels for frame in recent_frames)

		#display the cam view
		cv.PutText(img, "%s" % biggest_diff, (1,24), font, (255,255,255))
		cv.ShowImage('win',img)
		recent_frames.append(cv.CloneImage(grey))
		if len(recent_frames) > 5:
			del recent_frames[0]

		#check for keystroke
		c = cv.WaitKey(10)
		#if there was a keystroke, reset the last capture
		if c == 27:
			return captures
		elif c == 32:
			has_moved = True
			been_to_base = True
		elif c == 114:
			base = cv.CloneImage(grey)


		#if we're stable-ish
		if biggest_diff < 10:
			#if we're similar to base, update base
			#else, check for card
			#base_diff = max(sum_squared(base, frame) / n_pixels for frame in recent_frames)
			base_corr = min(ccoeff_normed(base, frame) for frame in recent_frames)
			#cv.ShowImage('debug', base)

			"""for i, frame in enumerate(recent_frames):
				tmp = cv.CloneImage(base)
				cv.Sub(base, frame, tmp)
				cv.Pow(tmp, tmp, 2.0)
				cv.PutText(tmp, "%s" % (i+1), (1,24), font, (255, 255, 255))
				#my_diff = sum_squared(base, frame) / n_pixels
				my_diff = ccoeff_normed(base, frame) #score(base, frame, cv.CV_TM_CCOEFF_NORMED)
				cv.PutText(tmp, "%s" % my_diff, (40, 24), font, (255, 255, 255))
				cv.ShowImage('dbg%s' % (i+1), tmp)"""
			#print "stable. corr = %s. moved = %s. been_to_base = %s" % (base_corr, has_moved, been_to_base)
			if base_corr > 0.75:
				base = cv.CloneImage(grey)
			#	cv.ShowImage('debug', base)
				has_moved = False
				been_to_base = True
			elif has_moved and been_to_base:
				corners = detect_card(grey, base)
				if corners is not None:
					card = get_card(grey, corners)
					cv.Flip(card,card,-1)
					captures.append(card)
					update_windows()
					#cv.ShowImage('card', card)
					has_moved = False
					been_to_base = False
		else:
			has_moved = True
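
sum_squared() and ccoeff_normed() are called above but not shown; minimal sketches (assumptions, consistent with the commented-out debug code) using the same old cv API:

def sum_squared(img1, img2):
    # Sum of squared per-pixel differences (note: cv.Sub saturates at 0 on
    # 8-bit images, so negative differences are underestimated)
    tmp = cv.CloneImage(img1)
    cv.Sub(img1, img2, tmp)
    cv.Pow(tmp, tmp, 2.0)
    return cv.Sum(tmp)[0]

def ccoeff_normed(img1, img2):
    # Normalised correlation coefficient of two same-sized images; with
    # equal sizes the template match result is a single 1x1 value
    result = cv.CreateImage((1, 1), cv.IPL_DEPTH_32F, 1)
    cv.MatchTemplate(img1, img2, result, cv.CV_TM_CCOEFF_NORMED)
    return result[0, 0]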
Example #25
    def hough_it(self, n_ball, iteration):
        # create gray scale image of balls
        gray_image = cv.CreateImage((self.width, self.height), 8, 1)
        cv.CvtColor(self.cv_image, gray_image, cv.CV_BGR2GRAY)

        # create gray scale array of balls
        gray_array = self.cv2array(gray_image)

        # find Hough circles
        circles = cv2.HoughCircles(gray_array, cv.CV_HOUGH_GRADIENT, 1, 40, param1=50,  \
                  param2=self.hough_accumulator, minRadius=self.hough_min_radius,       \
                  maxRadius=self.hough_max_radius)

        # Check for at least one ball found
        if circles is None:
            # display no balls found message on head display
            self.splash_screen("no balls", "found")
            # no point in continuing so exit with error message
            sys.exit("ERROR - hough_it - No golf balls found")

        circles = numpy.uint16(numpy.around(circles))

        ball_data = {}
        n_balls = 0

        circle_array = numpy.asarray(self.cv_image)

        # check if golf ball is in ball tray
        for i in circles[0, :]:
            # convert to baxter coordinates
            ball = self.pixel_to_baxter((i[0], i[1]), self.tray_distance)

            if self.is_near_ball_tray(ball):
                # draw the outer circle in red
                cv2.circle(circle_array, (i[0], i[1]), i[2], (0, 0, 255), 2)
                # draw the center of the circle in red
                cv2.circle(circle_array, (i[0], i[1]), 2, (0, 0, 255), 3)
            elif i[1] > 800:
                # draw the outer circle in red
                cv2.circle(circle_array, (i[0], i[1]), i[2], (0, 0, 255), 2)
                # draw the center of the circle in red
                cv2.circle(circle_array, (i[0], i[1]), 2, (0, 0, 255), 3)
            else:
                # draw the outer circle in green
                cv2.circle(circle_array, (i[0], i[1]), i[2], (0, 255, 0), 2)
                # draw the center of the circle in green
                cv2.circle(circle_array, (i[0], i[1]), 2, (0, 255, 0), 3)

                ball_data[n_balls] = (i[0], i[1], i[2])
                n_balls += 1

        circle_image = cv.fromarray(circle_array)

        cv.ShowImage("Hough Circle", circle_image)

        # 3ms wait
        cv.WaitKey(3)

        # display image on head monitor
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 1)
        position = (30, 60)
        s = "Searching for golf balls"
        cv.PutText(circle_image, s, position, font, self.white)
        msg = cv_bridge.CvBridge().cv_to_imgmsg(circle_image, encoding="bgr8")
        self.pub.publish(msg)

        if self.save_images:
            # save image of Hough circles on raw image
            file_name = self.image_dir                                                 \
                      + "hough_circle_" + str(n_ball) + "_" + str(iteration) + ".jpg"
            cv.SaveImage(file_name, circle_image)

        # Check for at least one ball found
        if n_balls == 0:  # no balls found
            # display no balls found message on head display
            self.splash_screen("no balls", "found")
            # no usable balls found, so there is no point in continuing; exit with an error message
            sys.exit("ERROR - hough_it - No golf balls found")

        # select next ball and find its position
        next_ball = self.find_next_golf_ball(ball_data, iteration)

        # find best gripper angle to avoid touching neighbouring ball
        angle = self.find_gripper_angle(next_ball, ball_data)

        # return next golf ball position and pickup angle
        return next_ball, angle
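
cv2array() (used above to feed cv2.HoughCircles) is not shown; a common minimal implementation (an assumption) converts the old-style image to a numpy array:

    def cv2array(self, im):
        # numpy can view an old-style CvMat directly; an 8-bit single-channel
        # image becomes a uint8 2-D array, which cv2.HoughCircles accepts
        return numpy.asarray(cv.GetMat(im))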
Example #26
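This fragment uses img, gray_image and prev_image before the lines shown; a plausible setup (a sketch, names assumed) would be:

capture = cv.CaptureFromCAM(0)
img = cv.QueryFrame(capture)
gray_image = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
prev_image = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)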
# Hard-coded tunable parameters; could be exposed via sliders later
threshold_limit1_upper = 40  # Upper threshold for the difference image
threshold_limit1_lower = 20  # Lower threshold for the difference image
fading_factor = 150  # Fading factor for the sum image
threshold_limit2_lower = 100  # Lower threshold for the sum image
threshold_limit2_upper = 255  # Upper threshold for the sum image
detection_skip = 13  # Delay after a single movement
rotation_factor = 5  # Rotation per detection
filter_depth = 8  # Depth of the low-pass moving-average filter
cooloff_timer_limit = 10  # Motor cool-off timer limit
max_area = 500  # Area threshold for a difference-image contour
non_rotation_band_h = 2 * img.width / 3 - 70  # Upper limit of the non-rotation band
non_rotation_band_l = img.width / 3 + 70  # Lower limit of the non-rotation band

#Primary initialization
cv.CvtColor(img, gray_image, cv.CV_RGB2GRAY)
cv.Smooth(gray_image, gray_image, cv.CV_GAUSSIAN, 3, 3)
cv.Copy(gray_image, prev_image)

#Initializing store for contour detection
store = cv.CreateMemStorage()

#misc initialization
flag = False  # GoodFeaturesToTrack execution flag
detection_skip_counter = 0  # Detection-skipping counter
cooloff_flag = False  # Flag for motor cool-off
rotation_flag = False  # Flag set while a rotation is occurring
cooloff_timer = 0  # Motor cool-down timer
avg = 0  # Average position of the difference contour
rotation_multiplier = 0  # x-times rotation for a given detection
Example #27
def thresholded_image(image, min_threshold, max_threshold):
    # Convert to HSV, then keep only the pixels inside the threshold range
    image_hsv = cv.CreateImage(cv.GetSize(image), image.depth, 3)
    cv.CvtColor(image, image_hsv, cv.CV_BGR2HSV)
    image_threshed = cv.CreateImage(cv.GetSize(image), image.depth, 1)
    cv.InRangeS(image_hsv, min_threshold, max_threshold, image_threshed)
    return image_threshed
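
A hypothetical usage example (the thresholds below are arbitrary HSV bounds for an orange-ish object, not values from the original):

frame = cv.LoadImage("frame.jpg", 1)  # assumed input image
mask = thresholded_image(frame, cv.Scalar(0, 120, 120), cv.Scalar(20, 255, 255))
cv.ShowImage("mask", mask)
cv.WaitKey(0)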
Example #28
    cv.NamedWindow("Mask", 1)
    cv.CreateTrackbar("Open", "Camera", 0, 10, Opening)
    cv.CreateTrackbar("Close", "Camera", 0, 10, Closing)
    
    src = cv.QueryFrame(capture)
    hsv_frame = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 3)
    thresholded = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
    thresholded2 = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
        
    storage = cv.CreateMemStorage(0)
    
    while True:
        src = cv.QueryFrame(capture)
        # convert to HSV for color matching
        cv.CvtColor(src, hsv_frame, cv.CV_BGR2HSV)

        mask = cv.CreateImage(cv.GetSize(src), 8, 1)
        cv.InRangeS(hsv_frame, cv.Scalar(0, 50, 170, 0), cv.Scalar(30, 255, 255, 0), mask)

        cv.ShowImage("Camera", hsv_frame)
        cv.ShowImage("Mask", mask)
        
        if cv.WaitKey(10) == 27:
            break
        
    cv.DestroyAllWindows()
Example #29
def process(args):
    '''process a set of files'''

    global slipmap, mosaic
    scan_count = 0
    files = []
    for a in args:
        if os.path.isdir(a):
            files.extend(file_list(a, ['jpg', 'pgm', 'png']))
        else:
            if a.find('*') != -1:
                files.extend(glob.glob(a))
            else:
                files.append(a)
    files.sort()
    num_files = len(files)
    print("num_files=%u" % num_files)
    region_count = 0

    slipmap = mp_slipmap.MPSlipMap(service=opts.service,
                                   elevation=True,
                                   title='Map')
    icon = slipmap.icon('redplane.png')
    slipmap.add_object(
        mp_slipmap.SlipIcon('plane', (0, 0),
                            icon,
                            layer=3,
                            rotation=0,
                            follow=True,
                            trail=mp_slipmap.SlipTrail()))

    if opts.mission:
        from pymavlink import mavwp
        wp = mavwp.MAVWPLoader()
        wp.load(opts.mission)
        boundary = wp.polygon()
        slipmap.add_object(
            mp_slipmap.SlipPolygon('mission',
                                   boundary,
                                   layer=1,
                                   linewidth=1,
                                   colour=(255, 255, 255)))

    if opts.mavlog:
        mpos = mav_position.MavInterpolator()
        mpos.set_logfile(opts.mavlog)
    else:
        mpos = None

    if opts.kmzlog:
        kmzpos = mav_position.KmlPosition(opts.kmzlog)
    else:
        kmzpos = None

    if opts.triggerlog:
        triggerpos = mav_position.TriggerPosition(opts.triggerlog)
    else:
        triggerpos = None

    # create a simple lens model using the focal length
    C_params = cam_params.CameraParams(lens=opts.lens,
                                       sensorwidth=opts.sensorwidth)

    if opts.camera_params:
        C_params.load(opts.camera_params)

    camera_settings = MPSettings([
        MPSetting('roll_stabilised', bool, True, 'Roll Stabilised'),
        MPSetting(
            'altitude', int, 0, 'Altitude', range=(0, 10000), increment=1),
        MPSetting('filter_type',
                  str,
                  'simple',
                  'Filter Type',
                  choice=['simple', 'compactness']),
        MPSetting('fullres', bool, False, 'Full Resolution'),
        MPSetting('quality',
                  int,
                  75,
                  'Compression Quality',
                  range=(1, 100),
                  increment=1),
        MPSetting('thumbsize',
                  int,
                  60,
                  'Thumbnail Size',
                  range=(10, 200),
                  increment=1),
        MPSetting('minscore',
                  int,
                  75,
                  'Min Score',
                  range=(0, 1000),
                  increment=1,
                  tab='Scoring'),
        MPSetting('brightness',
                  float,
                  1.0,
                  'Display Brightness',
                  range=(0.1, 10),
                  increment=0.1,
                  digits=2,
                  tab='Display')
    ],
                                 title='Camera Settings')

    image_settings = MPSettings([
        MPSetting('MinRegionArea',
                  float,
                  0.15,
                  range=(0, 100),
                  increment=0.05,
                  digits=2,
                  tab='Image Processing'),
        MPSetting('MaxRegionArea',
                  float,
                  2.0,
                  range=(0, 100),
                  increment=0.1,
                  digits=1),
        MPSetting('MinRegionSize',
                  float,
                  0.1,
                  range=(0, 100),
                  increment=0.05,
                  digits=2),
        MPSetting(
            'MaxRegionSize', float, 2, range=(0, 100), increment=0.1,
            digits=1),
        MPSetting('MaxRarityPct',
                  float,
                  0.02,
                  range=(0, 100),
                  increment=0.01,
                  digits=2),
        MPSetting('RegionMergeSize',
                  float,
                  3.0,
                  range=(0, 100),
                  increment=0.1,
                  digits=1),
        MPSetting('SaveIntermediate', bool, False)
    ],
                                title='Image Settings')

    mosaic = cuav_mosaic.Mosaic(slipmap,
                                C=C_params,
                                camera_settings=camera_settings,
                                image_settings=image_settings,
                                start_menu=True)

    joelog = cuav_joe.JoeLog(None)

    if opts.view:
        viewer = mp_image.MPImage(title='Image', can_zoom=True, can_drag=True)

    if camera_settings.filter_type == 'compactness':
        calculate_compactness = True
        print("Using compactness filter")
    else:
        calculate_compactness = False

    for f in files:
        if not mosaic.started():
            print("Waiting for startup")
            while not mosaic.started():
                mosaic.check_events()
                time.sleep(0.01)

        if mpos:
            # get the position by interpolating telemetry data from the MAVLink log file
            # this assumes that the filename contains the timestamp
            frame_time = cuav_util.parse_frame_time(f) + opts.time_offset
            if camera_settings.roll_stabilised:
                roll = 0
            else:
                roll = None
            try:
                pos = mpos.position(frame_time, roll=roll)
            except Exception:
                print("No position available for %s" % frame_time)
                # skip this frame
                continue
        elif kmzpos is not None:
            pos = kmzpos.position(f)
        elif triggerpos is not None:
            pos = triggerpos.position(f)
        else:
            # get the position using EXIF data
            pos = mav_position.exif_position(f)
            pos.time += opts.time_offset

        # update the plane icon on the map
        if pos is not None:
            slipmap.set_position('plane', (pos.lat, pos.lon), rotation=pos.yaw)
            if camera_settings.altitude > 0:
                pos.altitude = camera_settings.altitude

        # check for any events from the map
        slipmap.check_events()
        mosaic.check_events()

        im_orig = cuav_util.LoadImage(f)
        (w, h) = cuav_util.image_shape(im_orig)

        if not opts.camera_params:
            C_params.set_resolution(w, h)

        im_full = im_orig

        im_640 = cv.CreateImage((640, 480), 8, 3)
        cv.Resize(im_full, im_640, cv.CV_INTER_NN)
        im_640 = numpy.ascontiguousarray(cv.GetMat(im_640))
        im_full = numpy.ascontiguousarray(cv.GetMat(im_full))

        count = 0
        total_time = 0

        t0 = time.time()
        if camera_settings.fullres:
            img_scan = im_full
        else:
            img_scan = im_640

        scan_parms = {}
        for name in image_settings.list():
            scan_parms[name] = image_settings.get(name)
        scan_parms['SaveIntermediate'] = float(scan_parms['SaveIntermediate'])

        if pos is not None:
            (sw, sh) = cuav_util.image_shape(img_scan)
            mpp = cuav_util.meters_per_pixel(pos, C=C_params)
            if mpp is not None:
                # convert meters-per-pixel from the full-resolution image to
                # the (possibly downsized) scanned image
                scan_parms['MetersPerPixel'] = mpp * (w / float(sw))
            regions = scanner.scan(img_scan, scan_parms)
        else:
            regions = scanner.scan(img_scan)
        regions = cuav_region.RegionsConvert(regions,
                                             cuav_util.image_shape(img_scan),
                                             cuav_util.image_shape(im_full),
                                             calculate_compactness)
        count += 1
        t1 = time.time()

        frame_time = pos.time

        regions = cuav_region.filter_regions(
            im_full,
            regions,
            frame_time=frame_time,
            min_score=camera_settings.minscore,
            filter_type=camera_settings.filter_type)

        scan_count += 1

        mosaic.add_image(pos.time, f, pos)

        if pos and len(regions) > 0:
            altitude = camera_settings.altitude
            if altitude <= 0:
                altitude = None
            joelog.add_regions(frame_time,
                               regions,
                               pos,
                               f,
                               width=w,
                               height=h,
                               altitude=altitude)

        region_count += len(regions)

        if len(regions) > 0:
            composite = cuav_mosaic.CompositeThumbnail(
                cv.GetImage(cv.fromarray(im_full)), regions)
            thumbs = cuav_mosaic.ExtractThumbs(composite, len(regions))
            mosaic.add_regions(regions, thumbs, f, pos)

        if opts.view:
            img_view = img_scan
            (wview, hview) = cuav_util.image_shape(img_view)
            mat = cv.fromarray(img_view)
            for r in regions:
                r.draw_rectangle(mat, (255, 0, 0))
            cv.CvtColor(mat, mat, cv.CV_BGR2RGB)
            viewer.set_image(mat)
            viewer.set_title('Image: ' + os.path.basename(f))

        total_time += (t1 - t0)
        if t1 != t0:
            print('%s scan %.1f fps  %u regions [%u/%u]' %
                  (os.path.basename(f), count / total_time, region_count,
                   scan_count, num_files))
Example #30
    def getCoordinates(self, target="ball", debug=False):
        """
        Return the best coordinates found by thresholding the received image
        with the chosen threshold; (-1, -1) means the object was not found.
        """
        t = time.time()
        """Get the latest frame from the camera"""
        global cam, lock
        lock.acquire()
        try:
            cv.GrabFrame(cam)
            frame = cv.RetrieveFrame(cam)
        finally:
            lock.release()
        """Initialize the coordinates to -1, which means that the object is not found"""
        x = -1
        y = -1
        """Prepair image for thresholding"""
        #cv.Smooth(thresholded_frame, thresholded_frame, cv.CV_GAUSSIAN, 5, 5)
        cv.Smooth(frame, frame, cv.CV_BLUR, 3)
        cv.CvtColor(frame, self.hsv_frame, cv.CV_BGR2HSV)
        """Threshold the image according to the chosen thresholds"""
        if target == "ball":
            cv.InRangeS(self.hsv_frame, self.ball_threshold_low,
                        self.ball_threshold_high, self.thresholded_frame)
        elif target == "blue gate":
            cv.InRangeS(self.hsv_frame, self.blue_gate_threshold_low,
                        self.blue_gate_threshold_high, self.thresholded_frame)
        elif target == "yellow gate":
            cv.InRangeS(self.hsv_frame, self.yellow_gate_threshold_low,
                        self.yellow_gate_threshold_high,
                        self.thresholded_frame)
        elif target == "black":
            cv.InRangeS(self.hsv_frame, self.black_threshold_low,
                        self.black_threshold_high, self.thresholded_frame)
        elif target == "white":
            cv.InRangeS(self.hsv_frame, self.white_threshold_low,
                        self.white_threshold_high, self.thresholded_frame)

        cv.InRangeS(self.hsv_frame, self.green_threshold_low,
                    self.green_threshold_high, self.thresholded_field)
        """Now use some function to find the object"""
        blobs_image = SimpleCV.Image(self.thresholded_frame)
        field_image = SimpleCV.Image(self.thresholded_field)

        blobs = blobs_image.findBlobs(minsize=2)
        if blobs:
            if target == "ball":
                for i in range(len(blobs)):
                    i = len(blobs) - 1 - i
                    pos_x = blobs[i].maxX()
                    pos_y = blobs[i].maxY()
                    on_field = False
                    for py in range(0, pos_y):
                        if field_image.getPixel(pos_x, py) == (255, 255, 255):
                            on_field = True
                            break
                    if on_field:
                        x, y = pos_x, pos_y
                        break
            else:
                x, y = blobs[-1].coordinates()
        """Old, openCV using contours
        contours = cv.FindContours(cv.CloneImage(thresholded_frame), cv.CreateMemStorage(),mode=cv.CV_RETR_EXTERNAL)
        
        if len(contours)!=0:
            #determine the objects moments and check that the area is large  
            #enough to be our object 
            moments = cv.Moments(contours,1) 
            moment10 = cv.GetSpatialMoment(moments, 1, 0)
            moment01 = cv.GetSpatialMoment(moments, 0, 1)
            area = cv.GetCentralMoment(moments, 0, 0) 
            
            #there can be noise in the video so ignore objects with small areas 
            if area > 2: 
                #determine the x and y coordinates of the center of the object 
                #we are tracking by dividing the 1, 0 and 0, 1 moments by the area 
                x = moment10/area
                y = moment01/area"""
        if debug:
            cv.ShowImage("Camera", self.thresholded_frame)
            #thresholded_frame=SimpleCV.Image(thresholded_frame)
            #thresholded_frame.show()
        print time.time() - t

        return x, y
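
A hypothetical caller for getCoordinates() (the enclosing class name and camera setup are not shown, so the names here are assumptions):

vision = Vision()  # assumed wrapper class holding the thresholds and frames
x, y = vision.getCoordinates(target="ball", debug=True)
if x == -1:
    print "ball not found"
else:
    print "ball at", x, y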