Example #1
def cv_to_array(image_cv):
    return np.asarray(cv.GetMat(image_cv), dtype=np.float32).ravel()
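A hypothetical round trip for this helper, assuming the module imports cv and numpy as np:

img = cv.LoadImage("input.png")  # hypothetical input path
vec = cv_to_array(img)           # flat float32 vector, one value per pixel per channel
print(vec.shape)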
Example #2
def main():
    orig = cv.LoadImageM("sample/lena.bmp")
    np_mat = np.asarray(cv.GetMat(orig))
    np_mat = np.rot90(np_mat).copy()
    modif = cv.fromarray(np_mat)
    cv.SaveImage("sample/lena_90.png", modif)
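The .copy() above is load-bearing: np.rot90 returns a strided view, and cv.fromarray, as far as I recall, rejects non-contiguous arrays. A minimal sketch of the same point, with the same imports assumed:

view = np.rot90(np.zeros((4, 6, 3), dtype=np.uint8))
# cv.fromarray(view) would raise a TypeError: the rotated view is not contiguous
mat = cv.fromarray(np.ascontiguousarray(view))  # equivalent to using .copy()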
Example #3
    def map(self, event_filename, video_data):
        """

        Args:
            event_filename: Tuple of (event, filename)
            video_data: Binary video data

        Yields:
            A tuple in the form of ((event, filename), features) where features is a dict with keys

            frame_features: List of frame features
            file_size: Size in bytes

            where each frame feature is a dictionary of

            frame_time: Time in seconds
            frame_num: Frame number
            prev_frame_num: Previous frame number (useful if there is a frame skip)
            keyframe: Boolean True/False
            surf: List of surf points (see impoint)
            face_widths: List of face widths normalized by frame width (only present when faces are detected)
            face_heights: List of face heights normalized by frame height (only present when faces are detected)
            predictions: Dictionary of predictions
        """
        sys.stderr.write('In Raw:%s\n' % str(event_filename))
        print(event_filename)
        ext = '.' + event_filename[1].rsplit('.', 1)[1]
        with tempfile.NamedTemporaryFile(suffix=ext) as fp:
            with self.timer('Writing video data'):
                fp.write(video_data)
                fp.flush()
            kf = keyframe.DecisionTree(min_interval=0)
            kf.load()
            prev_frame = None
            prev_frame_num = 0
            all_out = []
            sz = len(video_data)

            self.timer.start('KF')
            try:
                for (frame_num, frame_time, frame), iskeyframe in kf(
                        viderator.frame_iter(fp.name, frozen=True)):
                    hadoopy.counter('RawFeatures', 'NumFrames')
                    self.timer.stop('KF')
                    print(frame_time)
                    if frame_num > self._max_frames:
                        break
                    if frame_num % 100 == 0:
                        with self.timer('Computing face features'):
                            faces = _detect_faces(
                                imfeat.convert_image(frame,
                                                     [('opencv', 'gray', 8)]),
                                self.cascade)
                    else:
                        faces = {}
                    out = {
                        'frame_time': frame_time,
                        'frame_num': frame_num,
                        'prev_frame_num': prev_frame_num,
                        'keyframe': iskeyframe,
                        'surf': kf.prev_vec['surf']
                    }
                    if faces:  # If any faces
                        face_heights = np.array([x[0][3] for x in faces
                                                 ]) / float(frame.height)
                        face_widths = np.array([x[0][2] for x in faces
                                                ]) / float(frame.width)
                        out['face_widths'] = face_widths
                        out['face_heights'] = face_heights
                    # Output the current and previous frames if this is a keyframe
                    if iskeyframe and np.random.random() < self._frame_output_prob:
                        out['prev_frame'] = cv_to_jpg(prev_frame)
                        out['frame'] = cv_to_jpg(frame)
                    # Compute scene features
                    with self.timer('Computing scene classifier features'):
                        frame_res = cv.fromarray(
                            cv2.resize(
                                np.asarray(cv.GetMat(frame)),
                                (self._image_width, self._image_height)))
                        feature = self._feat(frame_res)
                        out['predictions'] = dict(
                            (classifier_name, classifier.predict(feature)) for
                            classifier_name, classifier in self._classifiers)
                    # Output JPEG with match lines from the SURF feature
                    if np.random.random() < self._match_line_prob and prev_frame:
                        out['surf_image'] = cv_to_jpg(
                            plot_matches(prev_frame,
                                         kf.surf_debug['matches'],
                                         kf.surf_debug['points0'],
                                         kf.surf_debug['points1'],
                                         max_feat_width=kf.max_feat_width))
                    # Output data buffer
                    all_out.append(out)
                    if len(all_out) >= self._block_size:
                        with self.timer('Yield'):
                            yield event_filename, {
                                'frame_features': all_out,
                                'file_size': sz
                            }
                            all_out = []
                    prev_frame = frame
                    prev_frame_num = frame_num
                    self.timer.start('KF')
            except viderator.FPSParseException:  # NOTE(brandyn): This will disregard videos with this error
                hadoopy.counter('SkippedVideos', 'FPSParseException')
                return
            if all_out:
                with self.timer('Yield'):
                    yield event_filename, {
                        'frame_features': all_out,
                        'file_size': sz
                    }
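Example #4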
#!/usr/bin/python

import cv

grayImg = cv.LoadImage("../1.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
mat = cv.GetMat(grayImg)
#cv.SaveImage("test.png", mat)

for i in range(mat.cols):
    for j in range(mat.rows):
        if mat[j, i] >= 180:
            mat[j, i] = 255
        else:
            mat[j, i] = 0

cv.SaveImage("test.png", mat)
Example #5
class ugv_image_blue:

  def __init__(self):
    self.image_pub = rospy.Publisher("/image",Image)

    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("/p3dx/front_camera/image_raw",Image,self.callback)

  def callback(self,data):
    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError, e:
      print e

    (rows,cols,channels) = cv_image.shape
    if cols > 60 and rows > 60 :
      cv2.circle(cv_image, (50,50), 10, 255)
    ### Start of Image Processing ######

    hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
    lower_blue = np.array([110,50,50])
    upper_blue = np.array([130,255,255])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    mat=cv.GetMat(cv.fromarray(mask))
    moments=cv.Moments(mat)
    if ((moments.m01 > -10000000000000) and (moments.m01 < 10000000000000) and
            (moments.m00 > -1000000000000) and (moments.m00 < 10000000000000) and
            (moments.m10 > -1000000000000) and (moments.m10 < 10000000000000)):
        global detected
        mean = mask.mean()
        if(mean > 0.5):
           detected = 1
        if(mean < 0.5 and detected == 1):
           detected = 0
        yc= moments.m01/moments.m00
        xc=moments.m10/moments.m00
        width, height = cv.GetSize(mat)
        global max_right_blue_x
        global max_left_blue_x
        global max_right_blue_y
        global max_left_blue_y

        contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
        cnt = contours[0]
        #print contours

        leftmost = tuple(cnt[cnt[:,:,0].argmin()][0])
        rightmost = tuple(cnt[cnt[:,:,0].argmax()][0])
        max_left_blue_x = leftmost[0]
        max_left_blue_y = leftmost[1]
        max_right_blue_x = rightmost[0]
        max_right_blue_y = rightmost[1]
        global nxc_blue
        global nyc_blue
        nxc_blue=xc-640
        nyc_blue=yc-360
        new_max_left_blue_x = max_left_blue_x -640
        new_max_left_blue_y = max_left_blue_y - 360
        new_max_right_blue_x = max_right_blue_x - 640
        new_max_right_blue_y = max_right_blue_y - 360
        focal = 1097.51
        q = nxc_blue / focal
        q1 = new_max_left_blue_x / focal
        q2 = new_max_right_blue_x / focal
        global bearing_blue
        global bearing_left_edge
        global bearing_right_edge
        bearing_right_edge = math.atan(q2)*((180.0)/(3.14159))
        bearing_left_edge = math.atan(q1)*((180.0)/(3.14159))
        bearing_blue=math.atan(q)*((180.0)/(3.14159))
        cv2.circle(cv_image,(int(xc),int(yc)),10,(0,0,255),-1)
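All three bearings above come from one formula, bearing = atan((u - cx) / f) converted to degrees. A hypothetical helper using the constants hard-coded above (cx = 640 from a 1280-pixel-wide image, f = 1097.51 pixels):

def pixel_to_bearing_deg(u, cx=640.0, focal=1097.51):
    # horizontal bearing (degrees) of image column u about the optical axis
    return math.degrees(math.atan((u - cx) / focal))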
Example #6
def _create_grayscale_mat(bgr_bytes, width, height, channel):
    image = cv.CreateImageHeader((width, height), cv.IPL_DEPTH_8U, 1)
    cv.SetData(image, bgr_bytes[channel::3], width)
    return cv.GetMat(image)
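Example #7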
    def track(self):
        # in case something else is still open
        cv.DestroyAllWindows()
        tracker = Speed()
        imgArr = []
        capture = cv.CaptureFromCAM(self.camera_index)
        if not capture:
            QMessageBox.information(self, "Camera Error", "Camera not found")
            return
        cv.NamedWindow("Video", cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow("Video", 320, 0)
        cv.NamedWindow("Tracking", cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow("Tracking", 800, 82)
        moments = 0
        cv.CreateTrackbar("Start at color", "Video", self.low_color, 179, self.update_low_color)
        cv.CreateTrackbar("End at color", "Video", self.high_color, 179, self.update_high_color)

        start_time = 0
        camera_on = True
        tracking = False
        needs_saving = False
        background = 0
        while camera_on:
            if not self.busy_updating:
                frame = cv.QueryFrame(capture)
                if not frame:
                    break
                # convert color to hue space for easier tracking
                imgHSV = cv.CreateImage(cv.GetSize(frame), 8, 3)
                cv.CvtColor(frame, imgHSV, cv.CV_BGR2HSV)
                imgThresh = cv.CreateImage(cv.GetSize(frame), 8, 1)
                # implement interactive thresholding
                cv.InRangeS(imgHSV,
                            cv.Scalar(self.low_color, self.MED_SV, self.MED_SV),
                            cv.Scalar(self.high_color, self.MAX_SV, self.MAX_SV),
                            imgThresh)

                # find image moments, compute object position
                # by dividing by area
                moments = cv.Moments(cv.GetMat(imgThresh))
                x_mov = cv.GetSpatialMoment(moments, 1, 0)
                y_mov = cv.GetSpatialMoment(moments, 0, 1)
                area = cv.GetCentralMoment(moments, 0, 0)
                # size is 480 360 for webcam
                # 324, 243 for massive-imaged external camera
                small_thresh = cv.CreateImage((self.fit_camera_width, self.fit_camera_height), 8, 1)
                cv.Resize(imgThresh, small_thresh)
                small_frame = cv.CreateImage((self.fit_camera_width, self.fit_camera_height), 8, 3)
                cv.Resize(frame, small_frame)
                cv.ShowImage("Tracking", small_thresh)
                cv.ShowImage("Video", small_frame)

                k = cv.WaitKey(1)

                # press q or escape to quit camera view
                if k == 27 or k == 113:
                    camera_on = False
                    tracking = False
                    cv.DestroyAllWindows()
                    break

                # click "Record!" or press "g" to start tracking speed/recording
                elif k == 103 or self.start_record:
                    needs_saving = True
                    start_time = time.clock()
                    tracking = True
                    # store background in memory and proceed to recording
                    background = cv.CloneImage(frame)
                    tracker.start_time = start_time
                    self.start_record = False
                    imgArr.append(background)
                # click "Stop recording" or press "d" to stop tracking speed/recording;
                # save everything in the proper format and close recording windows
                elif k == 100 or self.end_record:
                    if needs_saving:
                        tracking = False
                        tracker.stop_time = time.clock()
                        curr_dir = os.listdir(".")
                        # create the output folder if it doesn't already exist
                        if self.output_folder not in curr_dir:
                            path_var = "./" + str(self.output_folder)
                            os.mkdir(path_var)
                        # default saving directory is output_folder/Trial_<start time of trial>
                        new_vid_folder = "./" + str(self.output_folder) + "/Trial_" + str(start_time)
                        os.mkdir(new_vid_folder)
                        tracker.out_folder = new_vid_folder
                        # save the first frame of the film as the background image
                        background_name = str(tracker.out_folder) + "/background.png"
                        cv.SaveImage(background_name, background)
                        index = 0
                        for img in imgArr:
                            output_name = str(tracker.out_folder) + "/frame_" + str(index) + ".png"
                            cv.SaveImage(output_name, img)
                            index += 1
                        # compute velocity and acceleration values
                        tracker.update()
                        # save the tracking done so far (by pickling);
                        # can later be exported as a text file
                        tracker_file = str(tracker.out_folder) + "/Data"
                        f = open(tracker_file, 'w')
                        pickle.dump(tracker, f)
                        f.close()
                        cv.DestroyAllWindows()
                        break
                    else:
                        cv.DestroyAllWindows()
                        break
                if tracking:
                    # store object position
                    if area > 0:
                        posX = float(x_mov) / float(area)
                        posY = float(y_mov) / float(area)
                        curr_time = time.clock()
                        tracker.add_pos(posX, posY, curr_time - start_time)
                        start_time = curr_time
                        imgArr.append(cv.CloneImage(frame))
Example #8
def getBottomQuarter(img):
    sub = cv.GetSubRect(img,
                        (0, img.height * 3 / 4, img.width, img.height / 4))
    return cv.GetMat(sub)
Example #9
def getPortion(img, portion_num):
    sub = cv.GetSubRect(img, (0, img.height * portion_num / num_portions,
                              img.width, img.height / num_portions))
    return cv.GetMat(sub)
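getPortion generalizes getBottomQuarter from Example #8; if the module-level num_portions is 4 (an assumption about the surrounding file), the last portion is exactly the bottom quarter:

num_portions = 4                            # assumed module-level global
bottom = getPortion(img, num_portions - 1)  # same rows as getBottomQuarter(img)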
Example #10
def _create_grayscale_image(img_bytes, img_width, img_height, channel):
    image = _create_image_header(img_width, img_height, 1)
    cv.SetData(image, img_bytes[channel::3], img_width)
    return cv.GetMat(image)
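Hypothetical usage, mirroring the call sites in Example #19: the byte string comes from IplImage.tostring(), and the channel index follows BGR byte order (0 = blue, 1 = green, 2 = red). _create_image_header is not shown here; it is presumably a thin wrapper over cv.CreateImageHeader, as in Example #6.

img_bytes = bgr_image.tostring()
b_mat = _create_grayscale_image(img_bytes, bgr_image.width, bgr_image.height, 0)
g_mat = _create_grayscale_image(img_bytes, bgr_image.width, bgr_image.height, 1)
r_mat = _create_grayscale_image(img_bytes, bgr_image.width, bgr_image.height, 2)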
Example #11
    def run(self):
        while True:
            img = cv.QueryFrame(self.capture)

            img_undist = cv.CreateImage(cv.GetSize(img), 8, 3)
            self.filter.undistort(img, img_undist)
            img = img_undist

            # Convert the BGR image to HSV space for easier color thresholding
            img_hsv = cv.CreateImage(cv.GetSize(img), 8, 3)
            cv.CvtColor(img, img_hsv, cv.CV_BGR2HSV)

            # Smooth the image with a Gaussian Kernel
            cv.Smooth(img_hsv, img_hsv, cv.CV_BLUR, 3)

            # Threshold the HSV image to find yellow objects
            img_th = cv.CreateImage(cv.GetSize(img_hsv), 8, 1)
            cv.InRangeS(img_hsv, (20, 100, 100), (30, 255, 255), img_th)

            # Connected Component Analysis
            roi = self.detector.detect(cv.GetMat(img_th))
            if len(roi) == 0:
                continue

            # Only consider the biggest blob
            i = max(roi, key=lambda x: roi.get(x).count)
            blob = roi[i]

            # Filter out blobs that are smaller
            if blob.count > 50:
                # Draw bounding box and center of object
                center_x = int(blob.x1 + (blob.x2 - blob.x1) / 2.0)
                center_y = int(blob.y1 + (blob.y2 - blob.y1) / 2.0)

                cv.Rectangle(img, (blob.x1, blob.y1), (blob.x2, blob.y2),
                             cv.RGB(255, 0, 0), 2)
                cv.Circle(img, (center_x, center_y), 2, cv.RGB(255, 0, 0), -1)

                # Draw cross
                cv.Line(img, (int(img.width / 2), 0),
                        (int(img.width / 2), img.height), cv.RGB(0, 0, 0))
                cv.Line(img, (0, int(img.height / 2)),
                        (img.width, int(img.height / 2)), cv.RGB(0, 0, 0))

                # Apply Kalman filter
                xhat = self.filter.predictUpdate(self.curr_pitch,
                                                 self.curr_yaw, center_x,
                                                 center_y)
                print math.degrees(xhat[0])
                fpix = -1 * self.filter.state2pixel(self.curr_pitch,
                                                    self.curr_yaw)
                print fpix
                cv.Circle(img, (int(fpix[0]), int(fpix[1])), 2,
                          cv.RGB(0, 255, 0), -1)

                #self._MoveCameraHead(center_x, center_y, angle)

                # Draw predicted object center

            # Display the undistorted image with the tracking overlays
            cv.ShowImage('Tracking', img_undist)

            if cv.WaitKey(10) == 27:
                break

        self.serial.close()
Example #12
def process(args):
    '''process a set of files'''

    global slipmap, mosaic
    scan_count = 0
    files = []
    for a in args:
        if os.path.isdir(a):
            files.extend(file_list(a, ['jpg', 'pgm', 'png']))
        else:
            if a.find('*') != -1:
                files.extend(glob.glob(a))
            else:
                files.append(a)
    files.sort()
    num_files = len(files)
    print("num_files=%u" % num_files)
    region_count = 0

    slipmap = mp_slipmap.MPSlipMap(service=opts.service,
                                   elevation=True,
                                   title='Map')
    icon = slipmap.icon('redplane.png')
    slipmap.add_object(
        mp_slipmap.SlipIcon('plane', (0, 0),
                            icon,
                            layer=3,
                            rotation=0,
                            follow=True,
                            trail=mp_slipmap.SlipTrail()))

    for flag in opts.flag:
        a = flag.split(',')
        lat = a[0]
        lon = a[1]
        icon = 'flag.png'
        if len(a) > 2:
            icon = a[2] + '.png'
        icon = slipmap.icon(icon)
        slipmap.add_object(
            mp_slipmap.SlipIcon('icon - %s' % str(flag),
                                (float(lat), float(lon)),
                                icon,
                                layer=3,
                                rotation=0,
                                follow=False))

    if opts.mission:
        from pymavlink import mavwp
        wp = mavwp.MAVWPLoader()
        wp.load(opts.mission)
        plist = wp.polygon_list()
        if len(plist) > 0:
            for i in range(len(plist)):
                slipmap.add_object(
                    mp_slipmap.SlipPolygon('Mission-%s-%u' % (opts.mission, i),
                                           plist[i],
                                           layer='Mission',
                                           linewidth=2,
                                           colour=(255, 255, 255)))

    if opts.mavlog:
        mpos = mav_position.MavInterpolator()
        mpos.set_logfile(opts.mavlog)
    else:
        mpos = None

    if opts.gammalog is not None:
        gamma = parse_gamma_log(opts.gammalog)
    else:
        gamma = None

    if opts.kmzlog:
        kmzpos = mav_position.KmlPosition(opts.kmzlog)
    else:
        kmzpos = None

    if opts.triggerlog:
        triggerpos = mav_position.TriggerPosition(opts.triggerlog)
    else:
        triggerpos = None

    # create a simple lens model using the focal length
    C_params = cam_params.CameraParams(lens=opts.lens,
                                       sensorwidth=opts.sensorwidth)

    if opts.camera_params:
        C_params.load(opts.camera_params)

    camera_settings = MPSettings([
        MPSetting('roll_stabilised', bool, opts.roll_stabilised,
                  'Roll Stabilised'),
        MPSetting('altitude',
                  int,
                  opts.altitude,
                  'Altitude',
                  range=(0, 10000),
                  increment=1),
        MPSetting(
            'minalt', int, 30, 'MinAltitude', range=(0, 10000), increment=1),
        MPSetting('mpp100',
                  float,
                  0.0977,
                  'MPPat100m',
                  range=(0, 10000),
                  increment=0.001),
        MPSetting('rotate180', bool, opts.rotate_180, 'rotate180'),
        MPSetting('filter_type',
                  str,
                  'simple',
                  'Filter Type',
                  choice=['simple', 'compactness']),
        MPSetting('quality',
                  int,
                  75,
                  'Compression Quality',
                  range=(1, 100),
                  increment=1),
        MPSetting('thumbsize',
                  int,
                  opts.thumbsize,
                  'Thumbnail Size',
                  range=(10, 200),
                  increment=1),
        MPSetting('minscore',
                  int,
                  opts.minscore,
                  'Min Score',
                  range=(0, 1000),
                  increment=1,
                  tab='Scoring'),
        MPSetting('brightness',
                  float,
                  1.0,
                  'Display Brightness',
                  range=(0.1, 10),
                  increment=0.1,
                  digits=2,
                  tab='Display')
    ],
                                 title='Camera Settings')

    image_settings = MPSettings([
        MPSetting('MinRegionArea',
                  float,
                  0.3,
                  range=(0, 100),
                  increment=0.05,
                  digits=2,
                  tab='Image Processing'),
        MPSetting('MaxRegionArea',
                  float,
                  4.0,
                  range=(0, 100),
                  increment=0.1,
                  digits=1),
        MPSetting('MinRegionSize',
                  float,
                  0.1,
                  range=(0, 100),
                  increment=0.05,
                  digits=2),
        MPSetting('MaxRegionSize',
                  float,
                  3.0,
                  range=(0, 100),
                  increment=0.1,
                  digits=1),
        MPSetting('MaxRarityPct',
                  float,
                  0.02,
                  range=(0, 100),
                  increment=0.01,
                  digits=2),
        MPSetting('RegionMergeSize',
                  float,
                  1.0,
                  range=(0, 100),
                  increment=0.1,
                  digits=1),
        MPSetting('BlueEmphasis', bool, opts.blue_emphasis),
        MPSetting('SaveIntermediate', bool, opts.debug)
    ],
                                title='Image Settings')

    mosaic = cuav_mosaic.Mosaic(slipmap,
                                C=C_params,
                                camera_settings=camera_settings,
                                image_settings=image_settings,
                                start_menu=True,
                                classify=opts.categories,
                                thumb_size=opts.mosaic_thumbsize)

    joelog = cuav_joe.JoeLog(None)

    if opts.view:
        viewer = mp_image.MPImage(title='Image', can_zoom=True, can_drag=True)

    for f in files:
        if not mosaic.started():
            print("Waiting for startup")
            while not mosaic.started():
                mosaic.check_events()
                time.sleep(0.01)

        if mpos:
            # get the position by interpolating telemetry data from the MAVLink log file
            # this assumes that the filename contains the timestamp
            if gamma is not None:
                frame_time = parse_gamma_time(f, gamma)
            else:
                frame_time = cuav_util.parse_frame_time(f)
            frame_time += opts.time_offset
            if camera_settings.roll_stabilised:
                roll = 0
            else:
                roll = None
            try:
                pos = mpos.position(frame_time, roll=roll)
            except Exception:
                print("No position available for %s" % frame_time)
                # skip this frame
                continue
        elif kmzpos is not None:
            pos = kmzpos.position(f)
        elif triggerpos is not None:
            pos = triggerpos.position(f)
        else:
            # get the position using EXIF data
            pos = mav_position.exif_position(f)
            pos.time += opts.time_offset

        # update the plane icon on the map
        if pos is not None:
            slipmap.set_position('plane', (pos.lat, pos.lon), rotation=pos.yaw)
            if camera_settings.altitude > 0:
                pos.altitude = camera_settings.altitude

        # check for any events from the map
        slipmap.check_events()
        mosaic.check_events()

        im_orig = cuav_util.LoadImage(f, rotate180=camera_settings.rotate180)
        if im_orig is None:
            continue
        (w, h) = cuav_util.image_shape(im_orig)

        if not opts.camera_params:
            C_params.set_resolution(w, h)

        im_full = im_orig

        im_640 = cv.CreateImage((640, 480), 8, 3)
        cv.Resize(im_full, im_640, cv.CV_INTER_NN)
        im_640 = numpy.ascontiguousarray(cv.GetMat(im_640))
        im_full = numpy.ascontiguousarray(cv.GetMat(im_full))

        count = 0
        total_time = 0

        t0 = time.time()
        img_scan = im_full

        scan_parms = {}
        for name in image_settings.list():
            scan_parms[name] = image_settings.get(name)
        scan_parms['SaveIntermediate'] = float(scan_parms['SaveIntermediate'])
        scan_parms['BlueEmphasis'] = float(scan_parms['BlueEmphasis'])

        if pos is not None:
            (sw, sh) = cuav_util.image_shape(img_scan)
            altitude = pos.altitude
            if altitude < camera_settings.minalt:
                altitude = camera_settings.minalt
            scan_parms['MetersPerPixel'] = camera_settings.mpp100 * altitude / 100.0

            regions = scanner.scan(img_scan, scan_parms)
        else:
            regions = scanner.scan(img_scan)
        regions = cuav_region.RegionsConvert(regions,
                                             cuav_util.image_shape(img_scan),
                                             cuav_util.image_shape(im_full))
        count += 1
        t1 = time.time()

        frame_time = pos.time

        regions = cuav_region.filter_regions(
            im_full,
            regions,
            frame_time=frame_time,
            min_score=camera_settings.minscore,
            filter_type=camera_settings.filter_type)

        scan_count += 1

        if pos and len(regions) > 0:
            altitude = camera_settings.altitude
            if altitude <= 0:
                altitude = None
            joelog.add_regions(frame_time,
                               regions,
                               pos,
                               f,
                               width=w,
                               height=h,
                               altitude=altitude)

        mosaic.add_image(pos.time, f, pos)

        region_count += len(regions)

        if len(regions) > 0:
            composite = cuav_mosaic.CompositeThumbnail(
                cv.GetImage(cv.fromarray(im_full)), regions)
            thumbs = cuav_mosaic.ExtractThumbs(composite, len(regions))
            mosaic.add_regions(regions, thumbs, f, pos)

        if opts.view:
            img_view = img_scan
            (wview, hview) = cuav_util.image_shape(img_view)
            mat = cv.fromarray(img_view)
            for r in regions:
                r.draw_rectangle(mat, (255, 0, 0))
            cv.CvtColor(mat, mat, cv.CV_BGR2RGB)
            viewer.set_image(mat)
            viewer.set_title('Image: ' + os.path.basename(f))

        total_time += (t1 - t0)
        if t1 != t0:
            print('%s scan %.1f fps  %u regions [%u/%u]' %
                  (os.path.basename(f), count / total_time, region_count,
                   scan_count, num_files))
Example #13
def main():
    os.chdir(sys.argv[1])

    #print "DELETE ALL FILES FIRST!"

    #tree = et.parse("project.xml")
    #movie = tree.getroot()
    #start_frame = int( movie.attrib["start_frame"] )
    #end_frame = int( movie.attrib["end_frame"] )

    f_shots = open("shots.txt")
    shots = [
        (int(start), int(end))
        for start, end in [line.split("\t")[0:2] for line in f_shots if line]
    ]
    f_shots.close()

    f_chapters = open("chapters.txt")
    chapters = [int(line) for line in f_chapters if line]
    f_chapters.close()
    # fix first and add last frame (disabled)
    # chapters[0] = start_frame
    # chapters.append(end_frame)

    os.chdir("shot_colors")
    try:
        os.mkdir(OUTPUT_DIR_NAME)
    except:
        pass

    filenames = glob.glob("shot_colors_*.png")

    last_shot_nr = 0
    ch = 1
    for i, shot in enumerate(shots):
        start_frame, end_frame = shot
        if ch == len(chapters):  # will this ever happen, freder?
            print "den rest noch"
            #print " ".join(filenames[last_shot_nr:])
            os.system("convert %s -append chapters\\chapter_%02d.png" %
                      (" ".join(filenames[last_shot_nr:]), ch))
            break
        elif end_frame >= chapters[ch]:
            #if end_frame >= chapters[ch]:
            print ch, ":", last_shot_nr, "->", i - 1
            print " ".join(filenames[last_shot_nr:i])
            os.system("convert %s -append chapters\\chapter_%02d.png" %
                      (" ".join(filenames[last_shot_nr:i]), ch))
            last_shot_nr = i
            ch += 1

    os.chdir(OUTPUT_DIR_NAME)

    for file_nr, file in enumerate(os.listdir(os.getcwd())):
        if os.path.isdir(file):
            continue

        img_orig = cv.LoadImageM(file)
        w, h = img_orig.cols, img_orig.rows

        img_hls = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(img_orig, img_hls, cv.CV_BGR2HLS)

        output_img = cv.CreateImage((PIXELS_PER_COLOR * NUM_CLUSTERS, h),
                                    cv.IPL_DEPTH_8U, 3)

        # convert to numpy array
        a = numpy.asarray(cv.GetMat(img_hls))
        a = a.reshape(a.shape[0] * a.shape[1],
                      a.shape[2])  # make it 1-dimensional

        # set initial centroids
        init_cluster = []
        step = w / NUM_CLUSTERS
        #for x, y in [(0*step, h*0.1), (1*step, h*0.3), (2*step, h*0.5), (3*step, h*0.7), (4*step, h*0.9)]:
        for x, y in [(0 * step, h * 0.1), (1 * step, h * 0.1),
                     (2 * step, h * 0.3), (3 * step, h * 0.3),
                     (4 * step, h * 0.5), (5 * step, h * 0.5),
                     (6 * step, h * 0.7), (7 * step, h * 0.7),
                     (8 * step, h * 0.9), (9 * step, h * 0.9)]:
            x = int(x)
            y = int(y)
            init_cluster.append(a[y * w + x])

        centroids, labels = scipy.cluster.vq.kmeans2(a,
                                                     numpy.array(init_cluster))

        vecs, dist = scipy.cluster.vq.vq(a, centroids)  # assign codes
        counts, bins = scipy.histogram(vecs,
                                       len(centroids))  # count occurrences
        centroid_count = []
        for i, count in enumerate(counts):
            if count > 0:
                centroid_count.append((centroids[i].tolist(), count))

        centroid_count.sort(hls_sort2)

        px_count = w * h
        x = 0
        for item in centroid_count:
            count = item[1] * (PIXELS_PER_COLOR * NUM_CLUSTERS)
            count = int(math.ceil(count / float(px_count)))
            centroid = item[0]
            for l in range(count):
                if x + l >= PIXELS_PER_COLOR * NUM_CLUSTERS:
                    break
                for y in range(h):
                    cv.Set2D(output_img, y, x + l,
                             (centroid[0], centroid[1], centroid[2]))
            x += count

        output_img_rgb = cv.CreateImage(cv.GetSize(output_img),
                                        cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(output_img, output_img_rgb, cv.CV_HLS2BGR)
        cv.SaveImage(file, output_img_rgb)

        # save to text-file
        if file_nr == 0:
            f_out = open("..\\..\\chapter_colors.txt", "w")
            f_out.write("")  # reset
            f_out.close()

        f_out = open("..\\..\\chapter_colors.txt", "a")
        row = cv.GetRow(output_img_rgb, 0)
        WIDTH = row.cols
        #print WIDTH

        data_items = []
        counter = 0
        last_px = cv.Get1D(row, 0)
        for i in range(WIDTH):
            px = cv.Get1D(row, i)
            if px == last_px:
                counter += 1
                if i == WIDTH - 1:
                    #f_out.write("%d, %d, %d, %d _ " % (int(last_px[2]), int(last_px[1]), int(last_px[0]), counter))
                    data_items.append(
                        "%d, %d, %d, %d" % (int(last_px[2]), int(
                            last_px[1]), int(last_px[0]), counter))
                continue
            else:
                #f_out.write("%d, %d, %d, %d _ " % (int(last_px[2]), int(last_px[1]), int(last_px[0]), counter))
                data_items.append("%d, %d, %d, %d" % (int(
                    last_px[2]), int(last_px[1]), int(last_px[0]), counter))
                counter = 1
                last_px = px

        print NUM_CLUSTERS - len(data_items), "colors missing"
        for j in range(NUM_CLUSTERS -
                       len(data_items)):  # sometimes there are fewer colors
            data_items.append("0, 0, 0, 0")
        f_out.write(" _ ".join(data_items))
        f_out.write("\n")
        f_out.close()

    os.system("convert chapter_*.png -append _CHAPTERS.png")
    return
Example #14
cv.NamedWindow("img", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("gray", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("bin_img", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("cnt", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("countours", cv.CV_WINDOW_AUTOSIZE)

cv.CvtColor(img, gray, cv.CV_RGB2GRAY)
cv.InRangeS(gray, cv.Scalar(1), cv.Scalar(250), bin_img)
cv.Canny(bin_img, cnt, 10, 100, 3)

storage = cv.CreateMemStorage(0)
contours = cv.FindContours(bin_img, storage, cv.CV_RETR_TREE,
                           cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))

while contours != None:
    cv.DrawContours(dst, contours, cv.CV_RGB(250, 0, 0), cv.CV_RGB(0, 0, 250),
                    2, 1, 8)
    contours = contours.h_next()

mat = cv.GetMat(dst, 0)
cv.Save('matrix.xml', mat)

cv.ShowImage("img", img)
cv.ShowImage("gray", gray)
cv.ShowImage("bin_img", bin_img)
cv.ShowImage("cnt", cnt)
cv.ShowImage("contours", dst)

cv.WaitKey(0)
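The snippet above uses img, gray, bin_img, cnt, and dst without creating them. A plausible setup, assuming 8-bit single-channel working images matching the input size (an illustration, not the original author's code):

img = cv.LoadImage("input.png")  # hypothetical input
gray = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
bin_img = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
cnt = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
dst = cv.CloneImage(img)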
Example #15
File: im.py  Project: ahmetech/breakout
def cvimg2numpy(cvimg):
  return numpy.asarray(cv.GetMat(cvimg))
Example #16
def getCenterHorizontal5(img):
    sub = cv.GetSubRect(img, (2 * img.width / 5, 0, img.width / 5, img.height))
    return cv.GetMat(sub)
Example #17
def main():
    os.chdir(sys.argv[1])
    output_dir = os.path.join(OUTPUT_DIR_NAME, OUTPUT_DIR_NAME)
    try:
        os.mkdir(output_dir)
    except:
        pass

    os.chdir(OUTPUT_DIR_NAME)
    for file in os.listdir(os.getcwd()):
        if os.path.isdir(file):
            continue
        print(file)
        if file == ".DS_Store":
            print("Stupid .DS_Store files from Finder. Ignore these.")
        else:
            img_orig = cv.LoadImageM(file)
            w, h = img_orig.cols, img_orig.rows

            img_hls = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 3)
            cv.CvtColor(img_orig, img_hls, cv.CV_BGR2HLS)

            output_img = cv.CreateImage((PIXELS_PER_COLOR * NUM_CLUSTERS, h),
                                        cv.IPL_DEPTH_8U, 3)

            # convert to numpy array
            a = numpy.asarray(cv.GetMat(img_hls))
            a = a.reshape(a.shape[0] * a.shape[1],
                          a.shape[2])  # make it 1-dimensional

            # set initial centroids
            init_cluster = []
            step = w / NUM_CLUSTERS
            for x, y in [(0 * step, h * 0.1), (1 * step, h * 0.3),
                         (2 * step, h * 0.5), (3 * step, h * 0.7),
                         (4 * step, h * 0.9)]:
                x = int(x)
                y = int(y)
                init_cluster.append(a[y * w + x])
            centroids, labels = scipy.cluster.vq.kmeans2(
                a, numpy.array(init_cluster))

            vecs, dist = scipy.cluster.vq.vq(a, centroids)  # assign codes
            counts, bins = scipy.histogram(vecs,
                                           len(centroids))  # count occurrences
            centroid_count = []
            for i, count in enumerate(counts):
                if count > 0:
                    centroid_count.append((centroids[i].tolist(), count))

            #centroids = centroids.tolist()
            #centroids.sort(hls_sort)

            centroid_count.sort(hls_sort2)

            px_count = w * h
            x = 0
            for item in centroid_count:
                count = item[1] * (PIXELS_PER_COLOR * NUM_CLUSTERS)
                count = int(math.ceil(count / float(px_count)))
                centroid = item[0]
                for l in range(count):
                    if x + l >= PIXELS_PER_COLOR * NUM_CLUSTERS:
                        break
                    for y in range(h):
                        cv.Set2D(output_img, y, x + l,
                                 (centroid[0], centroid[1], centroid[2]))
                x += count

            #for centroid_nr, centroid in enumerate(centroids):
            #	for j in range(PIXELS_PER_COLOR):
            #		x = centroid_nr*PIXELS_PER_COLOR + j
            #		for y in range(h):
            #			cv.Set2D(output_img, y, x, (centroid[0], centroid[1], centroid[2]))

            output_img_rgb = cv.CreateImage(cv.GetSize(output_img),
                                            cv.IPL_DEPTH_8U, 3)
            cv.CvtColor(output_img, output_img_rgb, cv.CV_HLS2BGR)
            cv.SaveImage(os.path.join(OUTPUT_DIR_NAME, file), output_img_rgb)

    print("appending...")
    os.chdir(OUTPUT_DIR_NAME)
    os.system("convert shot_colors_*.png -append result.png")

    #raw_input("- done -")
    return
Example #18
def extractVertical(img, vstart, vend):
    sub = cv.GetSubRect(img, (0, vstart, img.width, vend + 1))
    return cv.GetMat(sub)
Example #19
def track(bgr_image, threshold=100):
    '''Accepts BGR image and optional object threshold between 0 and 255 (default = 100).
       Returns: (x,y) coordinates of centroid if found
                (-1,-1) if no centroid was found
                None if user hit ESC
    '''

    # Extract bytes, width, and height
    bgr_bytes = bgr_image.tostring()
    width = bgr_image.width
    height = bgr_image.height

    # Create separate red, green, and blue image matrices from bytes
    r_image = _create_grayscale_mat(bgr_bytes, width, height, 2)
    b_image = _create_grayscale_mat(bgr_bytes, width, height, 0)
    g_image = _create_grayscale_mat(bgr_bytes, width, height, 1)

    # Create an image of 3s for dividing channel contributions by 3
    threes_image = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
    cv.Set(threes_image, 3)

    # Remove 1/3 of green and red components from blue
    #_div_and_sub(b_image, r_image, threes_image)
    #_div_and_sub(b_image, g_image, threes_image)

    # Remove 1/3 of green and blue components from red
    #_div_and_sub(r_image, g_image, threes_image)
    #_div_and_sub(r_image, b_image, threes_image)

    #Remove 1/3 of red and blue components from green
    _div_and_sub(g_image, r_image, threes_image)
    _div_and_sub(g_image, b_image, threes_image)

    #Threshold and erode green image
    cv.Threshold(g_image, g_image, threshold, 255, cv.CV_THRESH_BINARY)
    cv.Erode(g_image, g_image)

    # Threshold and erode red image
    #cv.Threshold(r_image, r_image, threshold, 255, cv.CV_THRESH_BINARY)
    #cv.Erode(r_image, r_image)

    # Threshold and erode blue image
    #cv.Threshold(b_image, b_image, threshold, 255, cv.CV_THRESH_BINARY)
    #cv.Erode(b_image, b_image)

    # Find centroid of eroded image
    moments = cv.Moments(cv.GetMat(g_image), 1)  # binary flag

    #moments = cv.Moments(cv.GetMat(r_image), 1) # binary flag

    #moments = cv.Moments(cv.GetMat(b_image), 1) # binary flag

    centroid_x = _centroid(moments, 1, 0)
    centroid_y = _centroid(moments, 0, 1)

    # Assume no centroid
    ctr = (-1, -1)

    # Use centroid if it exists
    if centroid_x is not None and centroid_y is not None:

        ctr = (centroid_x, centroid_y)

        # Put black circle in at centroid in image
        cv.Circle(bgr_image, ctr, 4, (0, 0, 0))

    # Display full-color image
    cv.NamedWindow(WINDOW_NAME)
    cv.ShowImage(WINDOW_NAME, bgr_image)

    # Force image display, setting centroid to None on ESC key input
    if cv.WaitKey(5) == 27:
        ctr = None

    # Return coordinates of centroid
    return ctr
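_create_grayscale_mat is the helper from Example #6; _div_and_sub and _centroid are not shown in any of these examples. Plausible sketches inferred from the call sites (assumptions, not the original implementations):

def _div_and_sub(dst_image, other_image, divisor_image):
    # dst_image -= other_image / divisor_image, computed in place
    scratch = cv.CloneMat(cv.GetMat(other_image))
    cv.Div(other_image, divisor_image, scratch)
    cv.Sub(dst_image, scratch, dst_image)

def _centroid(moments, x_order, y_order):
    # normalized first-order moment; None when the image has no white pixels
    area = cv.GetSpatialMoment(moments, 0, 0)
    if area == 0:
        return None
    return int(cv.GetSpatialMoment(moments, x_order, y_order) / area)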
Example #20
def extractHorizontal(img, hstart, hend):
    sub = cv.GetSubRect(img, (hstart, 0, hend, img.height))
    return cv.GetMat(sub)
Example #21
    def Image_callback(self, image):
        try:
            self.matImage = np.uint8(
                cv.GetMat(self.cvbridge.imgmsg_to_cv(image, "passthrough")))
        except CvBridgeError, e:
            rospy.logwarn('Exception converting ROS image to opencv:  %s' % e)
Example #22
    def Image_callback(self, image):
        if self.bInitialized:
            self.stampImage = image.header.stamp
            try:
                imgInput = np.uint8(cv.GetMat(self.cvbridge.imgmsg_to_cv(image, 'passthrough')))
            except CvBridgeError, e:
                rospy.logwarn('Exception CvBridgeError in image callback: %s' % e)

            
            if (not self.bInitialized_images):
                self.InitializeImages(imgInput.shape)
            
            if self.bInitialized_images:
                xText = 25
                yText = 25
                dyText = 20
                
                self.imgProcessed = copy.copy(imgInput)
                self.imgDisplay = cv2.cvtColor(imgInput, cv.CV_GRAY2RGB)
                
                display_text = 'checker_size=%0.3f' % self.checker_size
                cv2.putText(self.imgDisplay, display_text,
                           (xText,yText), cv.CV_FONT_HERSHEY_TRIPLEX, 0.5, self.colorFont)
                yText += dyText
        
                display_text = 'originArena = [%0.0f, %0.0f]' % (self.ptOriginArena.point.x, self.ptOriginArena.point.y)
                cv2.putText(self.imgDisplay, display_text, (xText,yText), cv.CV_FONT_HERSHEY_TRIPLEX, 0.5, self.colorFont)
                yText += dyText
                
                # Update image mask
                cv2.circle(self.imgMask, 
                           (int(self.ptOriginArena.point.x), int(self.ptOriginArena.point.y)), 
                           int(self.radiusMask), self.colorMax, cv.CV_FILLED)
                self.imgProcessed = cv2.bitwise_and(self.imgProcessed, self.imgMask)
                self.FindExtrinsics()

                # Draw the arena origin.
                cv2.circle(self.imgDisplay, 
                           (int(self.ptOriginArena.point.x),int(self.ptOriginArena.point.y)), 
                           3, cv.CV_RGB(0,self.colorMax,0), cv.CV_FILLED)
                
                # Draw the mask circle
                cv2.circle(self.imgDisplay, 
                           (int(self.ptOriginArena.point.x),int(self.ptOriginArena.point.y)), 
                           int(self.radiusMask), cv.CV_RGB(0,self.colorMax,0))
                
                
                display_text = 'radiusMask = ' + str(int(self.radiusMask))
                cv2.putText(self.imgDisplay, display_text, (xText,yText), cv.CV_FONT_HERSHEY_TRIPLEX, 0.5, self.colorFont)
                yText += dyText

                display_text = 'rvec cur=[%+3.3f, %+3.3f, %+3.3f] avg=[%3.3f, %3.3f, %3.3f]' % ((self.rvec2[0]+np.pi)%(2*np.pi)-np.pi,     
                                                                                                (self.rvec2[1]+np.pi)%(2*np.pi)-np.pi,     
                                                                                                (self.rvec2[2]+np.pi)%(2*np.pi)-np.pi,
                                                                                                (self.rvec2_avg[0]+np.pi)%(2*np.pi)-np.pi, 
                                                                                                (self.rvec2_avg[1]+np.pi)%(2*np.pi)-np.pi, 
                                                                                                (self.rvec2_avg[2]+np.pi)%(2*np.pi)-np.pi)
                cv2.putText(self.imgDisplay, display_text,
                           (xText,yText), cv.CV_FONT_HERSHEY_TRIPLEX, 0.5, self.colorFont)
                yText += dyText
                
                display_text = 'tvec cur=[%+3.3f, %+3.3f, %+3.3f] avg=[%3.3f, %3.3f, %3.3f]' % (self.tvec2[0],     self.tvec2[1],     self.tvec2[2],
                                                                                                self.tvec2_avg[0], self.tvec2_avg[1], self.tvec2_avg[2])
                cv2.putText(self.imgDisplay, display_text,
                            (xText,yText), cv.CV_FONT_HERSHEY_TRIPLEX, 0.5, self.colorFont)
                yText += dyText

                display_text = 'pixels_per_mm=%3.3f' % (self.K_rect[0,0]/self.tvec2_avg[2])
                cv2.putText(self.imgDisplay, display_text,
                            (xText,yText), cv.CV_FONT_HERSHEY_TRIPLEX, 0.5, self.colorFont)
                yText += dyText
    
                
                cv2.imshow('Camera Arena Calibration', self.imgDisplay)
                cv2.waitKey(3)
Example #23
    def find_centroid(self, binary):
        mat = cv.GetMat(binary)
        moments = cv.Moments(mat)
        return (int(moments.m10 / moments.m00), int(moments.m01 / moments.m00))
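A defensive variant (a sketch): moments.m00 is zero for an all-black mask, so the division above would raise ZeroDivisionError.

    def find_centroid_safe(self, binary):
        moments = cv.Moments(cv.GetMat(binary))
        if moments.m00 == 0:
            return None
        return (int(moments.m10 / moments.m00), int(moments.m01 / moments.m00))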
Example #24
File: scantest.py  Project: weihli/cuav
def process(args):
    '''process a set of files'''

    global slipmap, mosaic
    scan_count = 0
    files = []
    for a in args:
        if os.path.isdir(a):
            files.extend(glob.glob(os.path.join(a, '*.pgm')))
        else:
            files.append(a)
    files.sort()
    num_files = len(files)
    print("num_files=%u" % num_files)
    region_count = 0
    joes = []

    if opts.mavlog:
        mpos = mav_position.MavInterpolator(gps_lag=opts.gps_lag)
        mpos.set_logfile(opts.mavlog)
    else:
        mpos = None

    if opts.boundary:
        boundary = cuav_util.polygon_load(opts.boundary)
    else:
        boundary = None

    if opts.mosaic:
        slipmap = mp_slipmap.MPSlipMap(service='GoogleSat',
                                       elevation=True,
                                       title='Map')
        icon = slipmap.icon('planetracker.png')
        slipmap.add_object(
            mp_slipmap.SlipIcon('plane', (0, 0),
                                icon,
                                layer=3,
                                rotation=0,
                                follow=True,
                                trail=mp_slipmap.SlipTrail()))
        C_params = cam_params.CameraParams(lens=opts.lens)
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
                            '..', 'cuav', 'data', 'chameleon1_arecont0.json')
        C_params.load(path)
        mosaic = cuav_mosaic.Mosaic(slipmap, C=C_params)
        if boundary is not None:
            mosaic.set_boundary(boundary)

    if opts.joe:
        joes = cuav_util.polygon_load(opts.joe)
        if boundary:
            for i in range(len(joes)):
                joe = joes[i]
                if cuav_util.polygon_outside(joe, boundary):
                    print("Error: joe outside boundary", joe)
                    return
                icon = slipmap.icon('flag.png')
                slipmap.add_object(
                    mp_slipmap.SlipIcon('joe%u' % i, (joe[0], joe[1]),
                                        icon,
                                        layer=4))

    joelog = cuav_joe.JoeLog('joe.log')

    if opts.view:
        viewer = mp_image.MPImage(title='Image')

    frame_time = 0

    scan_parms = {
        'MinRegionArea': opts.min_region_area,
        'MaxRegionArea': opts.max_region_area,
        'MinRegionSize': opts.min_region_size,
        'MaxRegionSize': opts.max_region_size,
        'MaxRarityPct': opts.max_rarity_pct,
        'RegionMergeSize': opts.region_merge,
        'SaveIntermediate': float(opts.debug),
        'MetersPerPixel': opts.meters_per_pixel
    }

    if opts.filter_type == 'compactness':
        calculate_compactness = True
        print("Using compactness filter")
    else:
        calculate_compactness = False

    for f in files:
        if mpos:
            frame_time = cuav_util.parse_frame_time(f)
            try:
                if opts.roll_stabilised:
                    roll = 0
                else:
                    roll = None
                pos = mpos.position(frame_time, opts.max_deltat, roll=roll)
                slipmap.set_position('plane', (pos.lat, pos.lon),
                                     rotation=pos.yaw)
            except mav_position.MavInterpolatorException as e:
                print e
                pos = None
        else:
            pos = None

        # check for any events from the map
        if opts.mosaic:
            slipmap.check_events()
            mosaic.check_events()

        if f.endswith('.pgm'):
            pgm = cuav_util.PGM(f)
            im = pgm.array
            if pgm.eightbit:
                im_8bit = im
            else:
                im_8bit = numpy.zeros((960, 1280, 1), dtype='uint8')
                if opts.gamma != 0:
                    scanner.gamma_correct(im, im_8bit, opts.gamma)
                else:
                    scanner.reduce_depth(im, im_8bit)
            im_full = numpy.zeros((960, 1280, 3), dtype='uint8')
            scanner.debayer(im_8bit, im_full)
            im_640 = numpy.zeros((480, 640, 3), dtype='uint8')
            scanner.downsample(im_full, im_640)
        else:
            im_orig = cv.LoadImage(f)
            (w, h) = cuav_util.image_shape(im_orig)
            im_full = im_orig
            im_640 = cv.CreateImage((640, 480), 8, 3)
            cv.Resize(im_full, im_640, cv.CV_INTER_NN)
            im_640 = numpy.ascontiguousarray(cv.GetMat(im_640))
            im_full = numpy.ascontiguousarray(cv.GetMat(im_full))

        count = 0
        total_time = 0
        if opts.fullres:
            img_scan = im_full
        else:
            img_scan = im_640

        t0 = time.time()
        for i in range(opts.repeat):
            regions = scanner.scan(img_scan, scan_parms)
            regions = cuav_region.RegionsConvert(
                regions, cuav_util.image_shape(img_scan),
                cuav_util.image_shape(im_full), calculate_compactness)
            count += 1
        t1 = time.time()

        if opts.filter:
            regions = cuav_region.filter_regions(im_full,
                                                 regions,
                                                 frame_time=frame_time,
                                                 min_score=opts.minscore,
                                                 filter_type=opts.filter_type)

        scan_count += 1

        # optionally link all the images with joe into a separate directory
        # for faster re-running of the test with just joe images
        if pos and opts.linkjoe and len(regions) > 0:
            cuav_util.mkdir_p(opts.linkjoe)
            if not cuav_util.polygon_outside((pos.lat, pos.lon), boundary):
                joepath = os.path.join(opts.linkjoe, os.path.basename(f))
                if os.path.exists(joepath):
                    os.unlink(joepath)
                os.symlink(f, joepath)

        if pos and len(regions) > 0:
            joelog.add_regions(frame_time,
                               regions,
                               pos,
                               f,
                               width=1280,
                               height=960,
                               altitude=opts.altitude)

            if boundary:
                regions = cuav_region.filter_boundary(regions, boundary, pos)

        region_count += len(regions)

        if opts.mosaic and len(regions) > 0:
            composite = cuav_mosaic.CompositeThumbnail(
                cv.GetImage(cv.fromarray(im_full)), regions)
            thumbs = cuav_mosaic.ExtractThumbs(composite, len(regions))
            mosaic.add_regions(regions, thumbs, f, pos)

        if opts.compress:
            jpeg = scanner.jpeg_compress(im_full, opts.quality)
            jpeg_filename = f[:-4] + '.jpg'
            if os.path.exists(jpeg_filename):
                print('jpeg %s already exists' % jpeg_filename)
                continue
            chameleon.save_file(jpeg_filename, jpeg)

        if opts.view:
            if opts.fullres:
                img_view = im_full
            else:
                img_view = img_scan
            mat = cv.fromarray(img_view)
            for r in regions:
                (x1, y1, x2, y2) = r.tuple()
                (w, h) = cuav_util.image_shape(img_view)
                x1 = x1 * w // 1280
                x2 = x2 * w // 1280
                y1 = y1 * h // 960
                y2 = y2 * h // 960
                cv.Rectangle(mat, (max(x1 - 2, 0), max(y1 - 2, 0)),
                             (x2 + 2, y2 + 2), (255, 0, 0), 2)
            cv.CvtColor(mat, mat, cv.CV_BGR2RGB)
            viewer.set_image(mat)

        total_time += (t1 - t0)
        if t1 != t0:
            print('%s scan %.1f fps  %u regions [%u/%u]' %
                  (f, count / total_time, region_count, scan_count, num_files))
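Example #25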
    def atualiza_brilhocontraste(self):

        if self.contraste > 0:
            delta = 127. * self.contraste / 100
            a = 255. / (255. - delta * 2)
            b = a * (self.brilho - delta)
        else:
            delta = -128. * self.contraste / 100
            a = (256. - delta * 2) / 255.
            b = a * self.brilho + delta

        cv.ConvertScale(self.imagem_fonte, self.imagem_destino, a, b)
        cv.ShowImage("Foto Tons Cinza", self.imagem_destino)
        self.calcularHistograma()
        cv.ShowImage("Histograma", self.imagemHistograma)

        cv.EqualizeHist(self.imagem_destino, self.imagem_destino)
        cv.ShowImage("Foto Equalizada", self.imagem_destino)
        self.calcularHistograma()
        cv.ShowImage("Histograma Equalizado", self.imagemHistograma)


if __name__ == "__main__":
    imagem = cv.LoadImageM('foto.jpg')
    cv.NamedWindow("Foto Original", 0)
    cv.ShowImage("Foto Original", imagem)
    imagem_cinza = cv.GetMat(cv.LoadImage('foto.jpg', 0))
    histograma = Histograma(imagem_cinza)
    cv.WaitKey(0)
Example #26
    def _hist(self, i):
        (hist, bin_edges) = np.histogram(np.asarray(cv.GetMat(i)), bins=50)
        rd.set("histogram", json.dumps(hist.tolist()))
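Context assumed by the snippet above (a sketch; none of this appears in the original): rd behaves like a Redis client, and np, json, and cv are imported.

import json
import numpy as np
import cv
import redis
rd = redis.StrictRedis()  # hypothetical client; anything with a .set() method works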
Example #27
def main():
    project_root_dir = sys.argv[1]
    os.chdir(project_root_dir)
    os.chdir(os.path.join(OUTPUT_DIR_NAME, OUTPUT_DIR_NAME))

    output_img = cv.CreateImage((WIDTH, WIDTH), cv.IPL_DEPTH_8U, 3)

    print(os.system("identify -format \"%k\" result.png"))
    print("reducing colors to 10")
    os.system("convert result.png +dither -colors 10 result_quant.png")

    img_orig = cv.LoadImageM("result_quant.png")
    output_img = cv.CreateImage((WIDTH, WIDTH), cv.IPL_DEPTH_8U, 3)

    img_hls = cv.CreateImage(cv.GetSize(img_orig), cv.IPL_DEPTH_8U, 3)
    cv.CvtColor(img_orig, img_hls, cv.CV_BGR2HLS)

    pixels = numpy.asarray(cv.GetMat(img_hls))
    d = {}

    print("counting...")
    for line in pixels:
        for px in line:
            if tuple(px) in d:
                d[tuple(px)] += 1
            else:
                d[tuple(px)] = 1

    colors = d.keys()
    #print "%d pixels, %d colors" % (img_orig.width*img_orig.height, len(colors))

    print("sorting...")
    #colors.sort(hls_sort)
    colors = sort_by_distance(colors)

    px_count = img_orig.width * img_orig.height
    x_pos = 0

    print("building image...")
    for color in colors:
        l = d[color] / float(px_count)
        l = int(math.ceil(l * WIDTH))

        for x in range(l):
            if x_pos + x >= WIDTH:
                break
            for y in range(WIDTH):
                cv.Set2D(output_img, y, x_pos + x,
                         (int(color[0]), int(color[1]), int(color[2])))
        x_pos += l

    print("saving...")
    output_img_rgb = cv.CreateImage(cv.GetSize(output_img), cv.IPL_DEPTH_8U, 3)
    cv.CvtColor(output_img, output_img_rgb, cv.CV_HLS2BGR)
    cv.SaveImage("_RESULT.png", output_img_rgb)

    f = open(os.path.join("..", "..", "colors.txt"), "w")
    row = cv.GetRow(output_img_rgb, 0)

    counter = 0
    last_px = cv.Get1D(row, 0)
    for i in range(WIDTH):
        px = cv.Get1D(row, i)
        if px == last_px:
            counter += 1
        else:
            f.write("%d, %d, %d, %d\n" %
                    (int(last_px[2]), int(last_px[1]), int(last_px[0]), counter))
            counter = 1
            last_px = px
    # Flush the final run (the original special-cased i == WIDTH - 1 and could
    # drop a run that started on the last pixel)
    f.write("%d, %d, %d, %d\n" %
            (int(last_px[2]), int(last_px[1]), int(last_px[0]), counter))
    f.close()

    return
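The per-pixel counting loop above can be collapsed into a single numpy call; a
sketch, assuming pixels is the (H, W, 3) HLS array obtained from cv.GetMat:

import numpy as np

def count_colors(pixels):
    # Flatten to one row per pixel, then count unique rows
    flat = pixels.reshape(-1, pixels.shape[-1])
    colors, counts = np.unique(flat, axis=0, return_counts=True)
    return {tuple(c): int(n) for c, n in zip(colors, counts)}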
Example #28
0
    def run(self):
        # Initialize
        #log_file_name = "tracker_output.log"
        #log_file = file( log_file_name, 'a' )

        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame),
                                               cv.IPL_DEPTH_32F, 3)
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)

        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)

        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)

        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []

        t0 = time.time()

        # For toggling display:
        image_list = ["camera", "difference", "threshold", "display", "faces"]
        image_index = 0  # Index into image_list

        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1,
                                cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        ###############################
        ### Face detection stuff
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        haar_cascade = cv.Load('haarcascades/haarcascade_frontalface_alt.xml')
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )

        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 3

        while True:

            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)

            frame_count += 1
            frame_t0 = time.time()

            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)

            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image,
                            running_average_in_display_color_depth, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth,
                       difference)

            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)
            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)

            grey_image_as_array = numpy.asarray(cv.GetMat(grey_image))
            non_black_coords_array = numpy.where(grey_image_as_array > 3)
            # Convert from numpy.where()'s two separate arrays to one list of
            # (x, y) tuples; list() so it can be len()'d and re-used below:
            non_black_coords_array = list(zip(non_black_coords_array[1],
                                              non_black_coords_array[0]))

            # Was using this to hold either pixel coords or polygon coords:
            points = []
            bounding_box_list = []

            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours(grey_image, mem_storage,
                                      cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            while contour:

                bounding_rect = cv.BoundingRect(list(contour))
                point1 = (bounding_rect[0], bounding_rect[1])
                point2 = (bounding_rect[0] + bounding_rect[2],
                          bounding_rect[1] + bounding_rect[3])

                bounding_box_list.append((point1, point2))
                polygon_points = cv.ApproxPoly(list(contour), mem_storage,
                                               cv.CV_POLY_APPROX_DP)

                # To track polygon points only (instead of every pixel):
                #points += list(polygon_points)

                # Draw the contours:
                ###cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly(grey_image, [list(polygon_points)],
                            cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(display_image, [polygon_points], 0,
                            cv.CV_RGB(255, 255, 255), 1, 0, 0)
                #cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)

                contour = contour.h_next()

            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are prolly just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
            for box in bounding_box_list:
                # box is ((x1, y1), (x2, y2)) from the contour pass above
                box_width = box[1][0] - box[0][0]
                box_height = box[1][1] - box[0][1]
                box_areas.append(box_width * box_height)

                #cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)

            average_box_area = 0.0
            if len(box_areas):
                average_box_area = float(sum(box_areas)) / len(box_areas)

            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[1][0] - box[0][0]
                box_height = box[1][1] - box[0][1]

                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area * 0.1:
                    trimmed_box_list.append(box)

            # Draw the trimmed box list:
            #for box in trimmed_box_list:
            #	cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )

            bounding_box_list = merge_collided_bboxes(trimmed_box_list)

            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle(display_image, box[0], box[1],
                             cv.CV_RGB(0, 255, 0), 1)

            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len(bounding_box_list)

            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.

            if frame_t0 - last_target_change_t < .350:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1:
                    estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1:
                    estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0

            # Clip to the user-supplied maximum:
            estimated_target_count = min(estimated_target_count, max_targets)

            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.

            # Using the numpy values directly (treating all pixels as points):
            points = non_black_coords_array
            center_points = []

            if len(points):

                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max(estimated_target_count,
                                 1)  # Need at least one target to look for.
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook

                #points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans(array(points), k_or_guess)

                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = (int(center_point[0]), int(center_point[1]))
                    center_points.append(center_point)
                    #cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    #cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)

            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []

            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []

                for center_point in center_points:
                    if box[0][0] < center_point[0] < box[1][0] and \
                            box[0][1] < center_point[1] < box[1][1]:

                        # This point is within the box.
                        center_points_in_box.append(center_point)

                # Now see if there are more than one.  If so, merge them.
                if len(center_points_in_box) > 1:
                    # Merge them:
                    # Two separate lists (x_list = y_list = [] would alias them)
                    x_list, y_list = [], []
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])

                    average_x = int(float(sum(x_list)) / len(x_list))
                    average_y = int(float(sum(y_list)) / len(y_list))

                    trimmed_center_points.append((average_x, average_y))

                    # Record that they were removed:
                    removed_center_points += center_points_in_box

                if len(center_points_in_box) == 1:
                    trimmed_center_points.append(
                        center_points_in_box[0])  # Just use it.

            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (center_point not in trimmed_center_points) and (
                        center_point not in removed_center_points):
                    trimmed_center_points.append(center_point)

            # Draw what we found:
            #for center_point in trimmed_center_points:
            #	center_point = ( int(center_point[0]), int(center_point[1]) )
            #	cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #	cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #	cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #	cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)

            # Determine if there are any new (or lost) targets:
            actual_target_count = len(trimmed_center_points)
            last_target_count = actual_target_count

            # Now build the list of physical entities (objects)
            this_frame_entity_list = []

            # An entity is list: [ name, color, last_time_seen, last_known_coords ]

            for target in trimmed_center_points:

                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}

                for entity in last_frame_entity_list:

                    entity_coords = entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]

                    distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2))
                    entity_distance_dict[distance] = entity

                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = sorted(entity_distance_dict.keys())

                for distance in distance_list:

                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[distance]

                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        #print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
                        continue

                    #print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[3] = target  # Update the new location
                    this_frame_entity_list.append(nearest_possible_entity)
                    #log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break

                if not entity_found:
                    # It's a new entity.
                    color = (random.randint(0, 255), random.randint(0, 255),
                             random.randint(0, 255))
                    # Encode to bytes for hashlib under Python 3
                    name = hashlib.md5((str(frame_t0) +
                                        str(color)).encode()).hexdigest()[:6]
                    last_time_seen = frame_t0

                    new_entity = [name, color, last_time_seen, target]
                    this_frame_entity_list.append(new_entity)
                    #log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.

            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    #log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append(entity)

            # For next frame:
            last_frame_entity_list = this_frame_entity_list

            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10,
                          cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point, 5,
                          cv.CV_RGB(c[0], c[1], c[2]), 3)

            #print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break

            # Toggle which image to show
            if chr(c) == 'd':
                image_index = (image_index + 1) % len(image_list)

            image_name = image_list[image_index]

            # Display frame to user
            if image_name == "camera":
                image = camera_image
                cv.PutText(image, "Camera (Normal)", text_coord, text_font,
                           text_color)
            elif image_name == "difference":
                image = difference
                cv.PutText(image, "Difference Image", text_coord, text_font,
                           text_color)
            elif image_name == "display":
                image = display_image
                cv.PutText(image, "Targets (w/AABBs and contours)", text_coord,
                           text_font, text_color)
            elif image_name == "threshold":
                # Convert the image to color.
                cv.CvtColor(grey_image, display_image, cv.CV_GRAY2RGB)
                image = display_image  # Re-use display image here
                cv.PutText(image, "Motion Mask", text_coord, text_font,
                           text_color)
            elif image_name == "faces":
                # Do face detection
                detect_faces(camera_image, haar_cascade, mem_storage)
                image = camera_image  # Re-use camera image here
                cv.PutText(image, "Face Detection", text_coord, text_font,
                           text_color)

            cv.ShowImage("Target", image)

            if self.writer:
                cv.WriteFrame(self.writer, image)

            #log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta,
                                                  processed_fps)
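merge_collided_bboxes() is called above but not defined in this excerpt; a
minimal sketch of one possible implementation, assuming each box is a
((x1, y1), (x2, y2)) tuple as built in the contour loop:

def merge_collided_bboxes(bbox_list):
    boxes = list(bbox_list)
    merged = True
    while merged:  # keep passing until no pair overlaps
        merged = False
        out = []
        while boxes:
            (ax1, ay1), (ax2, ay2) = boxes.pop()
            i = 0
            while i < len(boxes):
                (bx1, by1), (bx2, by2) = boxes[i]
                # Axis-aligned overlap test
                if ax1 <= bx2 and bx1 <= ax2 and ay1 <= by2 and by1 <= ay2:
                    # Grow the current box to cover both, drop the other
                    ax1, ay1 = min(ax1, bx1), min(ay1, by1)
                    ax2, ay2 = max(ax2, bx2), max(ay2, by2)
                    boxes.pop(i)
                    merged = True
                else:
                    i += 1
            out.append(((ax1, ay1), (ax2, ay2)))
        boxes = out
    return boxes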
Example #29
0
class uav_image_green:
    def __init__(self):
        self.image_pub = rospy.Publisher("/image", Image)
        cv2.namedWindow("Image window", 1)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/uav/downward_cam/camera/image",
                                          Image, self.callback)

    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        ### Start of Image Processing ######

        hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
        lower_green = np.array([50, 50, 50])
        upper_green = np.array([70, 255, 255])
        mask = cv2.inRange(hsv, lower_green, upper_green)
        mat = cv.GetMat(cv.fromarray(mask))
        moments = cv.Moments(mat)
        # Guard against an empty mask; m00 == 0 would divide by zero below
        # (the original's huge bounds check did not actually exclude that case)
        if moments.m00 != 0:
            global yc_green
            global xc_green
            yc_green = moments.m01 / moments.m00
            xc_green = moments.m10 / moments.m00
            width, height = cv.GetSize(mat)
            global max_right_green_x
            global max_right_green_y
            global max_left_green_x
            global max_left_green_y
            global bottom_green_y
            global bottom_green_x

            contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_NONE)
            cnt = contours[0]  # assumes the mask contains a single green blob
            #print(contours)

            leftmost = tuple(cnt[cnt[:, :, 0].argmin()][0])
            rightmost = tuple(cnt[cnt[:, :, 0].argmax()][0])
            bottommost = tuple(cnt[cnt[:, :, 1].argmax()][0])

            max_left_green_x = leftmost[0]
            max_left_green_y = leftmost[1]
            max_right_green_x = rightmost[0]
            max_right_green_y = rightmost[1]
            bottom_green_x = bottommost[0]
            bottom_green_y = bottommost[1]

            # max_right_green_x = 0
            # for a in range(int(round(xc_green)), width, 3):
            #    for b in range (0, height, 3):
            #       if(mat[b,a] == 0):
            #          continue
            #       elif(a > max_right_green_x):
            #          max_right_green_x = a
            #          max_right_green_y = b
            # global max_left_green_x
            # global max_left_green_y
            # max_left_green_x = width
            # for a2 in range(int(round(xc_green)), 0, -3):
            #    for b2 in range (0, height, 3):
            #       if(mat[b2,a2] == 0):
            #          continue
            #       elif(a2 < max_left_green_x):
            #          max_left_green_x = a2
            #          max_left_green_y = b2

            global nxc_green
            global nyc_green
            nxc_green = xc_green - 320
            nyc_green = yc_green - 240
        focal = 1690.0  # focal length in pixels (alternate calibration: 1097.51)
        # nxc_green is the global set above (assumes at least one detection)
        q = nxc_green / focal
        global bearing
        bearing = math.degrees(math.atan(q))
        # cv2.circle(cv_image,(int(max_right_green_x),int(yc_green)),3,(0,0,255),-1)
        # cv2.circle(cv_image,(int(max_left_green_y),int(yc_green)),3,(0,0,255),-1)
        #cv2.imshow('Thresholded', mask)

        ### End of Image Processing ######
        #cv2.imshow('UAV Image', cv_image)
        cv2.waitKey(1)

        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
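A cv2-only sketch of the centroid step above (cv2.moments returns a dict, so no
legacy cv.GetMat round-trip is needed):

def mask_centroid(mask):
    m = cv2.moments(mask, binaryImage=True)
    if m["m00"] == 0:
        return None  # empty mask: no green pixels
    return (m["m10"] / m["m00"], m["m01"] / m["m00"])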
class Target:
    def __init__(self):
        self.capture = cv.CaptureFromCAM(0)
        cv.NamedWindow("Target", 1)
        cv.NamedWindow("Threshold1", 1)
        cv.NamedWindow("Threshold2", 1)
        cv.NamedWindow("hsv", 1)

    def run(self):
        # Initiate font
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
        # Instantiate images
        hsv_img = cv.CreateImage(cv.GetSize(cv.QueryFrame(self.capture)), 8, 3)
        threshold_img1 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        threshold_img1a = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        threshold_img2 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        i = 0
        writer = cv.CreateVideoWriter("angle_tracking.avi",
                                      cv.CV_FOURCC('M', 'J', 'P', 'G'), 30,
                                      cv.GetSize(hsv_img), 1)
        while True:
            # Capture the image from the cam
            img = cv.QueryFrame(self.capture)
            # Convert the image to HSV
            cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)
            # Threshold the image to isolate the two colors
            cv.InRangeS(hsv_img, (165, 145, 100), (250, 210, 160),
                        threshold_img1)  # red
            cv.InRangeS(hsv_img, (0, 145, 100), (10, 210, 160),
                        threshold_img1a)  # red wraps around hue 0
            cv.Add(threshold_img1, threshold_img1a,
                   threshold_img1)  # combine the two red ranges
            cv.InRangeS(hsv_img, (105, 180, 40), (120, 260, 100),
                        threshold_img2)  # blue
            # Determine the moments of the two objects
            threshold_img1 = cv.GetMat(threshold_img1)
            threshold_img2 = cv.GetMat(threshold_img2)
            moments1 = cv.Moments(threshold_img1, 0)
            moments2 = cv.Moments(threshold_img2, 0)
            area1 = cv.GetCentralMoment(moments1, 0, 0)
            area2 = cv.GetCentralMoment(moments2, 0, 0)

            # Initialize the coordinates (the original's loop over coord_list
            # rebound the loop variable and did nothing):
            x1 = y1 = x2 = y2 = 0

            # There can be noise in the video, so ignore objects with small areas
            if area1 > 200000:
                # The center is the (1,0) and (0,1) spatial moments divided by the area
                x1 = int(cv.GetSpatialMoment(moments1, 1, 0) / area1)
                y1 = int(cv.GetSpatialMoment(moments1, 0, 1) / area1)
                # Draw circle and write the x, y position
                cv.Circle(img, (x1, y1), 2, (0, 255, 0), 20)
                cv.PutText(img, str(x1) + "," + str(y1), (x1, y1 + 20), font, 255)
            if area2 > 100000:
                x2 = int(cv.GetSpatialMoment(moments2, 1, 0) / area2)
                y2 = int(cv.GetSpatialMoment(moments2, 0, 1) / area2)
                cv.Circle(img, (x2, y2), 2, (0, 255, 0), 20)
                cv.PutText(img, str(x2) + "," + str(y2), (x2, y2 + 20), font, 255)
                cv.Line(img, (x1, y1), (x2, y2), (0, 255, 0), 4, cv.CV_AA)
                # Draw a horizontal reference line and the angle between it and
                # the segment joining the two centers; atan2 avoids dividing by
                # zero when the centers align vertically
                cv.Line(img, (x1, y1), (cv.GetSize(img)[0], y1),
                        (100, 100, 100, 100), 4, cv.CV_AA)
                angle = int(math.atan2(y1 - y2, x2 - x1) * 180 / math.pi)
                cv.PutText(img, str(angle), (x1 + 50, (y1 + y2) // 2), font, 255)
                #cv.WriteFrame(writer, img)
            # Display frames to users
            cv.ShowImage("Target", img)
            cv.ShowImage("Threshold1", threshold_img1)
            cv.ShowImage("Threshold2", threshold_img2)
            cv.ShowImage("hsv", hsv_img)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
        cv.DestroyAllWindows()
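The class is presumably driven the same way as the other examples; a sketch:

if __name__ == "__main__":
    t = Target()
    t.run()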