'''
@author Jeremy Barr
@date 5/1/2013
@brief Test program using OpenCV Python and raspicam. Background subtraction from camera images.
'''

import cv
from raspicam import RaspiCam

# capture image from camera
#capture = cv.CaptureFromCAM(0)
# Wait 200ms to initialize capture and take an image
#cv.WaitKey(200)
#frame = cv.QueryFrame(capture)
capture = RaspiCam()
frame = capture.piCapture()
''' frame is copied into temp and then smoothed with CV_BLUR.
CV_BLUR: linear convolution with a size1 x size2 box kernel (all 1's),
with subsequent scaling by 1/(size1 * size2).
'''
temp = cv.CloneImage(frame)
cv.Smooth(temp, temp, cv.CV_BLUR, 5, 5)

while True:
    # Grab a new frame from the RaspiCam each iteration (cv.QueryFrame only
    # works with cv captures, so use piCapture() as above).
    frame = capture.piCapture()
    diff = cv.CloneImage(frame)
    cv.AbsDiff(frame, temp, diff)  # difference against the blurred background
    cv.ShowImage('Original', frame)
    cv.ShowImage('Abs Diff', diff)
    c = cv.WaitKey(2)
    if c == 27:  # Break if user enters ESC
        break
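
For comparison, the same absdiff-based background subtraction can be sketched with the modern cv2 API. A minimal sketch, assuming a standard webcam at index 0 rather than the RaspiCam wrapper used above:

import cv2

cap = cv2.VideoCapture(0)                      # assumed: default webcam, not a RaspiCam
ok, background = cap.read()
if ok:
    background = cv2.blur(background, (5, 5))  # box filter, same kernel as cv.CV_BLUR above
while ok:
    ok, frame = cap.read()
    if not ok:
        break
    diff = cv2.absdiff(frame, background)      # per-pixel |frame - background|
    cv2.imshow('Original', frame)
    cv2.imshow('Abs Diff', diff)
    if cv2.waitKey(2) & 0xFF == 27:            # exit on ESC
        break
cap.release()
cv2.destroyAllWindows()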
Example #2
    def run(self):
        template_filename = os.path.join(root, 'flash', 'fft2', 'processed', 'first_screen.png')
        template = cv2.imread(template_filename, cv2.CV_LOAD_IMAGE_GRAYSCALE)

        cv2.imshow('image', template)
        w, h = template.shape[::-1]

        #find starting image
        map_filename = os.path.join(root, 'flash', 'fft2', 'processed', 'aligned_localization_data_map.png')

        mapper = LocalizeMap(map_filename)

        map_box = mapper.localize(template, None)
        (x0, y0, x1, y1) = map_box
        reference = mapper.reference[y0:y1, x0:x1]
        cv2.imshow('image', reference)
        #blank_image = np.zeros(template.shape, np.uint8)
        
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        return 
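        # NOTE: the early return above makes everything below in run() unreachable;
        # it is leftover webcam motion-tracking code, kept for reference.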

        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        first = True

        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]

            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            while contour:
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

            if len(points):
                center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
                cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 100, 0), 1)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
Example #3
    cv.FindHomography(p, h, p2h)
    #cv.ReleaseMat(p)
    #cv.ReleaseMat(h)
    return p2h


im_in = cv.LoadImage(sys.argv[1], 4)
#out = cvLoadImage(sys.argv[1], 4)

width = 1093  #800
height = 573  #int(width*(im_in.height/float(im_in.width)))
print 'working on %dx%d' % (width, height)
im = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)  # match the size reported above
cv.Resize(im_in, im)

cv.NamedWindow(WINDOW_NAME, 1)
cv.ShowImage(WINDOW_NAME, im)
cv.SetMouseCallback(WINDOW_NAME, mouse, im)
cv.WaitKey(0)

homo = calc(im_in.width, im_in.height, im.width, im.height)
cv.Save('homography.cvmat', homo)

out = cv.CloneImage(im_in)
cv.WarpPerspective(im_in, out, homo)
out_small = cv.CloneImage(im)
cv.Resize(out, out_small)
cv.ShowImage(WINDOW_NAME, out_small)
cv.WaitKey(0)
Example #4
    def run(self):
        frame = cv.QueryFrame(self.capture)  # Capture the first frame
        frame_size = cv.GetSize(frame)  # Size of the frame in pixels, e.g. 640x480
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        first = True

        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]

            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            while contour:
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)

            if len(points):
                center_point = reduce(
                    lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2),
                    points)
                cv.Circle(color_image, center_point, 40,
                          cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 30,
                          cv.CV_RGB(255, 100, 0), 1)
                cv.Circle(color_image, center_point, 20,
                          cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 10,
                          cv.CV_RGB(255, 100, 0), 1)

                # Map the pixel offset from the frame center (320, 240) onto servo
                # angles around 90 degrees: 22.857 px/degree spans the 640-px width
                # over ~28 degrees of pan; 12 px/degree gives ~40 degrees of tilt.
                xangle = int(90 + (center_point[0] - 320) / 22.857)
                yangle = int(90 - (center_point[1] - 240) / 12)

                print "Xangle = ", xangle
                self.firecontrol.sendcoord(0xff, xangle, yangle)
                current_position = center_point
                self.firecontrol.fire(0xfe)
                print "Located at: ", current_position
                #               fire_control.engage()
                #               print "Fire: ", self.pos

                self.pos = center_point
            cv.ShowImage("Target", color_image)
            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
Example #5
    def process_frame(self, frame):

        #import pdb
        # pdb.set_trace()

        if self.debug:
            debug = cv.CloneImage(frame)

        # look for bins (the black rectangles, not the X's and O's)
        rects = libvision.letters.find_bins(frame)

        # record rectangles(bins) found
        cur_bins = [
            Bin(0, (rect.center[0] - frame.width / 2,
                    frame.height / 2 - rect.center[1]), rect.theta, rect.area)
            for rect in rects
        ]

        # detect correctly colored regions
        binary = libvision.cmodules.target_color_rgb.find_target_color_rgb(
            frame, 250, 0, 0, 500, 800, .3)

        # clean up the color detect
        cv.Dilate(binary, binary, None, 1)
        cv.Erode(binary, binary, None, 1)

        # collect blobs
        blob_indexed = cv.CreateImage(cv.GetSize(binary), 8, 1)
        blobs = libvision.blob.find_blobs(binary, blob_indexed, 50, 2)

        # check blobs for letters
        for i, blob in enumerate(blobs):
            # check if the blob is a letter
            letter = libvision.cmodules.shape_detect.match_letters(
                blob_indexed, i + 1, blob.centroid[0], blob.centroid[1],
                blob.roi[0], blob.roi[1], blob.roi[2], blob.roi[3])

            if (not letter):
                continue

            # check to see if this letter is within a known bin
            letter_placed = False
            for a_bin in cur_bins:
                radius = int(math.sqrt(a_bin.area / 2) / 2)
                x_dif = (blob.centroid[0] - a_bin.center[0])**2
                y_dif = (blob.centroid[1] - a_bin.center[1])**2
                tot_dif = math.sqrt(x_dif + y_dif)
                if (tot_dif < radius):
                    # this letter is in a rectangle!
                    a_bin.type = letter
                    letter_placed = True
                    break

            if (not letter_placed):
                # make a new bin for this floating letter
                cur_bins.append(
                    Bin(letter, (blob.centroid[0] - frame.width / 2,
                                 frame.height / 2 - blob.centroid[1]), 0, 0))

        # Compare Bins
        # --- tune-able values --- #
        max_travel = 60
        missing_travel = 100
        timeout_inc = 10
        promo_req = 25
        timeout_dec = 5
        timeout_cap = 50
        type_count_thresh = 3
        missing_timeout = 200
        # ------------------------ #

        # decide if we've seen any current bin before
        for a_bin in cur_bins:
            bin_recognized = False

            # check known bins
            for known_bin in self.known_bins:
                # compute distance between bins
                x_dif = a_bin.center[0] - known_bin.center[0]
                y_dif = a_bin.center[1] - known_bin.center[1]
                tot_dif = math.sqrt(x_dif**2 + y_dif**2)

                if (tot_dif < max_travel):
                    # update this known_bin
                    known_bin.timeout += timeout_inc
                    known_bin.type_counts[a_bin.type] += 1
                    known_bin.center = a_bin.center
                    known_bin.angle = a_bin.angle
                    if (a_bin.area):
                        known_bin.area = a_bin.area
                    bin_recognized = True
                    break

            if (bin_recognized):
                continue

            # check missing bins
            for missing_bin in list(self.missing):  # copy: items are removed while iterating
                # compute distance between bins
                x_dif = a_bin.center[0] - missing_bin.center[0]
                y_dif = a_bin.center[1] - missing_bin.center[1]
                tot_dif = math.sqrt(x_dif**2 + y_dif**2)

                if (tot_dif < missing_travel):
                    # re-instate this missing bin as a candidate
                    missing_bin.timeout = 10
                    self.candidates.append(missing_bin)
                    self.missing.remove(missing_bin)
                    bin_recognized = True

            if (bin_recognized):
                continue

            # not a known bin, check candidates
            for candidate in self.candidates:
                # compute distance between bins
                x_dif = a_bin.center[0] - candidate.center[0]
                y_dif = a_bin.center[1] - candidate.center[1]
                tot_dif = math.sqrt(x_dif**2 + y_dif**2)

                if (tot_dif < max_travel):
                    # update this candidate
                    candidate.timeout += timeout_inc
                    candidate.type_counts[a_bin.type] += 1
                    candidate.center = a_bin.center
                    candidate.angle = a_bin.angle
                    if (a_bin.area):
                        candidate.area = a_bin.area
                    bin_recognized = True
                    break

            if (bin_recognized):
                continue

            # add this bin as a new candidate
            a_bin.type_counts[a_bin.type] += 1
            a_bin.type = 0
            self.candidates.append(a_bin)

        # promote / time out candidates
        for candidate in list(self.candidates):  # copy: items are removed while iterating
            candidate.timeout -= timeout_dec
            if (candidate.timeout <= 0):
                if (candidate.id):
                    candidate.timeout = missing_timeout
                    self.missing.append(candidate)
                self.candidates.remove(candidate)
                continue
            if (candidate.timeout >= promo_req):
                if (candidate.id == 0):
                    self.bins_seen += 1
                    candidate.id = self.bins_seen
                self.known_bins.append(candidate)
                self.candidates.remove(candidate)

        # handle timeout of known bins
        for known_bin in list(self.known_bins):  # copy: items are removed while iterating
            # select max type
            # if greater than type thresh
            # assign type
            if (not known_bin.type):
                for i, value in enumerate(known_bin.type_counts):
                    if (i and value >= type_count_thresh):
                        known_bin.type = i

            # decrement timeout
            known_bin.timeout -= timeout_dec
            # remove timed-out bins
            if (known_bin.timeout <= 0):
                known_bin.timeout = missing_timeout
                self.missing.append(known_bin)
                self.known_bins.remove(known_bin)
                continue
            # cap timeout
            if (known_bin.timeout > timeout_cap):
                known_bin.timeout = timeout_cap

        # decrement timeouts on missing bins
        for missing_bin in list(self.missing):  # copy: items are removed while iterating
            missing_bin.timeout -= timeout_dec
            if (missing_bin.timeout <= 0):
                self.missing.remove(missing_bin)

        if self.debug:
            '''
            #draw circles to mark bins
            for a_bin in cur_bins:
                if(a_bin.area == 0):
                    continue
                radius = int( math.sqrt(a_bin.area/2) / 2);
                if(a_bin.type == 0):
                    bin_color = (255,0,255)
                elif(a_bin.type == 1):
                    bin_color = (0,255,0)
                elif(a_bin.type == 2):
                    bin_color = (0,0,255)
                cv.Circle(debug,a_bin.center,radius,bin_color,2,8,0)
                '''
            # draw circles to mark bins
            for a_bin in self.known_bins:
                if (a_bin.area == 0):
                    radius = 20
                else:
                    radius = int(math.sqrt(a_bin.area / 2) / 2)
                if (a_bin.type == 0):
                    bin_color = (255, 0, 255)
                elif (a_bin.type == 1):
                    bin_color = (0, 255, 0)
                elif (a_bin.type == 2):
                    bin_color = (0, 0, 255)
                tmp_center = (int(a_bin.center[0] + frame.width / 2),
                              int(frame.height / 2 - a_bin.center[1]))
                cv.Circle(debug, tmp_center, radius, bin_color, 2, 8, 0)

                if (a_bin.id > 5):
                    self.bins_seen -= 5
                    a_bin.id -= 5
                if (a_bin.id == 1):
                    id_color = (0, 0, 0)
                elif (a_bin.id == 2):
                    id_color = (255, 255, 255)
                elif (a_bin.id == 3):
                    id_color = (120, 120, 120)
                elif (a_bin.id == 4):
                    id_color = (255, 0, 0)
                elif (a_bin.id == 5):
                    id_color = (0, 255, 255)
                else:
                    continue
                cv.Circle(debug, tmp_center, radius - 2, id_color, 2, 8, 0)

            # cv.ShowImage("Binary",binary)
            # cv.ShowImage("Bins",bins)
            # cv.ShowImage("Filtered",filtered)
            cv.ShowImage("Python Debug", debug)
        # populate self.output with info
        self.output.see_bin = False

        self.return_output()
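
The Bin record used throughout process_frame is defined elsewhere in the project. A minimal sketch consistent with the fields accessed above (an assumption, not the project's actual definition):

class Bin(object):
    def __init__(self, type, center, angle, area):
        self.type = type              # 0 = unknown; small ints encode letter types
        self.center = center          # (x, y) relative to the frame center
        self.angle = angle
        self.area = area
        self.id = 0                   # assigned when promoted to a known bin
        self.timeout = 10             # confidence counter, raised on each sighting
        self.type_counts = [0, 0, 0]  # sighting votes per letter type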
Example #6
grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
test=cv.CreateImage(cv.GetSize(frame),8,3)
img2=cv.CreateImage(cv.GetSize(frame),8,3)
cv.NamedWindow("Real",0)
cv.NamedWindow("Threshold",0)
    #cv.NamedWindow("Final",0)
while(1):
    color_image = cv.QueryFrame(capture)
    imdraw=cv.CreateImage(cv.GetSize(frame),8,3)
    cv.SetZero(imdraw)
    cv.Flip(color_image,color_image,1)
    cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
    imgyellowthresh=getthresholdedimg(color_image)
    cv.Erode(imgyellowthresh,imgyellowthresh,None,3)
    cv.Dilate(imgyellowthresh,imgyellowthresh,None,10)
    img2=cv.CloneImage(imgyellowthresh)
    storage = cv.CreateMemStorage(0)
    contour = cv.FindContours(imgyellowthresh, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
    points = []

    while contour:
        # Draw bounding rectangles
        bound_rect = cv.BoundingRect(list(contour))
        contour = contour.h_next()

        # for more details about cv.BoundingRect,see documentation
        pt1 = (bound_rect[0], bound_rect[1])
        pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
        points.append(pt1)
        points.append(pt2)
        cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 2)
Example #7

names = [
    "../c/pic1.png", "../c/pic2.png", "../c/pic3.png", "../c/pic4.png",
    "../c/pic5.png", "../c/pic6.png"
]

if __name__ == "__main__":
    # create memory storage that will contain all the dynamic data
    storage = cv.CreateMemStorage(0)
    for name in names:
        img0 = cv.LoadImage(name, 1)
        if not img0:
            print "Couldn't load %s" % name
            continue
        img = cv.CloneImage(img0)
        # create a window and a trackbar (slider) attached to it, and set the callback
        # (the slider regulates the upper threshold passed to the Canny edge detector)
        cv.NamedWindow(wndname, 1)
        cv.CreateTrackbar("canny thresh", wndname, thresh, 1000, on_trackbar)
        # force the image processing
        on_trackbar(0)
        # wait for key.
        # Also the function cv.WaitKey takes care of event processing
        c = cv.WaitKey(0) % 0x100
        # clear memory storage - reset free space position
        cv.ClearMemStorage(storage)
        if c == 27:  # ESC (WaitKey returns an int, so a string comparison never matches)
            break
    cv.DestroyWindow(wndname)
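
wndname, thresh, and on_trackbar come from earlier in the original file. A minimal on_trackbar consistent with the comments above (assuming a module-level img holding the current picture) might look like:

def on_trackbar(position):
    # Edge-detect the current image, using the slider value as the upper Canny threshold.
    grey = cv.CreateImage(cv.GetSize(img), 8, 1)
    edges = cv.CreateImage(cv.GetSize(img), 8, 1)
    cv.CvtColor(img, grey, cv.CV_BGR2GRAY)
    cv.Canny(grey, edges, position / 2, position, 3)  # lower = half of upper, an assumed choice
    cv.ShowImage(wndname, edges)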
Example #8
import numpy
import cv

capture = cv.CaptureFromCAM(0)
imageSize = 1280, 720

h = 0
s = 0
v = 0

channels3 = cv.CreateImage(imageSize, 8, 3)
channels1 = cv.CreateImage(imageSize, 8, 1)

while True:
    frame = cv.QueryFrame(capture)
    clone = cv.CloneImage(frame)
    hsv = cv.CloneImage(channels3)
    threshold = cv.CloneImage(channels1)
    threshold2 = cv.CloneImage(channels1)

    cv.CvtColor(clone, hsv, cv.CV_BGR2HSV)
    cv.InRangeS(hsv, (110, 120, 75), (140, 255, 255), threshold)
    cv.InRangeS(hsv, (110, 120, 75), (140, 255, 255), threshold2)
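    # NOTE: both InRangeS calls above use the identical range, so the Add below is
    # effectively a no-op; presumably the second range was meant to cover a second
    # hue band (common for colors that wrap around the hue axis).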
    cv.Add(threshold, threshold2, threshold)
    cv.Erode(threshold, threshold, iterations=5)
    cv.Dilate(threshold, threshold, iterations=5)

    cv.ShowImage("normal", frame)

    cv.ShowImage("color", threshold)
Example #9
    def __init__(self):
        self.threshold_value = THRESH  # value associated with slider bar
        self.capture = cv.CaptureFromCAM(0)
        cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
        cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

        self.hc = cv.Load(HAARCASCADE)
        self.ms = cv.CreateMemStorage()

        self.orig = cv.QueryFrame(self.capture)
        if not self.orig:
            print "can't get frame, check camera"
            sys.exit(2)

        self.width = self.orig.width
        self.height = self.orig.height
        self.size = (self.width, self.height)
        self.smallwidth = int(self.width / SCALING)
        self.smallheight = int(self.height / SCALING)
        self.smallsize = (self.smallwidth, self.smallheight)

        # alloc mem for images
        self.small = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.visualize = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.bw = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.hsv = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.hue = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.sat = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.val = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.bp = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.scaled = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.th = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.morphed = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.temp = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        self.temp3 = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.result = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.hist_image = cv.CreateImage((320, 200), cv.IPL_DEPTH_8U, 1)
        self.scaled_c = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.hue_c = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.sat_c = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.th_c = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)
        self.morphed_c = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 3)

        # Greyscale image, thresholded to create the motion mask:
        self.grey_image = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        self.running_average_image = cv.CreateImage(self.smallsize,
                                                    cv.IPL_DEPTH_32F, 3)
        # ...but the AbsDiff() function requires matching image depths:
        self.running_average_in_display_color_depth = cv.CloneImage(self.small)

        # RAM used by FindContours():
        self.mem_storage = cv.CreateMemStorage(0)
        print "here"

        # The difference between the running average and the current frame:
        self.difference = cv.CloneImage(self.small)

        self.target_count = 1
        self.last_target_count = 1
        self.last_target_change_t = 0.0
        self.k_or_guess = 1
        self.codebook = []
        self.frame_count = 0
        self.last_frame_entity_list = []

        # make matrix for erode/dilate
        MORPH_SIZE = 3
        center = (MORPH_SIZE / 2) + 1
        self.morpher_small = cv.CreateStructuringElementEx(
            MORPH_SIZE, MORPH_SIZE, center, center, cv.CV_SHAPE_ELLIPSE)
        # self.morpher_small = cv.CreateStructuringElementEx(cols=MORPH_SIZE, rows=MORPH_SIZE, anchor_x=center, anchor_y=center, shape=cv.CV_SHAPE_ELLIPSE)
        MORPH_SIZE = 11
        center = (MORPH_SIZE / 2) + 1
        self.morpher = cv.CreateStructuringElementEx(MORPH_SIZE, MORPH_SIZE,
                                                     center, center,
                                                     cv.CV_SHAPE_ELLIPSE)

        # alloc mem for histogram
        self.hist = cv.CreateHist([HUEBINS, SATBINS], cv.CV_HIST_ARRAY,
                                  [[0, 180], [0, 255]], 1)

        # initialize
        #cv.CvtColor(self.small, self.bw, cv.CV_BGR2GRAY)
        #cv.CvtColor(self.small, self.hsv, cv.CV_BGR2HSV)
        #cv.CalcArrHist([self.hue, self.sat], self.hist)

        # video writer
        if STORE:
            self.writer = cv.CreateVideoWriter(OUTPUT,
                                               cv.CV_FOURCC(
                                                   'M', 'J', 'P', 'G'),
                                               15,
                                               cv.GetSize(self.combined),
                                               is_color=1)
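            # NOTE: self.combined is not created in this __init__; it is assumed to
            # be defined elsewhere (e.g. a composite of the debug views) before the
            # writer is used.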

        # make window
        cv.NamedWindow('Skin Detection')
        cv.CreateTrackbar('Threshold', 'Skin Detection', self.threshold_value,
                          255, self.change_threshold)
cv.NamedWindow("Regiao de Interesse", 1)
cv.MoveWindow("Regiao de Interesse", 1000, 480)
cv.MoveWindow("Mascara", 0, 500)
cv.MoveWindow("Binario", 400, 500)

arquivo = open('Treino.txt', 'a')

mascara = cv.CreateImage((resolucao_largura, resolucao_altura), 8, 3)
cinza = cv.CreateImage((resolucao_largura, resolucao_altura), 8, 1)

while True:
    print("Por Favor tire uma foto do fundo estatico do seu video.")
    print("Aperte a tecla espaco.")
    if cv.WaitKey(0) % 0x100 == 32:
        primeiraImagem = cv.QueryFrame(captura)
        fundo = cv.CloneImage(primeiraImagem)
        cv.Smooth(fundo, fundo, cv.CV_GAUSSIAN, filtro_de_gauss)
        print("Tirou uma Foto !")
        break
    else:
        print "Uma foto do fundo nao foi tirada"
        break

while True:

    imagem = cv.QueryFrame(captura)
    cv.Smooth(imagem, imagem, cv.CV_GAUSSIAN, filtro_de_gauss)
    maiorArea = 0
    listaContornos = []
    listaVertices = []
    quadrante = []
Example #11
def FindObject(frame):
    global old_frame
    global gftt_list
    global weights
    global existence

    if not MovingHead():
        try:
            mask = FrameMask(old_frame, frame)
        except:
            old_frame = cv.CloneImage(frame)
            gftt_list = list()
            return None, None, False
    else:
        old_frame = cv.CloneImage(frame)
        gftt_list = list()
        return None, None, False

    if mask is None:
        gftt_list = list()
        print "FrameMask returned no mask"
        return None, None, False

    ## Find Good Features to track
    if len(gftt_list) < 300:
        #gftt_list.append((GoodFeaturesToTrack(old_frame, mask),1))
        gftt_new, weights_new, existence_new = GoodFeaturesToTrack(
            old_frame, mask)

        if gftt_new is not None:
            gftt_list = gftt_list + gftt_new
            weights = weights + weights_new
            existence = existence + existence_new

    gftt_list_new, weights, existence = OpticalFlow(frame, old_frame,
                                                    gftt_list, weights,
                                                    existence)
    weights, existence = UpdatePointWeights(gftt_list_new, gftt_list, weights,
                                            existence)

    gftt_list = gftt_list_new

    gftt_list, weights, existence = DropPoints(gftt_list, weights, existence)
    gftt_img = DrawPoints(frame, gftt_list)

    if len(gftt_list) > 30:
        loc_obj = list()
        loc_obj = AvgPoint(gftt_list, 1)
        cv.Circle(gftt_img, loc_obj, 4, 255, 4, 8, 0)
        convrad = 0.55 / (frame.width / 2)
        loc_obj = list(loc_obj)
        loc_obj[0] = (loc_obj[0] - (frame.width / 2)) * convrad
        loc_obj[1] = (loc_obj[1] - (frame.height / 2)) * convrad
    else:
        loc_obj = [None, None]  # a list, so the MovingHead branch below can assign elements
    cv.ShowImage("Good Features", gftt_img)
    cv.ShowImage("Difference", mask)
    cv.Copy(frame, old_frame)
    if MovingHead():
        print "Object Location = 0"
        loc_obj[0] = 0
        loc_obj[1] = 0
        gftt_list = list()
        old_frame = 0
    return loc_obj[0], loc_obj[1], True
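
FrameMask, MovingHead, GoodFeaturesToTrack, OpticalFlow and the other helpers live elsewhere in this project. As a rough sketch (an assumption, not the project's code), FrameMask plausibly thresholds the difference of consecutive frames:

def FrameMask(prev_frame, cur_frame):
    # Hypothetical helper: threshold |cur - prev| into a binary motion mask.
    grey = cv.CreateImage(cv.GetSize(cur_frame), cv.IPL_DEPTH_8U, 1)
    diff = cv.CloneImage(cur_frame)
    cv.AbsDiff(cur_frame, prev_frame, diff)
    cv.CvtColor(diff, grey, cv.CV_RGB2GRAY)
    cv.Threshold(grey, grey, 30, 255, cv.CV_THRESH_BINARY)
    return grey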
Example #12
    def run(self):
        # Initialize
        #log_file_name = "tracker_output.log"
        #log_file = file( log_file_name, 'a' )

        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame),
                                               cv.IPL_DEPTH_32F, 3)
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)

        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)

        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)

        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []

        t0 = time.time()

        # For toggling display:
        image_list = ["camera", "difference", "threshold", "display", "faces"]
        image_index = 0  # Index into image_list

        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1,
                                cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        ###############################
        ### Face detection stuff
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        haar_cascade = cv.Load('haarcascades/haarcascade_frontalface_alt.xml')
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )

        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 3

        while True:

            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)

            frame_count += 1
            frame_t0 = time.time()

            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)

            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)
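            # RunningAvg computes an exponential moving average,
            #   avg = (1 - a) * avg + a * frame,
            # so a = 0.320 weights roughly the last 1/0.32 ~= 3 frames.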

            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image,
                            running_average_in_display_color_depth, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth,
                       difference)

            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)
            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)

            grey_image_as_array = numpy.asarray(cv.GetMat(grey_image))
            non_black_coords_array = numpy.where(grey_image_as_array > 3)
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip(non_black_coords_array[1],
                                         non_black_coords_array[0])

            points = []  # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []

            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours(grey_image, mem_storage,
                                      cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            while contour:

                bounding_rect = cv.BoundingRect(list(contour))
                point1 = (bounding_rect[0], bounding_rect[1])
                point2 = (bounding_rect[0] + bounding_rect[2],
                          bounding_rect[1] + bounding_rect[3])

                bounding_box_list.append((point1, point2))
                polygon_points = cv.ApproxPoly(list(contour), mem_storage,
                                               cv.CV_POLY_APPROX_DP)

                # To track polygon points only (instead of every pixel):
                #points += list(polygon_points)

                # Draw the contours:
                ###cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly(grey_image, [
                    list(polygon_points),
                ], cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(display_image, [
                    polygon_points,
                ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                #cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)

                contour = contour.h_next()

            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are probably just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]
                box_areas.append(box_width * box_height)

                #cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)

            average_box_area = 0.0
            if len(box_areas):
                average_box_area = float(sum(box_areas)) / len(box_areas)

            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]

                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area * 0.1:
                    trimmed_box_list.append(box)

            # Draw the trimmed box list:
            #for box in trimmed_box_list:
            #	cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )

            bounding_box_list = merge_collided_bboxes(trimmed_box_list)

            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle(display_image, box[0], box[1],
                             cv.CV_RGB(0, 255, 0), 1)

            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len(bounding_box_list)

            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.

            if frame_t0 - last_target_change_t < .350:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1:
                    estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1:
                    estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0

            # Clip to the user-supplied maximum:
            estimated_target_count = min(estimated_target_count, max_targets)

            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.

            # Using the numpy values directly (treating all pixels as points):
            points = non_black_coords_array
            center_points = []

            if len(points):

                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max(estimated_target_count, 1)  # Need at least one target to look for.
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook

                #points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans(array(points), k_or_guess)

                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = (int(center_point[0]), int(center_point[1]))
                    center_points.append(center_point)
                    #cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    #cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)

            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []

            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []

                for center_point in center_points:
                    if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
                     center_point[1] < box[bottom][1] and center_point[1] > box[top][1] :

                        # This point is within the box.
                        center_points_in_box.append(center_point)

                # Now see if there are more than one.  If so, merge them.
                if len(center_points_in_box) > 1:
                    # Merge them:
                    x_list, y_list = [], []  # two separate lists (a chained assignment would alias them)
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])

                    average_x = int(float(sum(x_list)) / len(x_list))
                    average_y = int(float(sum(y_list)) / len(y_list))

                    trimmed_center_points.append((average_x, average_y))

                    # Record that they were removed:
                    removed_center_points += center_points_in_box

                if len(center_points_in_box) == 1:
                    trimmed_center_points.append(
                        center_points_in_box[0])  # Just use it.

            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (not center_point in trimmed_center_points) and (
                        not center_point in removed_center_points):
                    trimmed_center_points.append(center_point)

            # Draw what we found:
            #for center_point in trimmed_center_points:
            #	center_point = ( int(center_point[0]), int(center_point[1]) )
            #	cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #	cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #	cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #	cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)

            # Determine if there are any new (or lost) targets:
            actual_target_count = len(trimmed_center_points)
            last_target_count = actual_target_count

            # Now build the list of physical entities (objects)
            this_frame_entity_list = []

            # An entity is list: [ name, color, last_time_seen, last_known_coords ]
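            # e.g. (illustrative): ['a3f19c', (17, 204, 88), 1369427389.41, (184, 102)]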

            for target in trimmed_center_points:

                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}

                for entity in last_frame_entity_list:

                    entity_coords = entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]

                    distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2))
                    entity_distance_dict[distance] = entity

                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()

                for distance in distance_list:

                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[distance]

                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        #print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
                        continue

                    #print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[3] = target  # Update the new location
                    this_frame_entity_list.append(nearest_possible_entity)
                    #log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break

                if not entity_found:
                    # It's a new entity.
                    color = (random.randint(0, 255), random.randint(0, 255),
                             random.randint(0, 255))
                    name = hashlib.md5(str(frame_t0) +
                                       str(color)).hexdigest()[:6]
                    last_time_seen = frame_t0

                    new_entity = [name, color, last_time_seen, target]
                    this_frame_entity_list.append(new_entity)
                    #log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.

            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    #log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append(entity)

            # For next frame:
            last_frame_entity_list = this_frame_entity_list

            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10,
                          cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point, 5,
                          cv.CV_RGB(c[0], c[1], c[2]), 3)

            #print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break

            # Toggle which image to show
            if chr(c) == 'd':
                image_index = (image_index + 1) % len(image_list)

            image_name = image_list[image_index]

            # Display frame to user
            if image_name == "camera":
                image = camera_image
                cv.PutText(image, "Camera (Normal)", text_coord, text_font,
                           text_color)
            elif image_name == "difference":
                image = difference
                cv.PutText(image, "Difference Image", text_coord, text_font,
                           text_color)
            elif image_name == "display":
                image = display_image
                cv.PutText(image, "Targets (w/AABBs and contours)", text_coord,
                           text_font, text_color)
            elif image_name == "threshold":
                # Convert the image to color.
                cv.CvtColor(grey_image, display_image, cv.CV_GRAY2RGB)
                image = display_image  # Re-use display image here
                cv.PutText(image, "Motion Mask", text_coord, text_font,
                           text_color)
            elif image_name == "faces":
                # Do face detection
                detect_faces(camera_image, haar_cascade, mem_storage)
                image = camera_image  # Re-use camera image here
                cv.PutText(image, "Face Detection", text_coord, text_font,
                           text_color)

            cv.ShowImage("Target", image)

            if self.writer:
                cv.WriteFrame(self.writer, image)

            #log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta,
                                                  processed_fps)
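
merge_collided_bboxes is defined elsewhere in the original script. A plausible minimal version, merging overlapping axis-aligned boxes until none collide (a sketch under that assumption, not the author's exact code):

def merge_collided_bboxes(bbox_list):
    # Each box is ((x1, y1), (x2, y2)); merge any overlapping pair until stable.
    boxes = list(bbox_list)
    merged = True
    while merged:
        merged = False
        out = []
        while boxes:
            (a1, a2) = boxes.pop()
            i = 0
            while i < len(boxes):
                (b1, b2) = boxes[i]
                if (a1[0] <= b2[0] and b1[0] <= a2[0] and
                        a1[1] <= b2[1] and b1[1] <= a2[1]):
                    # Boxes overlap: grow box a to cover box b, drop b, and rescan.
                    a1 = (min(a1[0], b1[0]), min(a1[1], b1[1]))
                    a2 = (max(a2[0], b2[0]), max(a2[1], b2[1]))
                    boxes.pop(i)
                    merged = True
                else:
                    i += 1
            out.append((a1, a2))
        boxes = out
    return boxes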
Example #13
    def run(self):
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        run_mean = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        diff = None
        movement = []

        while True:
            # Capture frame from webcam
            color_image = cv.QueryFrame(self.capture)
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if diff is None:
                # Initialize
                diff = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, run_mean, 1.0, 0.0)
            else:
                cv.RunningAvg(color_image, run_mean, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(run_mean, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, diff)

            # Convert the image to grayscale.
            cv.CvtColor(diff, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get object blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            # Calculate movements
            store_movement = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, store_movement, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            while contour:
                # Draw rectangles
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

            num_points = len(points)

            if num_points:
                x = 0
                for point in points:
                    x += point[0]
                x /= num_points

                movement.append(x)

            # Need at least three samples so the sliced diff below is non-empty.
            if len(movement) > 2 and numpy.average(numpy.diff(movement[-30:-1])) > 0:
                print 'Left'
            else:
                print 'Right'

            # Display frame to user
            cv.ShowImage("Object Direction of Motion", color_image)

            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if (0xFF & c) == 27:
                break
Example #14
def main():
    def isimgext(f):
        return os.path.splitext(f)[1].lower() in ('.png', '.tif', '.tiff',
                                                  '.jpg', '.jpeg')

    args = sys.argv[1:]
    imgsdir = args[0]
    vendor = args[1]
    outdir = args[2]
    try:
        N = int(args[3])
    except (IndexError, ValueError):
        N = -1
    if 'align' in args:
        # Align the barcodes when computing Min/Max overlays
        do_align = True
    else:
        do_align = False
    if 'do_cpyimg' in args:
        # Copy the entire images to OUTDIR (don't do this for large N!)
        do_cpyimg = True
    else:
        do_cpyimg = False
    if 'just_grouping' in args:
        # Just compute the barcodes + group, don't compute overlays
        just_grouping = True
    else:
        just_grouping = False
    if args[-2] == 'load':
        grouping = pickle.load(open(args[-1], 'rb'))
    else:
        grouping = None
    do_profile = 'profile' in args
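    # Illustrative invocation (script name, paths, and vendor are hypothetical):
    #   python partition_imgs.py scanned_ballots/ hart out/ 500 align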

    imgpaths = []
    cnt = 0
    for dirpath, dirnames, filenames in os.walk(imgsdir):
        for imgname in [f for f in filenames if isimgext(f)]:
            if N > 0 and cnt >= N:
                break
            imgpath = os.path.join(dirpath, imgname)
            imgpaths.append(imgpath)
            cnt += 1
        if N > 0 and cnt >= N:
            break
    print "Starting partition_imgs..."
    t = time.time()
    if do_profile:
        cProfile.runctx('partition_imgs(imgpaths, vendor=vendor)', {}, {
            'imgpaths': imgpaths,
            'vendor': vendor,
            'partition_imgs': partition_imgs
        })
        return
    if grouping is None:
        grouping = partask.do_partask(_do_partition_imgs,
                                      imgpaths,
                                      _args=(vendor, None),
                                      combfn="dict",
                                      N=None)
        try:
            os.makedirs(outdir)
        except OSError:
            pass
        pickle.dump(grouping, open(os.path.join(outdir, 'grouping.p'), 'wb'),
                    pickle.HIGHEST_PROTOCOL)

    dur = time.time() - t
    print "...Finished partition_imgs ({0} s).".format(dur)
    print "    Avg. Time per ballot: {0} s".format(dur / len(imgpaths))

    print "Copying groups to outdir {0}...".format(outdir)
    t = time.time()
    errcount = 0
    for barcodes, group in grouping.iteritems():
        if len(group) == 1:
            errcount += 1 if ("ERR0" in barcodes or "ERR1" in barcodes) else 0
            continue
        elif "ERR0" in barcodes or "ERR1" in barcodes:
            #continue
            errcount += len(group)
            pass
        if just_grouping:
            continue
        bcs = '_'.join([thing for thing in barcodes if isinstance(thing, str)])
        rootdir = os.path.join(outdir, bcs)
        try:
            os.makedirs(rootdir)
        except OSError:
            pass
        Imins = [None for _ in barcodes]
        Imaxes = [None for _ in barcodes]
        Irefs = [None for _ in barcodes]

        for i, (imgpath, isflip, bbs) in enumerate(group):
            if do_cpyimg:
                imgname = os.path.split(imgpath)[1]
                outpath_foo = os.path.join(rootdir, imgname)
                shutil.copy(imgpath, outpath_foo)
            img = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_GRAYSCALE)
            if isflip:
                cv.Flip(img, img, flipMode=-1)
            for j, bb in enumerate(bbs):
                outpath = os.path.join(rootdir, str(j),
                                       "{0}_{1}.png".format(i, j))
                try:
                    os.makedirs(os.path.split(outpath)[0])
                except OSError:
                    pass
                x, y, w, h = bb
                cv.SetImageROI(img, (x, y, w, h))
                wbig, hbig = int(round(w * 2.0)), int(round(h * 2.0))
                bcBig = cv.CreateImage((wbig, hbig), img.depth, img.channels)
                cv.Resize(img, bcBig, interpolation=cv.CV_INTER_CUBIC)
                cv.SaveImage(outpath, bcBig)

                if Imins[j] is None:
                    Imins[j] = cv.CloneImage(bcBig)
                    Imaxes[j] = cv.CloneImage(bcBig)
                    if do_align:
                        Irefs[j] = make_overlays.iplimage2np(
                            cv.CloneImage(bcBig)) / 255.0
                else:
                    bcBig_sized = make_overlays.matchsize(bcBig, Imins[j])
                    if do_align:
                        tmp_np = make_overlays.iplimage2np(
                            cv.CloneImage(bcBig_sized)) / 255.0
                        H, Ireg, err = imagesAlign.imagesAlign(tmp_np,
                                                               Irefs[j],
                                                               fillval=0.2,
                                                               rszFac=0.75)
                        Ireg *= 255.0
                        Ireg = Ireg.astype('uint8')
                        bcBig_sized = make_overlays.np2iplimage(Ireg)
                    cv.Min(bcBig_sized, Imins[j], Imins[j])
                    cv.Max(bcBig_sized, Imaxes[j], Imaxes[j])
        for idx, Imin in enumerate(Imins):
            Imax = Imaxes[idx]
            cv.SaveImage(os.path.join(rootdir, "_{0}_minimg.png".format(idx)),
                         Imin)
            cv.SaveImage(os.path.join(rootdir, "_{0}_maximg.png".format(idx)),
                         Imax)

    dur = time.time() - t
    print "...Finished Copying groups to outdir {0} ({1} s).".format(
        outdir, dur)
    print "Number of error ballots:", errcount
    print "Done."
Example #15
    def run(self, args):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        #frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        first = True
        #closest_to_left = cv.GetSize(frame)[0]
        #closest_to_right = cv.GetSize(frame)[1]
        while True:
            color_image = cv.QueryFrame(self.capture)
            if not color_image:
                print "END OF FILE"
                break

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, args.motiondelay, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            if DEBUG_VISUAL:
                cv.ShowImage("Grey_image: Difference", grey_image)
            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, args.threshold, 255, cv.CV_THRESH_BINARY)
            if DEBUG_VISUAL:
                cv.ShowImage("Grey_image: Black n White", grey_image)
            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, args.dilate)
            if DEBUG_VISUAL:
                cv.ShowImage("Dilate", grey_image)
            cv.Erode(grey_image, grey_image, None, args.erode)
            if DEBUG_VISUAL:
                cv.ShowImage("Erode", grey_image)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            
            counter = 0
            while contour:
                counter += 1
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)
            print counter
            
            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
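
# The same running-average background subtraction in the modern cv2 API, as a
# minimal sketch (assumes a working VideoCapture; the 0.02 alpha and 70
# threshold are illustrative stand-ins for args.motiondelay / args.threshold):
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
avg = np.float32(frame)
while ok:
    cv2.accumulateWeighted(frame, avg, 0.02)             # running average
    diff = cv2.absdiff(frame, cv2.convertScaleAbs(avg))  # motion = |frame - avg|
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 70, 255, cv2.THRESH_BINARY)
    ok, frame = cap.read()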
Example #16
def showImage(svs, img):
    dst = cv.CloneImage(img.image)
Example #17
old_time = time.time()

intrinsics = cv.Load("Intrinsics.xml")
distortion = cv.Load("Distortion.xml")

image = cv.QueryFrame(cam)
mapx = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
mapy = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
cv.InitUndistortMap(intrinsics, distortion, mapx, mapy)

while True:
    start = time.time()
    image = cv.QueryFrame(cam)

    processed = cv.CloneImage(image)
    cv.Remap(image, processed, mapx, mapy)
    crop_rect = (0, 63, 640, 350)
    cv.SetImageROI(processed, crop_rect)

    ball_center = find_object(processed, "RED")
    ball_center = (int(ball_center[0][0]) + 8, int(ball_center[0][1]) + 8)
    blue_center = find_object(processed, "BLUE")
    blue_center = ((int(blue_center[0][0]) + 8, int(blue_center[0][1]) + 12),
                   blue_center[1])
    yellow_center = find_object(processed, "YELLOW")
    yellow_center = ((int(yellow_center[0][0]) + 8,
                      int(yellow_center[0][1]) + 12), yellow_center[1])

    center_points = (ball_center, blue_center, yellow_center)
    draw_on_image(processed, center_points)
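
# A hedged cv2 equivalent of the undistort-remap step above (`K`, `dist`,
# `w`, `h`, and `image_bgr` are assumed to be the already-loaded camera
# matrix, distortion coefficients, frame size, and frame):
import cv2

mapx2, mapy2 = cv2.initUndistortRectifyMap(K, dist, None, K, (w, h),
                                           cv2.CV_32FC1)
undistorted = cv2.remap(image_bgr, mapx2, mapy2, cv2.INTER_LINEAR)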
Example #18
    def process_frame(self, frame):

        # Resize image to 320x240
        #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
        #cv.Copy(frame, copy)
        #cv.SetImageROI(frame, (0, 0, 320, 240))
        #cv.Resize(copy, frame, cv.CV_INTER_NN)

        found_hedge = False

        test_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)

        cv.Copy(frame, test_frame)

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to have value channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 2)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)

        cv.AdaptiveThreshold(
            binary,
            binary,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.adaptive_thresh_blocksize,
            self.adaptive_thresh,
        )

        # Morphology

        kernel = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE)
        #cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 4)

        if self.debug:
            color_filtered = cv.CloneImage(binary)

        # Get Edges
        #cv.Canny(binary, binary, 30, 40)

        # Hough Transform
        '''
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
            rho=1,
            theta=math.pi/180,
            threshold=self.hough_threshold,
            param1=0,
            param2=0
        )
        '''
        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary,
                                   line_storage,
                                   cv.CV_HOUGH_PROBABILISTIC,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=self.min_length,
                                   param2=self.max_gap)

        self.hor_lines = []

        for line in raw_lines:
            slope = line_slope(line[0], line[1])
            if slope is None:
                continue
            if math.fabs(slope) < self.hor_threshold:
                self.hor_lines.append(line)

        max_length = 0
        crossbar_seg = None

        for line in self.hor_lines:
            print line
            if math.fabs(line_distance(line[0], line[1])) > max_length:
                max_length = math.fabs(line_distance(line[0], line[1]))
                crossbar_seg = line
        '''
        # Get vertical lines
        vertical_lines = []
        for line in raw_lines:
            if line[1] < self.vertical_threshold or \
                line[1] > math.pi-self.vertical_threshold:

                vertical_lines.append( (abs(line[0]), line[1]) )

        # Group vertical lines
        vertical_line_groups = []  # A list of line groups which are each a line list
        for line in vertical_lines:
            group_found = False
            for line_group in vertical_line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                vertical_line_groups.append([line])

        # Average line groups into lines
        vertical_lines = []
        for line_group in vertical_line_groups:
            rhos = map(lambda line: line[0], line_group)
            angles = map(lambda line: line[1], line_group)
            line = (sum(rhos)/len(rhos), circular_average(angles, math.pi))
            vertical_lines.append(line)

        # Get horizontal lines
        horizontal_lines = []
        for line in raw_lines:
            dist_from_horizontal = (math.pi/2 + line[1]) % math.pi
            if dist_from_horizontal < self.horizontal_threshold or \
                dist_from_horizontal > math.pi-self.horizontal_threshold:

                horizontal_lines.append( (abs(line[0]), line[1]) )

        # Group horizontal lines
        horizontal_line_groups = []  # A list of line groups which are each a line list
        for line in horizontal_lines:
            group_found = False
            for line_group in horizontal_line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                horizontal_line_groups.append([line])

        if len(horizontal_line_groups) is 1:
            self.seen_crossbar = True
            rhos = map(lambda line: line[0], horizontal_line_groups[0])
            angles = map(lambda line: line[1], horizontal_line_groups[0])
            line = (sum(rhos)/len(rhos), circular_average(angles, math.pi))
            horizontal_lines = [line]
        else:
            self.seen_crossbar = False
            horizontal_lines = []

        self.left_pole = None
        self.right_pole = None
        if len(vertical_lines) is 2:
            roi = cv.GetImageROI(frame)
            width = roi[2]
            height = roi[3]
            self.left_pole = round(min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2
            self.right_pole = round(max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2
        #TODO: If one pole is seen, is it left or right pole?

        # Calculate planar distance r (assuming we are moving perpendicular to
        # the hedge)
        if self.left_pole and self.right_pole:
            theta = abs(self.left_pole - self.right_pole)
            self.r = 3 / tan(radians(theta/2))
        else:
            self.r = None

        if self.r and self.seen_crossbar:
            bar_phi = (-1*horizontal_lines[0][0] + frame.height/2) / (frame.height/2) * 32
            self.crossbar_depth = self.r * atan(radians(bar_phi))
        else:
            self.crossbar_depth = None
        '''
        self.left_pole = None
        self.right_pole = None
        self.seen_crossbar = False
        self.crossbar_depth = None

        if self.debug and max_length != 0:
            cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)

            #libvision.misc.draw_lines(frame, vertical_lines)
            #libvision.misc.draw_lines(frame, horizontal_lines)
            # for line in raw_lines:
            #    cv.Line(frame,line[0],line[1], (255,255,0), 10, cv.CV_AA, 0)
            #    cv.Circle(frame, line[1], 15, (255,0,0), 2,8,0)
            # print len(raw_lines)
            #cv.ShowImage("Hedge", cv.CloneImage(frame))
            if (crossbar_seg[0][0] - frame.width / 2) * 37 / (
                    frame.width / 2) < (crossbar_seg[1][0] - frame.width /
                                        2) * 37 / (frame.width / 2):
                self.left_pole = round((crossbar_seg[0][0] - frame.width / 2) *
                                       37 / (frame.width / 2))
                self.right_pole = round(
                    (crossbar_seg[1][0] - frame.width / 2) * 37 /
                    (frame.width / 2))
            else:
                self.left_pole = round((crossbar_seg[1][0] - frame.width / 2) *
                                       37 / (frame.width / 2))
                self.right_pole = round(
                    (crossbar_seg[0][0] - frame.width / 2) * 37 /
                    (frame.width / 2))
            self.crossbar_depth = round(
                -1 * (crossbar_seg[1][1] - frame.height / 2) * 36 /
                (frame.height / 2))

            if math.fabs(self.left_pole) <= 37 and math.fabs(
                    self.left_pole) >= self.frame_boundary_thresh:
                self.left_pole = None
            if math.fabs(self.right_pole) <= 37 and math.fabs(
                    self.right_pole) >= self.frame_boundary_thresh:
                self.right_pole = None

            self.seen_crossbar = True

            if self.left_pole and self.right_pole:

                self.returning = (self.left_pole + self.right_pole) / 2
                print "Returning ", self.returning

                if self.last_seen < 0:
                    self.last_center = None
                    self.last_seen = 0
                if self.last_center is None:
                    self.last_center = self.returning
                    self.seen_count = 1
                elif math.fabs(self.last_center -
                               self.returning) < self.center_trans_thresh:
                    self.seen_count += 1
                    self.last_seen += 2
                else:
                    self.last_seen -= 1

                if self.seen_count < self.seen_count_thresh:
                    self.left_pole = None
                    self.right_pole = None
                else:
                    print "FOUND CENTER AND RETURNED IT"
                    self.found = True
            else:
                self.returning = 0
                if self.last_seen < 0:
                    self.last_center = None
                    self.last_seen = 0
                self.last_seen -= 1
                self.left_pole = None
                self.right_pole = None

            cv.Line(frame, crossbar_seg[0], crossbar_seg[1], (255, 255, 0), 10,
                    cv.CV_AA, 0)
            if self.left_pole and crossbar_seg[0][0] < crossbar_seg[1][0]:

                cv.Line(frame, crossbar_seg[0],
                        (crossbar_seg[0][0], crossbar_seg[0][1] - 500),
                        (255, 0, 0), 10, cv.CV_AA, 0)
            elif self.left_pole:
                cv.Line(frame, crossbar_seg[1],
                        (crossbar_seg[1][0], crossbar_seg[1][1] - 500),
                        (255, 0, 0), 10, cv.CV_AA, 0)

            if self.right_pole and crossbar_seg[0][0] > crossbar_seg[1][0]:

                cv.Line(frame, crossbar_seg[0],
                        (crossbar_seg[0][0], crossbar_seg[0][1] - 500),
                        (255, 0, 0), 10, cv.CV_AA, 0)
            elif self.right_pole:
                cv.Line(frame, crossbar_seg[1],
                        (crossbar_seg[1][0], crossbar_seg[1][1] - 500),
                        (255, 0, 0), 10, cv.CV_AA, 0)

            # populate self.output with infos
            self.output.seen_crossbar = self.seen_crossbar
            self.output.left_pole = self.left_pole
            self.output.right_pole = self.right_pole
            #self.output.r = self.r
            self.output.crossbar_depth = self.crossbar_depth

            self.return_output()
            print self
        else:
            cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)

        svr.debug("Hedge", cv.CloneImage(frame))
        svr.debug("Hedge2", test_frame)
Example #19
cv.Save("Distortion.xml", distortion_coefficient)
# Loading from xml files
intrinsic = cv.Load("Intrinsics.xml")
distortion = cv.Load("Distortion.xml")
print " loaded all distortion parameters"

mapx = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
mapy = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
cv.InitUndistortMap(intrinsic, distortion, mapx, mapy)
cv.NamedWindow("Undistort")
print "all mapping completed"
print "Now relax for some time"
time.sleep(8)

print "now get ready, camera is switching on"
while True:
    image = cv.QueryFrame(capture)
    t = cv.CloneImage(image)
    cv.ShowImage("Calibration", image)
    cv.Remap(t, image, mapx, mapy)
    cv.ShowImage("Undistort", image)
    c = cv.WaitKey(33)
    if (c == 1048688):  # enter 'p' key to pause for some time
        cv.WaitKey(2000)
    elif c == 1048603:  # enter esc key to exit
        break

print "everything is fine"

###############################################################################################
orig = cv.LoadImage('./demo1.jpg', cv.CV_LOAD_IMAGE_COLOR)
im = cv.CreateImage(cv.GetSize(orig), 8, 1)
cv.CvtColor(orig, im, cv.CV_BGR2GRAY)
#Keep the original in colour to draw contours in the end

cv.Threshold(im, im, 128, 255, cv.CV_THRESH_BINARY)
cv.ShowImage("Threshold 1", im)
cv.SaveImage("threshold1.jpg",im)

element = cv.CreateStructuringElementEx(5*2+1, 5*2+1, 5, 5, cv.CV_SHAPE_RECT)

cv.MorphologyEx(im, im, None, element, cv.CV_MOP_OPEN) #Open and close to make contours appear
cv.MorphologyEx(im, im, None, element, cv.CV_MOP_CLOSE)
cv.Threshold(im, im, 128, 255, cv.CV_THRESH_BINARY_INV)
cv.ShowImage("After MorphologyEx", im)
cv.SaveImage("after.jpg",im)
# --------------------------------

vals = cv.CloneImage(im) #Make a clone because FindContours can modify the image
contours=cv.FindContours(vals, cv.CreateMemStorage(0), cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE, (0,0))

_red = (0, 0, 255)  # red for external contours
_green = (0, 255, 0)  # green for internal contours
levels = 2  # 1: contours drawn, 2: internal contours as well, 3: ...
co = cv.DrawContours(orig, contours, _red, _green, levels, 2, cv.CV_FILLED)  # draw contours on the colour image
cv.SaveImage("save.jpg",orig)
# cv.SaveImage("co.jpg",co)

cv.ShowImage("Image", orig)

cv.WaitKey(0)
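
# A rough modern-cv2 sketch of the contour pass above (`binary_img` and
# `color_img` are assumed inputs; OpenCV >= 4 returns two values from
# findContours, while 3.x returned three):
import cv2

contours, hierarchy = cv2.findContours(binary_img, cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(color_img, contours, -1, (0, 0, 255), 2)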
Example #21
def findSquares4(img, storage):
    N = 11
    sz = (img.width & -2, img.height & -2)
    timg = cv.CloneImage(img)
    # make a copy of input image
    gray = cv.CreateImage(sz, 8, 1)
    pyr = cv.CreateImage((sz[0] / 2, sz[1] / 2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.CreateSeq(0, sizeof_CvSeq, sizeof_CvPoint, storage)
    squares = CvSeq_CvPoint.cast(squares)

    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.GetSubRect(timg, (0, 0, sz[0], sz[1]))

    # down-scale and upscale the image to filter out the noise
    cv.PyrDown(subimage, pyr, 7)
    cv.PyrUp(pyr, subimage, 7)
    tgray = cv.CreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.Split(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if (l == 0):
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.Canny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.Dilate(gray, gray, None, 1)
            else:
                # apply threshold if l!=0:
                #     tgray(x, y) = gray(x, y) < (l+1)*255/N ? 255 : 0
                cv.Threshold(tgray, gray, (l + 1) * 255 / N, 255,
                             cv.CV_THRESH_BINARY)

            # find contours and store them all as a list
            count, contours = cv.FindContours(gray, storage, sizeof_CvContour,
                                              cv.CV_RETR_LIST,
                                              cv.CV_CHAIN_APPROX_SIMPLE,
                                              (0, 0))

            if not contours:
                continue

            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.ApproxPoly(contour, sizeof_CvContour, storage,
                                       cv.CV_POLY_APPROX_DP,
                                       cv.ContourPerimeter(contours) * 0.02, 0)
                # square contours should have 4 vertices after approximation
                # relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if (result.total == 4 and abs(cv.ContourArea(result)) > 1000
                        and cv.CheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if (i >= 2):
                            t = abs(
                                angle(result[i], result[i - 2], result[i - 1]))
                            if s < t:
                                s = t
                    # if cosines of all angles are small
                    # (all angles are ~90 degree) then write quadrangle
                    # vertices to resultant sequence
                    if (s < 0.3):
                        for i in range(4):
                            squares.append(result[i])

    return squares
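
# `angle` is referenced above but not shown; the canonical squares-demo helper
# returns the cosine of the angle at pt0 between the vectors pt1-pt0 and
# pt2-pt0 (a sketch with the assumed semantics):
import math

def angle(pt1, pt2, pt0):
    dx1, dy1 = pt1[0] - pt0[0], pt1[1] - pt0[1]
    dx2, dy2 = pt2[0] - pt0[0], pt2[1] - pt0[1]
    num = dx1 * dx2 + dy1 * dy2
    den = math.sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2)) + 1e-10
    return num / den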
Example #22
def VirtualMirror():
    cv.NamedWindow("RGB_remap", cv.CV_WINDOW_NORMAL)
    cv.NamedWindow("Depth_remap", cv.CV_WINDOW_AUTOSIZE)
    cv.NamedWindow('dst', cv.CV_WINDOW_NORMAL)
    cv.SetMouseCallback("Depth_remap", on_mouse, None)
    print "Virtual Mirror"
    print "Calibrated 4 Screen corner= ", sn4_ref
    print "Corner 1-2 = ", np.linalg.norm(sn4_ref[0] - sn4_ref[1])
    print "Corner 2-3 = ", np.linalg.norm(sn4_ref[1] - sn4_ref[2])
    print "Corner 3-4 = ", np.linalg.norm(sn4_ref[2] - sn4_ref[3])
    print "Corner 4-1 = ", np.linalg.norm(sn4_ref[3] - sn4_ref[0])
    global head_pos
    global head_virtual
    global scene4_cross
    head_pos = np.array([-0.2, -0.2, 1.0])  #Head_detect()

    while 1:
        (depth, _) = freenect.sync_get_depth()
        (rgb, _) = freenect.sync_get_video()
        #print type(depth)
        img = array2cv(rgb[:, :, ::-1])
        im = array2cv(depth.astype(np.uint8))
        #modularize this part for update_on() and loopcv()
        #q = depth
        X, Y = np.meshgrid(range(640), range(480))
        d = 2  #downsampling if need
        projpts = calibkinect.depth2xyzuv(depth[::d, ::d], X[::d, ::d],
                                          Y[::d, ::d])
        xyz, uv = projpts

        if tracking == 0:
            #*********************************
            if pt is not None:
                print "=================="
                (x_d, y_d) = pt
                print "x=", x_d, " ,y=", y_d
                #print depth.shape
                #Watch out the indexing for depth col,row = 480,640
                d_raw = np.array([depth[y_d, x_d]])
                u_d = np.array([x_d])
                v_d = np.array([y_d])

                print "d_raw= ", d_raw
                print "u_d= ", u_d
                print "v_d= ", v_d
                head3D, head2D = calibkinect.depth2xyzuv(d_raw, u_d, v_d)
                print "XYZ=", head3D
                print "XYZonRGBplane=", head2D

                head_pos = head3D[0]
                #print "head_pos.shape",head_pos.shape
                print "head_pos= ", head_pos
                cv.WaitKey(100)
                cv.Circle(im, (x_d, y_d), 4, (0, 0, 255, 0), -1, 8, 0)
                cv.Circle(im, (int(head2D[0, 0]), int(head2D[0, 1])), 2,
                          (255, 255, 255, 0), -1, 8, 0)

            #*********************************
        elif tracking == 1:
            #find the nearest point (nose) as reference for right eye position
            print "nose"
            inds = np.nonzero(xyz[:, 2] > 0.5)
            #print xyz.shape
            new_xyz = xyz[inds]
            #print new_xyz.shape
            close_ind = np.argmin(new_xyz[:, 2])
            head_pos = new_xyz[close_ind, :] + (0.03, 0.04, 0.01)
            #print head_pos.shape
            #print head_pos

        elif tracking == 2:
            #find the closest point as eye position
            print "camera"
            inds = np.nonzero(xyz[:, 2] > 0.5)
            #print xyz.shape
            new_xyz = xyz[inds]
            #print new_xyz.shape
            close_ind = np.argmin(new_xyz[:, 2])
            head_pos = new_xyz[close_ind, :]
            #print head_pos.shape
            #print head_pos

        else:
            print "please select a tracking mode"

        head_virtual = MirrorReflection(sn4_ref[0:3, :], head_pos)
        print "head_virtual= ", head_virtual

        rgbK = np.array([[520.97092069697146, 0.0, 318.40565581396697],
                         [0.0, 517.85544366622719, 263.46756370601804],
                         [0.0, 0.0, 1.0]])
        rgbD = np.array([[0.22464481251757576], [-0.47968370787671893], [0.0],
                         [0.0]])
        irK = np.array([[588.51686020601733, 0.0, 320.22664144213843],
                        [0.0, 584.73028132692866, 241.98395817513071],
                        [0.0, 0.0, 1.0]])
        irD = np.array([[-0.1273506872313161], [0.36672476189160591], [0.0],
                        [0.0]])

        mapu = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
        mapv = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
        mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
        mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)

        cv.InitUndistortMap(rgbK, rgbD, mapu, mapv)
        cv.InitUndistortMap(irK, irD, mapx, mapy)

        if 1:
            rgb_remap = cv.CloneImage(img)
            cv.Remap(img, rgb_remap, mapu, mapv)

            depth_remap = cv.CloneImage(im)
            cv.Remap(im, depth_remap, mapx, mapy)

        scene4_cross = Cross4Pts.CrossPts(xyz, uv, head_pos, head_virtual,
                                          sn4_ref)
        #[warp] Add whole warpping code here
        #[warp] points = Scene4Pts() as warpping 4 pts
        #Flip the dst image!!!!!!!!!
        #ShowImage("rgb_warp", dst)

        #Within/out of the rgb range
        #Mapping Destination (width, height)=(x,y)

        #Warning: the order of pts in clockwise: pt1(L-T),pt2(R-T),pt3(R-B),pt4(L-B)
        #points = [(test[0,0],test[0,1]), (630.,300.), (700.,500.), (400.,470.)]
        points = [(scene4_cross[0, 0], scene4_cross[0, 1]),
                  (scene4_cross[1, 0], scene4_cross[1, 1]),
                  (scene4_cross[2, 0], scene4_cross[2, 1]),
                  (scene4_cross[3, 0], scene4_cross[3, 1])]
        #Warping the image without flipping (camera image)
        #npoints  = [(0.,0.), (640.,0.), (640.,480.), (0.,480.)]
        #Warping the image with flipping (mirror flip image)
        npoints = [(640., 0.), (0., 0.), (0., 480.), (640., 480.)]
        mat = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.GetPerspectiveTransform(points, npoints, mat)

        #src = cv.CreateImage( cv.GetSize(img), cv.IPL_DEPTH_32F, 3 )
        src = cv.CreateImage(cv.GetSize(rgb_remap), cv.IPL_DEPTH_32F, 3)
        #cv.ConvertScale(img,src,(1/255.00))
        cv.ConvertScale(rgb_remap, src, (1 / 255.00))

        dst = cv.CloneImage(src)
        cv.Zero(dst)
        cv.WarpPerspective(src, dst, mat)
        #************************************************************************

        #Remap the rgb and depth image
        #Warping will use remap rgb image as src

        if 1:
            cv.ShowImage("RGB_remap", rgb_remap)  #rgb[200:440,300:600,::-1]
            cv.ShowImage("Depth_remap", depth_remap)
            cv.ShowImage("dst", dst)  #warp rgb image

        if cv.WaitKey(5) == 27:
            cv.DestroyWindow("RGB_remap")
            cv.DestroyWindow("Depth_remap")
            cv.DestroyWindow("dst")
            break
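
# MirrorReflection is called above but defined elsewhere; a standard
# point-across-plane reflection sketch (plane given by three reference
# points; semantics assumed from the call site):
import numpy as np

def MirrorReflection(plane_pts, p):
    # unit normal of the plane through the three reference points
    n = np.cross(plane_pts[1] - plane_pts[0], plane_pts[2] - plane_pts[0])
    n = n / np.linalg.norm(n)
    # reflect p across the plane
    return p - 2.0 * np.dot(p - plane_pts[0], n) * n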
Example #23
cv.SetCaptureProperty(captura, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

#cv.NamedWindow("Fundo", 0)
cv.NamedWindow("Webcam", 1)
cv.NamedWindow("Mascara", 0)
cv.NamedWindow("Cinza", 1)

mascara = cv.CreateImage((640, 480), 8, 3)
cinza = cv.CreateImage((640, 480), 8, 1)

while True:
    print("Por Favor tire uma foto do fundo estatico do seu video.")
    print("Aperte a tecla espaco.")
    if cv.WaitKey(0) % 0x100 == 32:
        primeiraImagem = cv.QueryFrame(captura)
        fundo = cv.CloneImage(primeiraImagem)
        cv.Smooth(fundo, fundo, cv.CV_GAUSSIAN, 3)
        print("Tirou uma Foto !")
        break

while True:
    maiorArea = 0

    imagem = cv.QueryFrame(captura)
    cv.Smooth(imagem, imagem, cv.CV_GAUSSIAN, 3)

    cv.AbsDiff(imagem, fundo, mascara)
    cv.CvtColor(mascara, cinza, cv.CV_BGR2GRAY)
    cv.Threshold(cinza, cinza, 50, 255, cv.CV_THRESH_BINARY)

    cv.Dilate(cinza, cinza, None, 4)
Example #24
def pegarIris(caminho, nameFoto="teste.bmp", salvarImage=True):
    orig = cv2.cv.LoadImage(caminho)

    orig2 = cv2.cv.CloneImage(orig)

    # create tmp images
    grey_scale = cv.CreateImage(cv.GetSize(orig), 8, 1)
    processedPupila = cv.CreateImage(cv.GetSize(orig), 8, 1)
    processedIris = cv.CreateImage(cv.GetSize(orig), 8, 1)

    cv.Smooth(orig, orig, cv.CV_GAUSSIAN, 3, 3)

    cv.CvtColor(orig, grey_scale, cv.CV_RGB2GRAY)
    cv.CvtColor(orig, processedIris, cv.CV_RGB2GRAY)

    cv.Smooth(grey_scale, processedPupila, cv.CV_GAUSSIAN, 15, 15)
    cv.Canny(processedPupila, processedPupila, 5, 70, 3)
    cv.Smooth(processedPupila, processedPupila, cv.CV_GAUSSIAN, 15, 15)
    cv2.cv.SaveImage("Processamento_final_pupila.jpg", processedPupila)
    #cv.ShowImage("pupila_processada", processedPupila)

    cv.Smooth(grey_scale, processedIris, cv.CV_GAUSSIAN, 15, 15)
    cv.Canny(processedIris, processedIris, 5, 70, 3)
    cv.Smooth(processedIris, processedIris, cv.CV_GAUSSIAN, 15, 15)
    cv.Smooth(processedIris, processedIris, cv.CV_GAUSSIAN, 15, 15)
    cv2.cv.SaveImage("Processamento_final_iris.jpg", processedPupila)
    #cv.ShowImage("pupila_processada2", processedIris)

    #cv.Erode(processedIris, processedIris, None, 10)
    #cv.Dilate(processedIris, processedIris, None, 10)
    #cv.Canny(processedIris, processedIris, 5, 70, 3)
    #cv.Smooth(processedIris, processedIris, cv.CV_GAUSSIAN, 15, 15)
    #cv.Smooth(processedIris, processedIris, cv.CV_GAUSSIAN, 15, 15)

    #cv.Smooth(processedPupila, processedIris, cv.CV_GAUSSIAN, 15, 15)
    #cv.ShowImage("Iris_processada", processedIris)
    #cv.Dilate(processedIris, processedIris, None, 10)

    storagePupila = cv.CreateMat(orig.width, 1, cv.CV_32FC3)
    storageIris = cv.CreateMat(orig.width, 1, cv.CV_32FC3)

    # these parameters need to be adjusted for every single image
    HIGH = 30
    LOW = 20

    HIGH2 = 120
    LOW2 = 60

    imgBranca = cv.CreateImage(cv.GetSize(orig), 8, 3)
    imgPreta = cv.CreateImage(cv.GetSize(orig), 8, 3)
    cv.Zero(imgPreta)
    cv.Not(imgPreta, imgBranca)

    imagemMaskPupila = cv.CloneImage(imgBranca)

    imagemMaskIris = cv.CloneImage(imgPreta)

    #try:
    # extract circles
    #cv2.cv.HoughCircles(processedIris, storageIris, cv.CV_HOUGH_GRADIENT, 3, 100.0,LOW,HIGH, LOW2, HIGH2)
    cv2.cv.HoughCircles(processedPupila, storagePupila, cv.CV_HOUGH_GRADIENT,
                        2, 100.0, LOW, HIGH)
    cv2.cv.HoughCircles(processedIris, storageIris, cv.CV_HOUGH_GRADIENT, 3,
                        100.0, LOW, HIGH, LOW2, HIGH2)

    # Pupil circles
    #for i in range(0, len(np.asarray(storagePupila))):
    RadiusPupila = int(np.asarray(storagePupila)[0][0][2])
    xPupila = int(np.asarray(storagePupila)[0][0][0])
    yPupila = int(np.asarray(storagePupila)[0][0][1])
    centerPupila = (xPupila, yPupila)
    #print "RadiusPupila %d" %RadiusPupila

    cv.Circle(imagemMaskPupila, centerPupila, RadiusPupila, cv.CV_RGB(0, 0, 0),
              -1, 8, 0)
    cv.Circle(orig, centerPupila, 1, cv.CV_RGB(0, 255, 0), -1, 8, 0)
    cv.Circle(orig, centerPupila, RadiusPupila, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    #cv.ShowImage("pupila"+str(0), orig)
    cv2.cv.SaveImage("macaraPupila.jpg", imagemMaskPupila)
    orig = cv.CloneImage(orig2)

    #cv.WaitKey(0)

    # Iris circles
    #for i in range(0, len(np.asarray(storageIris))):
    RadiusIris = int(np.asarray(storageIris)[0][0][2])
    xIris = int(np.asarray(storageIris)[0][0][0])
    yIris = int(np.asarray(storageIris)[0][0][1])
    centerIris = (xIris, yIris)
    #print "RadiusIris %d" %RadiusIris

    cv.Circle(imagemMaskIris, centerIris, RadiusIris, cv.CV_RGB(255, 255, 255),
              -1, 8, 0)
    cv2.cv.SaveImage("macaraIris.jpg", imagemMaskIris)
    cv.Circle(orig, centerIris, 1, cv.CV_RGB(0, 255, 0), -1, 8, 0)
    cv.Circle(orig, centerIris, RadiusIris, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    #cv.ShowImage("Iris"+str(0), orig)
    orig = cv.CloneImage(orig2)

    #cv.WaitKey(0)

    #except:
    #    print "nothing found"
    #    pass
    # create the final image
    finalAux = cv2.cv.CreateImage(cv.GetSize(orig), 8, 3)
    final = cv2.cv.CreateImage(cv.GetSize(orig), 8, 3)

    # extract the whole iris
    cv2.cv.And(orig, imagemMaskPupila, finalAux)
    cv2.cv.SaveImage("pupila_cortada.jpg", finalAux)
    cv2.cv.And(finalAux, imagemMaskIris, final)
    cv2.cv.SaveImage("iris_pupila_cortada.jpg", final)

    if salvarImage:
        cv2.cv.SaveImage(nameFoto, final)
    #cv.ShowImage("original with circles", final)
    #cv.WaitKey(0)
    return final
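
# A modern-cv2 sketch of the same circle detection (dp/minDist mirror the
# arguments above and param1/param2 mirror LOW/HIGH; all values are
# illustrative, and `gray` is an assumed 8-bit single-channel image):
import cv2
import numpy as np

circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=2, minDist=100.0,
                           param1=20, param2=30)
if circles is not None:
    x, y, r = np.uint16(np.around(circles))[0, 0]  # first detected circle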
Example #25
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        first = True

        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]

            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            while contour:
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)

            if len(points):
                center_point = reduce(
                    lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2),
                    points)
                cv.Circle(color_image, center_point, 40,
                          cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 30,
                          cv.CV_RGB(255, 100, 0), 1)
                cv.Circle(color_image, center_point, 20,
                          cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 10,
                          cv.CV_RGB(255, 100, 0), 1)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
Example #26
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)

    rng = cv.RNG(-1)

    print "Hot keys:"
    print "\tESC - quit the program"
    print "\tr - restore the original image"
    print "\tw - run watershed algorithm"
    print "\t  (before that, roughly outline several markers on the image)"

    cv.NamedWindow("image", 1)
    cv.NamedWindow("watershed transform", 1)

    img = cv.CloneImage(img0)
    img_gray = cv.CloneImage(img0)
    wshed = cv.CloneImage(img0)
    marker_mask = cv.CreateImage(cv.GetSize(img), 8, 1)
    markers = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_32S, 1)

    cv.CvtColor(img, marker_mask, cv.CV_BGR2GRAY)
    cv.CvtColor(marker_mask, img_gray, cv.CV_GRAY2BGR)

    cv.Zero(marker_mask)
    cv.Zero(wshed)

    cv.ShowImage("image", img)
    cv.ShowImage("watershed transform", wshed)

    sk = Sketcher("image", [img, marker_mask])
Example #27
	print "h", h
	print "v", v    
    #f = open ("/dev/ttyUSB0", "a")
    #f.write("h "+str(h)+chr(10))
    #f.write("v "+str(v)+chr(10))
    #f.close()

        
init_frames = 20
while True:
    im = cv.QueryFrame(camera)
    
    cv.ShowImage("CamView", im)
    
    mono = cv.CreateImage((im.width, im.height), cv.IPL_DEPTH_8U, 1)
    view = cv.CloneImage(im)

    cv.CvtColor(im, mono, cv.CV_RGB2GRAY)
    cv.ShowImage("Gray", mono)
    faces = cv.HaarDetectObjects(mono, hc, memstorage)

    for (x, y, w, h), n in faces:
        cv.Rectangle(view, (x, y), (x+w, y+h), (255, 255, 0))

    cv.ShowImage("Faces", view)

    #hsv = cv.CreateMat(im.width, im.height, cv.CV_8UC3)
    #hsv = cv.CloneImage(im)
    #cv.CvtColor(im, hsv, cv.CV_RGB2HSV)
    #cv.Smooth(hsv, hsv, cv.CV_MEDIAN, 3)
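
# `hc` and `memstorage` come from earlier in the original script; they would
# be created roughly like this (the cascade path is an assumption):
hc = cv.Load("haarcascade_frontalface_default.xml")
memstorage = cv.CreateMemStorage(0)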
Example #28
def DetectaSombra(frame, bg):

    dbg = 1

    if dbg:
        t1 = time.time()

    #print 'Detecting shadows in the image...'

    # build the grayscale and HSV working images for the frame
    imgCinza = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    imgHSV = cv.CloneImage(frame)
            
    imgH = cv.CloneImage(imgCinza)
    imgS = cv.CloneImage(imgCinza)
    imgV = cv.CloneImage(imgCinza)
    
    imgR = cv.CloneImage(imgCinza)
    imgG = cv.CloneImage(imgCinza)
    imgB = cv.CloneImage(imgCinza)
    

    bgCinza = cv.CreateImage(cv.GetSize(bg), cv.IPL_DEPTH_8U,1)
    bgHSV = cv.CloneImage(bg) 
            
    bgH = cv.CloneImage(bgCinza)
    bgS = cv.CloneImage(bgCinza)
    bgV = cv.CloneImage(bgCinza)
    
    bgR = cv.CloneImage(bgCinza)
    bgG = cv.CloneImage(bgCinza)
    bgB = cv.CloneImage(bgCinza)
    
    
    
    # split the frame and the background into their HSV and RGB channels
    cv.CvtColor(frame, imgHSV, cv.CV_BGR2HSV)            
    cv.Split(imgHSV, imgH, imgS, imgV, None)    
    cv.Split(frame, imgR, imgG, imgB, None)    
    
    cv.CvtColor(bg, bgHSV, cv.CV_BGR2HSV)            
    cv.Split(bgHSV, bgH, bgS, bgV, None)    
    cv.Split(bg, bgR, bgG, bgB, None)


    # begin the calculations used to find shadows
    ivbv = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Div(imgV, bgV, ivbv, 255)

    isbs = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U,1)
    cv.Sub(imgS, bgS, isbs)

    ihbh = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U,1)
    cv.AbsDiff(imgH, bgH, ihbh)

    # shadow-detection parameters
    alfa = 190
    beta = 210
    thrSat = 20
    thrHue = 50

    # a second set that overrides the initial values
    alfa = 220
    beta = 240
    thrSat = 90
    thrHue = 90

    nErode = 0
    nDilate = 0
    
    # process ivbv
    imgThr_ivbv = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    # cap values at beta
    cv.Threshold(ivbv, imgThr_ivbv, beta, 255, cv.CV_THRESH_TRUNC)
    # keep only values above alfa
    cv.Threshold(imgThr_ivbv, imgThr_ivbv, alfa, 255, cv.CV_THRESH_TOZERO)
    # binarize
    cv.Threshold(imgThr_ivbv, imgThr_ivbv, alfa, 255, cv.CV_THRESH_BINARY)


    # process isbs
    imgThr_isbs = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    # keep only values above thrSat
    cv.Threshold(isbs, imgThr_isbs, thrSat, 255, cv.CV_THRESH_BINARY)


    # process ihbh
    imgThr_ihbh = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    # keep only values below thrHue
    cv.Threshold(ihbh, imgThr_ihbh, thrHue, 255, cv.CV_THRESH_BINARY_INV)


    
    # a pixel is shadow where all three masks agree
    imgSombra = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

    cv.Not(imgThr_ivbv, imgThr_ivbv)
    cv.Not(imgThr_isbs, imgThr_isbs)

    cv.And(imgThr_ivbv, imgThr_isbs, imgSombra)

    cv.Not(imgThr_ihbh, imgThr_ihbh)

    cv.And(imgSombra, imgThr_ihbh, imgSombra)

    for i in range(nErode):
        cv.Erode(imgSombra, imgSombra)

    for i in range(nDilate):
        cv.Dilate(imgSombra, imgSombra)

    #if dbg:
    #    print 'Time to detect shadows: %.5f' % (time.time() - t1)
    # display the output frames

    # highlight the shadow in green over the frame
    frameDestacado = cv.CloneImage(frame)
    
    cv.Or(imgG, imgSombra, imgG)

    cv.Merge(imgR, imgG, imgB, None, frameDestacado)
    

    '''    
    cv.ShowImage('frameDestacado',frameDestacado)
    cv.WaitKey()
    '''
    
    
    retorno = {}
    retorno['sombra'] = imgSombra
    retorno['sombraDestacada'] = frameDestacado

    return retorno

    # NOTE: everything below follows the return above and is unreachable
    # debug-display code.
    cv.ShowImage('ivbv', ivbv)
    cv.ShowImage('isbs', isbs)
    cv.ShowImage('ihbh', ihbh)

    cv.ShowImage('imgThr_isbs', imgThr_isbs)
    cv.ShowImage('imgThr_ivbv', imgThr_ivbv)    
    cv.ShowImage('imgThr_ihbh', imgThr_ihbh)
    
    cv.ShowImage('imgSombra', imgSombra)

    
    
    cv.WaitKey()
    
    sys.exit()


    frameMerge = cv.CloneImage(frame)    
    cv.Merge(imgR, imgR, imgR,None, frameMerge)


       
    cv.ShowImage('frame', frame)
    cv.ShowImage('frameMerge', frameMerge)
    
    
    cv.ShowImage('imgR', imgR)
    cv.ShowImage('imgG', imgG)
    cv.ShowImage('imgB', imgB)
    
    cv.ShowImage('imgH', imgH)
    cv.ShowImage('imgS', imgS)
    cv.ShowImage('imgV', imgV)
    
    cv.WaitKey()

    return 0
Example #29
cv.NamedWindow('camera', cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow('threshed', cv.CV_WINDOW_AUTOSIZE)
#cv.NamedWindow('cropped', cv.CV_WINDOW_AUTOSIZE)

# initialize position array
positions_x, positions_y = [0] * SMOOTHNESS, [0] * SMOOTHNESS

# read from the camera
while 1:
    image = cv.QueryFrame(capture)
    #    image = cv.LoadImage("2012_automata.jpg")
    if not image:
        break

    # smooth the image
    image_smoothed = cv.CloneImage(image)
    cv.Smooth(image, image_smoothed, cv.CV_GAUSSIAN, 9)
    # threshold the smoothed image
    image_threshed = thresholded_image(image_smoothed)

    # blobify
    cv.Dilate(image_threshed, image_threshed, None, 3)
    cv.Erode(image_threshed, image_threshed, None, 3)

    blobContour = None

    # extract the edges from our binary image
    current_contour = cv.FindContours(cv.CloneImage(image_threshed),
                                      cv.CreateMemStorage(), cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)
    cv.DrawContours(image, current_contour, (0, 0, 255), (0, 100, 100), 4)
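
# thresholded_image is defined elsewhere in the original script; one
# plausible HSV-range sketch for this snippet (the color bounds are
# assumptions):
def thresholded_image(img):
    hsv = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, hsv, cv.CV_BGR2HSV)
    threshed = cv.CreateImage(cv.GetSize(img), 8, 1)
    cv.InRangeS(hsv, (0, 120, 120, 0), (10, 255, 255, 0), threshed)
    return threshed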
Example #30
            if x is not None:
                #                if  (cv.ContourArea(contourCluster) > 20):
                centerArr.append(contourCenter(contourCluster))
            contourCluster = contourCluster.h_next()
            if (not contourCluster):
                return centerArr
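
# contourCenter is used above but not shown; a standard centroid-from-moments
# sketch (semantics assumed from the call site):
def contourCenter(contour):
    m = cv.Moments(contour)
    area = cv.GetSpatialMoment(m, 0, 0)
    if area == 0:
        return None
    return (int(cv.GetSpatialMoment(m, 1, 0) / area),
            int(cv.GetSpatialMoment(m, 0, 1) / area))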


initialRotate = 30
cv.NamedWindow('camera', cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow('threshold', cv.CV_WINDOW_AUTOSIZE)
capture = cv.CaptureFromCAM(0)

#image = cv.QueryFrame(capture)
image = cv.LoadImage('2012_automata_1.png')
imageCopy = cv.CloneImage(image)
imageTreshold = thresholded_image(image, (0, 0, 0, 0), (0, 0, 0, 0))
dialatecount = 0
thresholdrange = []
hsvImg = hsv_image(image)
cv.ShowImage("camera", image)
cv.ShowImage('threshold', imageTreshold)

while 1:
    cv.SetMouseCallback("camera", setTreshold, 0)
    keyPressed = cv.WaitKey(10000)
    if keyPressed == 27:
        break
    elif keyPressed == -1:
        pass
    elif keyPressed == 114: