Example #1
def merge(*channels):
    debug = False
    assert len(channels) > 0

    firstDepth = channels[0].depth
    assert all([channel.depth == firstDepth for channel in channels])

    firstSize = cv.GetSize(channels[0])
    assert all([cv.GetSize(channel) == firstSize for channel in channels])

    # When there are only two inputs, allocate room for a third channel
    # so the merged image still has a displayable 3-channel layout.
    chans = 3 if len(channels) == 2 else len(channels)
    if debug: print "merging %i channels" % chans
    out = cv.CreateImage(cv.GetSize(channels[0]), channels[0].depth, chans)
    #print out

    if len(channels) == 4:
        cv.Merge(channels[0], channels[1], channels[2], channels[3], out)
    elif len(channels) == 3:
        cv.Merge(channels[0], channels[1], channels[2], None, out)
    elif len(channels) == 2:
        filler = cv.CreateImage(cv.GetSize(channels[0]), channels[0].depth, 1)
        cv.Merge(channels[0], channels[1], filler, None,
                 out)  #TODO: make pos of filler configable
    elif len(channels) == 1:
        cv.Merge(channels[0], None, None, None, out)
    return out
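Note: the legacy cv module used throughout these examples was removed in OpenCV 3. As a rough modern sketch of the same idea (the function name and the zero-filled padding channel are illustrative assumptions, not part of the example above), the merge could be written with cv2 and NumPy:

import cv2
import numpy as np

def merge_channels(*channels):
    # All inputs must be single-channel arrays of identical shape and dtype.
    first = channels[0]
    assert 1 <= len(channels) <= 4
    assert all(c.shape == first.shape and c.dtype == first.dtype
               for c in channels)
    # Pad a 2-channel input with a zero plane, as the example above does.
    if len(channels) == 2:
        channels = channels + (np.zeros_like(first),)
    return cv2.merge(channels)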
Example #2
 def hist_eq(self, frame):
     cv.Split(frame, self.B, self.G, self.R, None)
     cv.EqualizeHist(self.R, self.R)
     cv.EqualizeHist(self.B, self.B)
     cv.EqualizeHist(self.G, self.G)
     cv.Merge(self.B, self.G, self.R, None, self.Ieq)
     return self.Ieq
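For comparison, per-channel equalization is a one-liner per plane in cv2; a minimal sketch (function and variable names assumed, not from the snippet above):

import cv2

def hist_eq_bgr(frame):
    # Equalize each 8-bit BGR plane independently, then re-merge.
    b, g, r = cv2.split(frame)
    return cv2.merge([cv2.equalizeHist(b),
                      cv2.equalizeHist(g),
                      cv2.equalizeHist(r)])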
Example #3
 def mask(self, mask, r, g, b):
     cv.Mul(r, mask, self.thres_red_img)
     cv.Mul(g, mask, self.thres_green_img)
     cv.Mul(b, mask, self.thres_blue_img)
     cv.Merge(self.thres_blue_img, self.thres_green_img, self.thres_red_img,
              None, self.merged_frame)
     return self.merged_frame
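The same per-channel masking can be expressed with NumPy broadcasting; a sketch assuming mask holds 0/1 values and all arrays share one shape:

import numpy as np

def mask_bgr(mask, r, g, b):
    # Multiply each plane by the binary mask and stack in BGR order,
    # mirroring the cv.Mul/cv.Merge sequence above.
    return np.dstack([b * mask, g * mask, r * mask])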
Example #4
    def frame(self):
        _ = self.stdout.readline()
        #print line
        #print self.w,self.h
        y = self.stdout.read(self.w * self.h)
        u = self.stdout.read(self.w * self.h / 4)
        v = self.stdout.read(self.w * self.h / 4)
        if len(y) < self.w * self.h:
            raise EOFError

        cv.SetData(self.frame_y, y)
        cv.SetData(self.frame_u2, u)
        cv.SetData(self.frame_v2, v)

        cv.Resize(self.frame_u2, self.frame_u)
        cv.Resize(self.frame_v2, self.frame_v)

        cv.Merge(self.frame_y, self.frame_u, self.frame_v, None,
                 self.frame_col)
        cv.CvtColor(self.frame_col, self.frame_col, cv.CV_YCrCb2RGB)

        out = self.frame_col

        if self.size is not None:
            cv.Resize(self.frame_col, self.frame_resized)
            out = self.frame_resized

        return pv.Image(self.frame_y), pv.Image(self.frame_u), pv.Image(
            self.frame_v), pv.Image(out)
Example #5
def update_mhi(img, dst, diff_threshold):
    global last
    global mhi
    global mask
    timestamp = time.clock() / CLOCKS_PER_SEC  # get current time in seconds
    size = cv.GetSize(img)  # get current frame size
    idx1 = last
    if not mhi or cv.GetSize(mhi) != size:
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi)  # clear MHI at the beginning
        mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY)  # convert frame to grayscale
    idx2 = (last + 1) % N  # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh)  # get difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1,
                 cv.CV_THRESH_BINARY)  # and threshold it
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION)  # update MHI
    cv.CvtScale(mhi, mask, 255. / MHI_DURATION,
                (MHI_DURATION - timestamp) * 255. / MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
Example #6
def get_mask_with_contour(img,
                          ret_img=False,
                          ret_cont=False,
                          with_init_mask=False,
                          cont_color=cv.RGB(255, 50, 50),
                          normalize=True,
                          skin_version=1,
                          strong=False):
    if normalize:
        img = normalize_rgb(img, aggressive=0.005)
    mask = skin_mask(img) if skin_version == 1 else skin_mask2(img)

    di_mask = image_empty_clone(mask)
    cv.Dilate(mask, di_mask)

    seqs = cv.FindContours(cv.CloneImage(di_mask), memory(),
                           cv.CV_RETR_EXTERNAL)

    c_img = image_empty_clone(mask)
    cv.DrawContours(c_img, seqs, 255, 255, 10, -1)

    er_img = image_empty_clone(c_img)
    cv.Erode(c_img, er_img, iterations=2)

    seqs = cv.FindContours(cv.CloneImage(er_img), memory(),
                           cv.CV_RETR_EXTERNAL)
    if not seqs:
        print "no areas"
        return img, None
    seqs = cv.ApproxPoly(seqs,
                         memory(),
                         cv.CV_POLY_APPROX_DP,
                         parameter=3,
                         parameter2=1)

    result = []
    if ret_img:
        #        er_seq_img = cv.CreateImage(sizeOf(er_img), 8, 3)
        #        cv.Zero(er_seq_img)
        er_seq_img = cv.CloneImage(img)
        if with_init_mask:
            cv.Merge(mask, mask, mask, None, er_seq_img)

        if strong:
            cv.DrawContours(er_seq_img, seqs, cont_color, 0, 10, thickness=3)
            cv.DrawContours(er_seq_img,
                            seqs,
                            cv.RGB(0, 0, 0),
                            0,
                            10,
                            thickness=1)
        else:
            cv.DrawContours(er_seq_img, seqs, cont_color, 0, 10, thickness=1)
        result.append(er_seq_img)

    if ret_cont:
        result.append(seqs)

    return result
Example #7
def update_mhi(img, dst, diff_threshold):
    global last
    global mhi
    global storage
    global mask
    global orient
    global segmask
    timestamp = time.clock() / CLOCKS_PER_SEC # get current time in seconds
    size = cv.GetSize(img) # get current frame size
    idx1 = last
    if not mhi or cv.GetSize(mhi) != size:
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi) # clear MHI at the beginning
        orient = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    
    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale
    idx2 = (last + 1) % N # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh) # get difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY) # and threshold it
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION) # update MHI
    cv.CvtScale(mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
    cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
    for (area, value, comp_rect) in seq:
        if comp_rect[2] + comp_rect[3] > 100: # reject very small components
            color = cv.CV_RGB(255, 0, 0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            mhi_roi = cv.GetSubRect(mhi, comp_rect)
            orient_roi = cv.GetSubRect(orient, comp_rect)
            mask_roi = cv.GetSubRect(mask, comp_rect)
            angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)

            count = cv.Norm(silh_roi, None, cv.CV_L1, None) # calculate number of points within silhouette ROI
            if count < (comp_rect[2] * comp_rect[3] * 0.05):
                continue

            magnitude = 30.
            center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2))
            cv.Circle(dst, center, cv.Round(magnitude*1.2), color, 3, cv.CV_AA, 0)
            cv.Line(dst,
                    center,
                    (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                     cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
                    color,
                    3,
                    cv.CV_AA,
                    0)
Example #8
def image_callback(data):
    global running
    if running:
        image = bridge.imgmsg_to_cv(data, "bgr8")

        #normalize image
        cv.Split(image, rgb_r, rgb_g, rgb_b, None)
        red_mean = cv2.mean(np.asarray(rgb_r[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_r,
               dst=scaled_r,
               scale=128 / red_mean[0])
        green_mean = cv2.mean(np.asarray(rgb_g[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_g,
               dst=scaled_g,
               scale=128 / green_mean[0])
        blue_mean = cv2.mean(np.asarray(rgb_b[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_b,
               dst=scaled_b,
               scale=128 / blue_mean[0])
        cv.Merge(scaled_r, scaled_g, scaled_b, None, cv_image)

        cv.CvtColor(cv_image, hsv, cv.CV_BGR2HSV)  # --convert from BGR to HSV
        cv.CvtColor(cv_image, lab, cv.CV_BGR2Lab)

        cv.Split(hsv, hsv_h, hsv_s, hsv_v, None)
        cv.Split(cv_image, rgb_r, rgb_g, rgb_b, None)
        cv.Split(lab, lab_l, lab_a, lab_b, None)
        cv.Split(luv, luv_l, luv_u, luv_v, None)
        cv.Split(hls, hls_h, hls_l, hls_s, None)
        cv.Split(xyz, xyz_x, xyz_y, xyz_z, None)
        cv.Split(ycrcb, ycrcb_y, ycrcb_cr, ycrcb_cb, None)

        cv.Not(lab_a, a_not)
        cv.Sub(hsv_s, a_not, sa)
        cv.Sub(luv_u, hls_h, test)
        cv.Sub(hls_s, hls_h, sminh)

        threshold_red(sa)

        cv.ShowImage("red", red_dilated_image)

        red_contours, _ = cv2.findContours(
            image=np.asarray(red_dilated_image[:, :]),
            mode=cv.CV_RETR_EXTERNAL,
            method=cv.CV_CHAIN_APPROX_SIMPLE)

        print_lidar_projections(cv_image)

        circles = extract_circles(red_contours, [1, 0, 0])
        for x, y, radius in circles:
            cv.Circle(cv_image, (x, y), radius, [0, 0, 1], 3)

        cv.SetMouseCallback("camera feed", mouse_callback, hsv_image)
        cv.ShowImage("camera feed", cv_image)

        cv.WaitKey(3)
Example #9
    def scanline_numbers_to_planes(self, scanline_numbers):
        rows = scanline_numbers.height
        cols = scanline_numbers.width
        normal_vectors_x = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_x, -1)
        normal_vectors_y = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_y, 0)
        normal_vectors_z = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Copy(scanline_numbers, normal_vectors_z)

        cv.ConvertScale(normal_vectors_z,
                        normal_vectors_z,
                        scale=self.pixels_per_scanline)
        cv.AddS(normal_vectors_z, -self.center_pixel, normal_vectors_z)
        cv.ConvertScale(normal_vectors_z,
                        normal_vectors_z,
                        scale=1.0 / self.projector_model.fx())

        normal_vectors = cv.CreateMat(rows, cols, cv.CV_32FC3)
        cv.Merge(normal_vectors_x, normal_vectors_y, normal_vectors_z, None,
                 normal_vectors)

        # Bring the normal vectors into camera coordinates
        cv.Transform(normal_vectors, normal_vectors,
                     self.projector_to_camera_rotation_matrix)

        normal_vectors_split = [None] * 3
        for i in range(3):
            normal_vectors_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Split(normal_vectors, normal_vectors_split[0],
                 normal_vectors_split[1], normal_vectors_split[2], None)

        n_dot_p = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.SetZero(n_dot_p)
        for i in range(3):
            cv.ScaleAdd(normal_vectors_split[i],
                        self.projector_to_camera_translation_vector[i],
                        n_dot_p, n_dot_p)

        planes = cv.CreateMat(rows, cols, cv.CV_32FC4)
        cv.Merge(normal_vectors_split[0], normal_vectors_split[1],
                 normal_vectors_split[2], n_dot_p, planes)

        return planes
Example #10
def anaglyph(left_color, right_color):
    left_mono = cv.CreateImage(cv.GetSize(left_color), cv.IPL_DEPTH_8U, 1)
    right_mono = cv.CreateImage(cv.GetSize(right_color), cv.IPL_DEPTH_8U, 1)
    green = cv.CreateImage(cv.GetSize(right_color), cv.IPL_DEPTH_8U, 1)
    result = cv.CreateImage(cv.GetSize(right_color), cv.IPL_DEPTH_8U, 3)

    cv.CvtColor(left_color, left_mono, cv.CV_RGB2GRAY)
    cv.CvtColor(right_color, right_mono, cv.CV_RGB2GRAY)
    cv.Merge(left_mono, green, right_mono, None, result)
    return result
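A compact cv2 version of the same anaglyph composition, sketched under the assumption of equally sized 8-bit RGB inputs:

import cv2
import numpy as np

def anaglyph(left_color, right_color):
    left_mono = cv2.cvtColor(left_color, cv2.COLOR_RGB2GRAY)
    right_mono = cv2.cvtColor(right_color, cv2.COLOR_RGB2GRAY)
    # Left eye in the first output channel, right eye in the third,
    # with an all-zero middle channel, as in the example above.
    return cv2.merge([left_mono, np.zeros_like(left_mono), right_mono])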
Example #11
    def get_pixel_associations(self):
        col_associations = self.get_projector_line_associations(0)
        row_associations = self.get_projector_line_associations(1)

        rospy.loginfo("Merging data...")

        pixel_associations = cv.CreateMat(self.camera_info.height,
                                          self.camera_info.width, cv.CV_32SC2)
        cv.Merge(col_associations, row_associations, None, None,
                 pixel_associations)

        return pixel_associations
Example #12
 def run(self): 
     while True: 
         img = cv.QueryFrame( self.capture ) 
                     
         #blur the source image to reduce color noise
         cv.Smooth(img, img, cv.CV_BLUR, 3)
         
         #convert the image to HSV (Hue, Saturation, Value) so it's
         #easier to determine the color to track (hue)
         hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3) 
         cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV) 
         
         #limit all pixels that don't match our criteria; in this case we are
         #looking for purple, but you can adjust the first value in both
         #tuples, which is the hue range (120, 140). OpenCV uses 0-180 as
         #the hue range for the HSV color model
         thresholded_img =  cv.CreateImage(cv.GetSize(hsv_img), 8, 1) 
         cv.InRangeS(hsv_img, (115, 75, 75), (135, 255, 255), thresholded_img) 
         
         #determine the objects moments and check that the area is large  
         #enough to be our object 
         thresholded_img2 = cv.GetMat(thresholded_img)
         moments = cv.Moments(thresholded_img2,0) 
         area = cv.GetCentralMoment(moments, 0, 0) 
         
         #there can be noise in the video so ignore objects with small areas 
         if(area > 100000): 
             #determine the x and y coordinates of the center of the object 
             #we are tracking by dividing the 1, 0 and 0, 1 moments by the area 
             x = cv.GetSpatialMoment(moments, 1, 0)/area 
             y = cv.GetSpatialMoment(moments, 0, 1)/area 
         
             # print 'x: ' + str(x) + ' y: ' + str(y) + ' area: ' + str(area) 
             
             
             x = int(x)
             y = int(y)
             
             #create an overlay to mark the center of the tracked object 
             overlay = cv.CreateImage(cv.GetSize(img), 8, 3) 
             
             cv.Circle(overlay, (x, y), 2, (255, 255, 255), 20) 
             cv.Add(img, overlay, img) 
             #add the thresholded image back to the img so we can see what was
             #left after it was applied; img has three channels, so the single
             #thresholded plane must be replicated into each of them
             cv.Merge(thresholded_img, thresholded_img, thresholded_img, None, img)
          
         #display the image  
         cv.ShowImage(color_tracker_window, img) 
         
         if cv.WaitKey(10) == 27: 
             break 
Example #13
    def process_motion(self,img):
        center = (-1, -1)
        # a lot of stuff from this section was taken from the code motempl.py, 
        #  openCV's python sample code
        timestamp = time.clock() / self.clocks_per_sec # get current time in seconds
        idx1 = self.last
        cv.CvtColor(img, self.buf[self.last], cv.CV_BGR2GRAY) # convert frame to grayscale
        idx2 = (self.last + 1) % self.n_frames 
        self.last = idx2
        silh = self.buf[idx2]
        cv.AbsDiff(self.buf[idx1], self.buf[idx2], silh) # get difference between frames
        cv.Threshold(silh, silh, 30, 1, cv.CV_THRESH_BINARY) # and threshold it
        cv.UpdateMotionHistory(silh, self.mhi, timestamp, self.mhi_duration) # update MHI
        cv.ConvertScale(self.mhi, self.mask, 255./self.mhi_duration, 
                        (self.mhi_duration - timestamp)*255./self.mhi_duration)
        cv.SetZero(img)
        cv.Merge(self.mask, None, None, None, img)
        cv.CalcMotionGradient(self.mhi, self.mask, self.orient, self.max_time_delta, self.min_time_delta, 3)
        seq = cv.SegmentMotion(self.mhi, self.segmask, self.storage, timestamp, self.max_time_delta)
        inc = 0
        a_max = 0
        max_rect = -1
    
        # there are lots of things moving around;
        #  in this case just find the biggest change on the image
        for (area, value, comp_rect) in seq:
            if comp_rect[2] + comp_rect[3] > 60: # reject small changes
                if area > a_max: 
                    a_max = area
                    max_rect = inc
            inc += 1

        # found it, now just do some processing on the area.
        if max_rect != -1:
            (area, value, comp_rect) = seq[max_rect]
            color = cv.CV_RGB(255, 0, 0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            # calculate number of points within silhouette ROI
            count = cv.Norm(silh_roi, None, cv.CV_L1, None)

            # this rectangle contains the overall motion ROI
            cv.Rectangle(self.motion, (comp_rect[0], comp_rect[1]), 
                         (comp_rect[0] + comp_rect[2], 
                          comp_rect[1] + comp_rect[3]), (0,0,255), 1)

            # the goal is to report back a center of movement contained in a rectangle
            # adjust the height based on the number generated by the slider bar
            h = int(comp_rect[1] + (comp_rect[3] * (float(self.height_value) / 100)))
            # then calculate the center
            center = ((comp_rect[0] + comp_rect[2] / 2), h)

        return center
Example #14
def add_alpha_channel(bgr, alpha_val):
    w, h = cv.GetSize(bgr)
    bgra = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 4)
    alpha = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
    chan1 = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
    chan2 = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
    chan3 = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
    for c in [chan1, chan2, chan3, bgra, alpha]:
        cv.Set(c, 0)

    cv.Split(bgr, chan1, chan2, chan3, None)
    cv.Set(alpha, alpha_val)
    cv.Merge(chan1, chan2, chan3, alpha, bgra)
    return bgra
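With cv2 the alpha channel can be attached without manual splitting; a minimal sketch:

import cv2

def add_alpha_channel(bgr, alpha_val):
    # COLOR_BGR2BGRA allocates the 4-channel image; then overwrite
    # its alpha plane with the constant value.
    bgra = cv2.cvtColor(bgr, cv2.COLOR_BGR2BGRA)
    bgra[:, :, 3] = alpha_val
    return bgra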
Example #15
def merge_images(img0, img1, img2):
    """ takes images in bgr order """
    images = profile.evaluate(lambda: load_images([img0, img1, img2]),
                              "Image loading")
    for i in images:
        assert i.depth == cv.IPL_DEPTH_8U
    images = profile.evaluate(lambda: resize(images), "Image resizing")
    mergedImage = cv.CreateImage(min_size(images), cv.IPL_DEPTH_8U, 3)
    profile.evaluate(
        lambda: cv.Merge(images[0], images[1], images[2], None, mergedImage),
        "Image merging")

    return mergedImage
Example #16
def remove_channels(in_bgra, channel_indices):
    w, h = cv.GetSize(in_bgra)
    chan1 = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
    chan2 = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
    chan3 = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
    chan4 = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
    bgra = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 4)
    for c in [chan1, chan2, chan3, chan4, bgra]:
        cv.Set(c, 0)
    cv.Split(in_bgra, chan1, chan2, chan3, chan4)
    chan_list = [chan1, chan2, chan3, chan4]
    for i in channel_indices:
        chan_list[i] = None
    chan_list.append(bgra)
    cv.Merge(*chan_list)
    return bgra
Example #17
def doCanny_3(im, low, high, aperture, out=None):
    in1 = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1)
    in2 = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1)
    in3 = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1)

    cv.Split(im, in1, in2, in3, None)

    out1 = canny(in1, low, high, aperture)
    out2 = canny(in2, low, high, aperture)
    out3 = canny(in3, low, high, aperture)

    if not out: out = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 3)
    cv.Merge(out1, out2, out3, None, out)

    return out
Example #18
def clone(src, _type='gray2bgr'):
    """
        Clone the image.
        @param _type: a string, and it can be one of the following:
            "gray2bgr"
        @returns: the cloned image
    """
    if _type.lower() == "gray2bgr":
        ret = cv.CreateImage((src.width, src.height), cv.IPL_DEPTH_8U, 3)
        r = cv.CloneImage(src)
        g = cv.CloneImage(src)
        b = cv.CloneImage(src)
        cv.Merge(r, g, b, None, ret)
        return ret
    else:
        raise ValueError("Unknown _type value.")
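In cv2 the gray-to-BGR replication is a single conversion call; a sketch:

import cv2

def gray_to_bgr(src):
    # Copies the gray plane into all three output channels.
    return cv2.cvtColor(src, cv2.COLOR_GRAY2BGR)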
Example #19
    def process_image(self, image, texture_type):
        spare = image
        #return image
        # get the size of the current image
        size = (image.width, image.height)

        cv.Smooth(spare, spare, cv.CV_GAUSSIAN, BLUR_SIZE, BLUR_SIZE)

        #out = cv.CreateImage( size, 8, 1)
        cannyB = cv.CreateImage(size, 8, 1)
        cannyR = cv.CreateImage(size, 8, 1)
        sobel = cv.CreateImage(size, 8, 1)
        yuv = cv.CreateImage(size, 8, 3)
        dest_canny = cv.CreateImage(size, 8, 3)
        gray = cv.CreateImage(size, 8, 1)

        cv.CvtColor(spare, yuv, cv.CV_BGR2YCrCb)
        cv.Split(yuv, gray, None, None, None)

        cv.Canny(gray, cannyB, 5, 50, 3)
        cv.Canny(gray, cannyR, 5, 150, 3)
        cv.Sobel(gray, sobel, 1, 0, 3)

        #cv.ConvertScale(sobel, sobel, -1, 255 )
        #cv.ConvertScale(cannyR, cannyR, -1, 155 )
        #cv.ConvertScale(gray, gray, -1, 255 )
        #cv.ConvertScale(cannyB, cannyB, -1, 255 )

        cv.Smooth(cannyR, cannyR, cv.CV_GAUSSIAN, 3, 3)
        cv.Smooth(cannyB, cannyB, cv.CV_GAUSSIAN, 3, 3)
        #cv.CvtColor( canny, canny, cv.CV_YCrCb2BGR )
        #cv.Merge(sobel, gray, sobel, None, dest)
        cv.Merge(cannyR, cannyB, gray, None, dest_canny)

        #cv.Merge(sobel, sobel, sobel, None, dest_sobel)
        #cv.Smooth( dest, dest, cv.CV_GAUSSIAN, BLUR_SIZE, BLUR_SIZE )
        #cv.ShowImage( 'canny', dest)

        #cv.Merge
        #cv.Smooth( dest_canny, dest_canny, cv.CV_GAUSSIAN, BLUR_SIZE, BLUR_SIZE )

        #success = True
        self.prevImage = image

        options = {'normal': dest_canny, 'post': dest_canny}
        #return yuv
        return options[texture_type]
Example #20
    def combine_images(self, images):

        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.4, 1.4, 0, 2, 8)

        for i,(image, name) in enumerate(images):
            if image.nChannels == 1:
                cv.Merge(image, image, image, None, self.temp)
            else:
                cv.Copy(image, self.temp)
            xoffset = (i % self.xwindows) * self.smallsize[0]
            yoffset = (i / self.xwindows) * self.smallsize[1]
            cv.SetImageROI(self.combined, (xoffset, yoffset, self.smallsize[0],
                self.smallsize[1]))
            cv.Copy(self.temp, self.combined)
            cv.PutText(self.combined, name, (5, 40), font, (30, 200, 200))
            cv.ResetImageROI(self.combined)
        return self.combined
Example #21
def run_tesseract(image):
    if not tesseract_api.Init(tessdata_prefix, 'eng', tesseract.OEM_DEFAULT):
        print >> sys.stderr, "Error initializing tesseract"
        exit(1)
    tesseract_api.SetPageSegMode(tesseract.PSM_SINGLE_LINE)
    # api.SetPageSegMode(tesseract.PSM_AUTO)
    # cvimg = cv2.imread('test.png')
    # tesseract_api.SetImage(cvimg)
    # return "123", np.array([60])
    source = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 3)
    cv.Merge(image, image, image, None, source)
    tesseract_api.SetImage(cv2num(source))
    text = tesseract_api.GetUTF8Text()
    text = text.encode('ascii', 'ignore').strip()
    score = np.array(tesseract_api.AllWordConfidences())
    tesseract_api.Clear()
    return text, score
Example #22
def saturate(frame):
    cv.CvtColor(frame, frame, cv.CV_BGR2HSV)
    hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
    sat = cv.CreateImage(cv.GetSize(frame), 8, 1)
    val = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.Split(frame, hue, sat, val, None)
    for y in range(0, frame.height):
        for x in range(0, frame.width):
            saturation = cv.GetReal2D(sat, y, x)
            if (saturation < 200):
                saturation = saturation + 50
                cv.SetReal2D(sat, y, x, saturation)
    cv.Merge(hue, sat, val, None, frame)
    del hue
    del sat
    del val
    cv.CvtColor(frame, frame, cv.CV_HSV2BGR)
    return frame
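The per-pixel GetReal2D/SetReal2D loop above is very slow in Python; a vectorized NumPy sketch of the same saturation boost (add 50 wherever saturation is below 200):

import cv2
import numpy as np

def saturate(frame_bgr):
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    # Widen to int16 so the +50 cannot wrap around in uint8.
    s = s.astype(np.int16)
    s = np.where(s < 200, s + 50, s).astype(np.uint8)
    return cv2.cvtColor(cv2.merge([h, s, v]), cv2.COLOR_HSV2BGR)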
Example #23
    def hsv_normalise(self, frame):
        """Should normalise scene lighting

        Works by setting the HSV Value component to a constant.
        However, turns out that this does not adequately remove shadows.
        Maybe parameter tweaking the Value constant might do something? TODO
        """
        tmp = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, tmp, cv.CV_BGR2HSV)

        H, S, V = [cv.CreateImage(cv.GetSize(frame), 8, 1) for _ in range(3)]
        cv.Split(tmp, H, S, V, None)

        cv.Set(V, 140)

        cv.Merge(H, S, V, None, tmp)
        cv.CvtColor(tmp, tmp, cv.CV_HSV2BGR)
        out = tmp

        return out
Example #24
def project_pixels_to_3d_rays(pixels, model):
    x = cv.CreateMat(pixels.height, pixels.width, cv.CV_32FC1)
    y = cv.CreateMat(pixels.height, pixels.width, cv.CV_32FC1)
    cv.Split(pixels, x, y, None, None)

    x_squared = cv.CreateMat(pixels.height, pixels.width, cv.CV_32FC1)
    cv.Pow(x, x_squared, 2)

    y_squared = cv.CreateMat(pixels.height, pixels.width, cv.CV_32FC1)
    cv.Pow(y, y_squared, 2)

    inverse_norm = cv.CreateMat(pixels.height, pixels.width, cv.CV_32FC1)
    cv.Add(x_squared, y_squared, inverse_norm)
    cv.AddS(inverse_norm, 1, inverse_norm)
    cv.Pow(inverse_norm, inverse_norm, -0.5)

    cv.Mul(x, inverse_norm, x)
    cv.Mul(y, inverse_norm, y)

    result = cv.CreateMat(pixels.height, pixels.width, cv.CV_32FC3)
    cv.Merge(x, y, inverse_norm, None, result)
    return result
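The same ray construction, sketched with NumPy broadcasting under the assumption that pixels is an H x W x 2 float array of normalized image coordinates:

import numpy as np

def project_pixels_to_3d_rays(pixels):
    x, y = pixels[..., 0], pixels[..., 1]
    # Unit-length rays in the direction (x, y, 1).
    inv_norm = 1.0 / np.sqrt(x * x + y * y + 1.0)
    return np.stack([x * inv_norm, y * inv_norm, inv_norm], axis=-1)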
Example #25
    def combine_images(self, images):
        """ render a list of images into one opencv frame """
        comb_width = self.smallwidth * XWINDOWS
        comb_height = self.smallheight * int(
            math.ceil(len(images) / float(XWINDOWS)))
        print '%d %d' % (comb_height, self.smallheight)
        self.combined = cv.CreateImage((comb_width, comb_height),
                                       cv.IPL_DEPTH_8U, 3)

        for i, image in enumerate(images):
            if image.nChannels == 1:
                cv.Merge(image, image, image, None, self.temp3)
            else:
                cv.Copy(image, self.temp3)
            xoffset = (i % XWINDOWS) * self.smallsize[0]
            yoffset = (i / XWINDOWS) * self.smallsize[1]
            cv.SetImageROI(
                self.combined,
                (xoffset, yoffset, self.smallsize[0], self.smallsize[1]))
            cv.Copy(self.temp3, self.combined)
            cv.ResetImageROI(self.combined)
        return self.combined
Example #26
    def track_blobs(self, frame):
        spare = cv.CloneImage(frame)
        size = cv.GetSize(frame)

        hsv = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        out = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        thresh = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

        print self.min_hue, self.value_dict['min_hue']

        cv.Smooth(spare, spare, cv.CV_BLUR, 22, 22)
        cv.CvtColor(spare, hsv, cv.CV_BGR2HSV)

        cv.InRangeS(hsv, cv.Scalar(self.min_hue, self.min_sat, self.min_val),
                    cv.Scalar(self.max_hue, self.max_sat, self.max_val),
                    thresh)

        cv.Merge(thresh, thresh, thresh, None, out)
        contours = cv.FindContours(thresh, self.storage, cv.CV_RETR_LIST,
                                   cv.CV_CHAIN_APPROX_SIMPLE)

        try:
            M = cv.Moments(contours)
        except:
            return out

        m0 = cv.GetCentralMoment(M, 0, 0)

        if m0 > 1.0:
            self.cx = cv.GetSpatialMoment(M, 1, 0) / m0
            self.cy = cv.GetSpatialMoment(M, 0, 1) / m0
            cv.Circle(frame, (int(self.cx), int(self.cy)), 2, (255, 0, 0), 20)
        if self.show_frame is not True:
            return out
        return frame
Example #27
def normalizeImage(frame):
    redChannel = cv.CreateImage(cv.GetSize(frame), 8, 1)
    greenChannel = cv.CreateImage(cv.GetSize(frame), 8, 1)
    blueChannel = cv.CreateImage(cv.GetSize(frame), 8, 1)
    redAvg = cv.CreateImage(cv.GetSize(frame), 8, 1)
    greenAvg = cv.CreateImage(cv.GetSize(frame), 8, 1)
    blueAvg = cv.CreateImage(cv.GetSize(frame), 8, 1)
    resImg = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Split(frame, blueChannel, greenChannel, redChannel, None)

    hsvImg = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, hsvImg, cv.CV_BGR2HSV)
    hueChannel = cv.CreateImage(cv.GetSize(frame), 8, 1)
    satChannel = cv.CreateImage(cv.GetSize(frame), 8, 1)
    valChannel = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.Split(hsvImg, hueChannel, satChannel, valChannel, None)

    for y in range(0, frame.height):
        for x in range(0, frame.width):
            redValue = cv.GetReal2D(redChannel, y, x)
            greenValue = cv.GetReal2D(greenChannel, y, x)
            blueValue = cv.GetReal2D(blueChannel, y, x)
            sum = redValue + greenValue + blueValue + 0.0
            if (sum < 1.0):
                sum = 1.0
            cv.SetReal2D(redAvg, y, x, redValue / sum * 255)
            cv.SetReal2D(greenAvg, y, x, greenValue / sum * 255)
            cv.SetReal2D(blueAvg, y, x, blueValue / sum * 255)
    cv.Merge(blueAvg, greenAvg, redAvg, None, resImg)
    del redChannel
    del greenChannel
    del blueChannel
    del redAvg
    del greenAvg
    del blueAvg
    return resImg
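The nested loop makes this normalization extremely slow; the same per-pixel chromaticity scaling as a NumPy sketch (function name assumed):

import numpy as np

def normalize_image(frame_bgr):
    # Divide each channel by the per-pixel B+G+R sum (clamped to >= 1)
    # and rescale to the 0..255 range, as the loop above does.
    f = frame_bgr.astype(np.float64)
    s = np.maximum(f.sum(axis=2, keepdims=True), 1.0)
    return (f / s * 255).astype(np.uint8)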
Example #28
    def combine_images(self, images):
        """ render a list of images into one opencv frame """
        comb_width = self.smallwidth * XWINDOWS
        comb_height = self.smallheight * int(
            math.ceil(len(images) / float(XWINDOWS)))
        self.combined = cv.CreateImage((comb_width, comb_height),
                                       cv.IPL_DEPTH_8U, 3)

        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.3, 0.3)

        for i, (image, name) in enumerate(images):
            if image.nChannels == 1:
                cv.Merge(image, image, image, None, self.temp)
            else:
                cv.Copy(image, self.temp)
            xoffset = (i % XWINDOWS) * self.smallsize[0]
            yoffset = (i / XWINDOWS) * self.smallsize[1]
            cv.SetImageROI(
                self.combined,
                (xoffset, yoffset, self.smallsize[0], self.smallsize[1]))
            cv.Copy(self.temp, self.combined)
            cv.PutText(self.combined, name, (5, 10), font, (30, 200, 200))
            cv.ResetImageROI(self.combined)
        return self.combined
Example #29
def ray_plane_intersections(rays, planes):
    rows = rays.height
    cols = rays.width

    rays_split = [None] * 3
    for i in range(3):
        rays_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Split(rays, rays_split[0], rays_split[1], rays_split[2], None)

    planes_split = [None] * 4
    for i in range(4):
        planes_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Split(planes, planes_split[0], planes_split[1], planes_split[2],
             planes_split[3])

    n_dot_v = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.SetZero(n_dot_v)
    for i in range(3):
        temp = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Mul(planes_split[i], rays_split[i], temp)
        cv.Add(temp, n_dot_v, n_dot_v)
    depth = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Div(planes_split[3], n_dot_v, depth)

    intersection_points_split = [None] * 3
    for i in range(3):
        intersection_points_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)

    for i in range(3):
        cv.Mul(depth, rays_split[i], intersection_points_split[i])

    intersection_points = cv.CreateMat(rows, cols, cv.CV_32FC3)
    cv.Merge(intersection_points_split[0], intersection_points_split[1],
             intersection_points_split[2], None, intersection_points)

    return intersection_points
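A NumPy sketch of the same ray/plane intersection, assuming rays is H x W x 3 and planes is H x W x 4 with each plane stored as (nx, ny, nz, n·p):

import numpy as np

def ray_plane_intersections(rays, planes):
    # depth = (n . p) / (n . v), then scale each ray by its depth.
    n_dot_v = np.sum(planes[..., :3] * rays, axis=-1)
    depth = planes[..., 3] / n_dot_v
    return depth[..., None] * rays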
    def capture(self):
        blank = self.get_picture_of_projection(self.blank_projection)
        positive = self.get_picture_of_projection(
            self.positive_chessboard_projection)
        negative = self.get_picture_of_projection(
            self.negative_chessboard_projection)

        difference = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_8UC1)
        cv.Sub(positive, negative, difference)

        cv.NamedWindow("blank", flags=0)
        cv.ShowImage("blank", blank)
        cv.WaitKey(300)

        cv.NamedWindow("difference", flags=0)
        cv.ShowImage("difference", difference)
        cv.WaitKey(300)

        camera_image_points, camera_object_points = detect_chessboard(
            blank, self.printed_chessboard_corners_x,
            self.printed_chessboard_corners_y, self.printed_chessboard_spacing,
            self.printed_chessboard_spacing)
        if camera_image_points is None:
            return False
        cv.UndistortPoints(camera_image_points, camera_image_points,
                           self.camera_model.intrinsicMatrix(),
                           self.camera_model.distortionCoeffs())
        homography = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.FindHomography(camera_image_points, camera_object_points,
                          homography)
        object_points, dummy = detect_chessboard(
            difference, self.projected_chessboard_corners_x,
            self.projected_chessboard_corners_y, None, None)
        if object_points is None:
            return False
        cv.UndistortPoints(object_points, object_points,
                           self.camera_model.intrinsicMatrix(),
                           self.camera_model.distortionCoeffs())

        cv.PerspectiveTransform(object_points, object_points, homography)

        object_points_3d = cv.CreateMat(
            1, self.projected_chessboard_corners_x *
            self.projected_chessboard_corners_y, cv.CV_32FC3)

        x = cv.CreateMat(
            1, self.projected_chessboard_corners_x *
            self.projected_chessboard_corners_y, cv.CV_32FC1)
        y = cv.CreateMat(
            1, self.projected_chessboard_corners_x *
            self.projected_chessboard_corners_y, cv.CV_32FC1)
        cv.Split(object_points, x, y, None, None)
        z = cv.CreateMat(
            1, self.projected_chessboard_corners_x *
            self.projected_chessboard_corners_y, cv.CV_32FC1)
        cv.SetZero(z)

        cv.Merge(x, y, z, None, object_points_3d)

        self.object_points.append(object_points_3d)
        self.image_points.append(self.get_scene_image_points())
        self.number_of_scenes += 1

        return True