Example #1
def setup(flipped, capture, thehandcolor):
    """Initializes camera and finds initial skin tone"""

    #creates initial window and prepares text
    color = (40, 0, 0)
    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0)
    textsize1 = (cv.GetSize(flipped)[0] / 2 - 150,
                 cv.GetSize(flipped)[1] / 2 - 140)
    textsize2 = (cv.GetSize(flipped)[0] / 2 - 150,
                 cv.GetSize(flipped)[1] / 2 - 110)
    point1 = (cv.GetSize(flipped)[0] / 2 - 25, cv.GetSize(flipped)[1] / 2 - 25)
    point2 = (cv.GetSize(flipped)[0] / 2 + 25, cv.GetSize(flipped)[1] / 2 + 25)

    #until Enter is pressed
    while (cv.WaitKey(10) != 10):

        #captures live video, and draws sub-box and text
        frame = cv.QueryFrame(capture)
        cv.Copy(frame, flipped)
        cv.Flip(flipped, flipped, 1)
        cv.Rectangle(flipped, point1, point2, color, 2)
        cv.PutText(flipped, "Put your hand in the box ", textsize1, font,
                   color)
        cv.PutText(flipped, "and press enter", textsize2, font, color)
        cv.ShowImage("w2", flipped)

    #Creates sub-image inside box, and returns average color in box
    sub = cv.GetSubRect(flipped, (cv.GetSize(flipped)[0] / 2 - 25,
                                  cv.GetSize(flipped)[1] / 2 - 25, 50, 50))
    cv.Set(thehandcolor, cv.Avg(sub))
    return cv.Avg(sub)
Example #2
def GetCornerType(x, y, img):
    """Get type of corner point: upper left, bottom left, upper right, bottom right, vertical half, horizontal half, 
  also white on dark background or dark on white background. Greyscale image is presumed"""
    x1 = int(x)
    y1 = int(y)
    height = 5
    width = 5
    rect1 = cv.Avg(cv.GetSubRect(img, (x1, y1, width, height)))
    rect2 = cv.Avg(cv.GetSubRect(img, (x1 - width, y1, width, height)))
    rect3 = cv.Avg(cv.GetSubRect(img,
                                 (x1 - width, y1 - height, width, height)))
    rect4 = cv.Avg(cv.GetSubRect(img, (x1, y1 - height, width, height)))
    averages = [rect1[0], rect2[0], rect3[0], rect4[0]]
    clusters = cluster_points(averages)
    if (len(clusters[0]) == 2):
        return ("bad", "")
    else:
        fg, bg = None, None
        corner = None
        if (len(clusters[0]) == 1):
            fg = averages[clusters[0][0]]
            bg = averages[clusters[1][0]]
            corner = clusters[0][0]
        else:
            fg = averages[clusters[1][0]]
            bg = averages[clusters[0][0]]
            corner = clusters[1][0]

        if (fg > bg):
            return (corner, "w")
        else:
            return (corner, "b")
Example #3
def estaMarcada(marcadog, punto, ancho):
  sub = cv.GetSubRect(marcadog, (punto[0], punto[1], ancho, ancho))
  media = cv.Avg(sub)
  # GetSubRect returns a view into marcadog, so no explicit release is needed
  print "Average for point " + str(punto) + ": " + str(media)
  return media[0] < 250
Example #4
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    window = cv.CreateImage((cv.Round(img.width), cv.Round(img.height)), 8, 3)
    if (cascade):
        t = cv.GetTickCount()
        faces = local_haar_detect(small_img, cascade, cv.CreateMemStorage(0),
                                  haar_scale, min_neighbors, haar_flags,
                                  min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        channels = None
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (cv.Round(
                    (x + w * .2) * image_scale), cv.Round(y * image_scale))
                pt2 = (cv.Round(
                    (x + w * .8) * image_scale), cv.Round(
                        (y + h) * image_scale))

                window = cv.CreateImage((cv.Round(w * .6) * image_scale,
                                         cv.Round(h) * image_scale), 8, 3)
                channels = [
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1),
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1),
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1)
                ]
                cv.GetRectSubPix(img, window, (cv.Round(
                    (pt1[0] + pt2[0]) / 2.0), cv.Round(
                        (pt1[1] + pt2[1]) / 2.0)))
                # smooth the extracted face window before averaging its channels
                cv.Smooth(window, window, cv.CV_GAUSSIAN)
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                cv.Split(window, channels[0], channels[1], channels[2], None)
                result.append([
                    cv.Avg(channels[0])[0],
                    cv.Avg(channels[1])[0],
                    cv.Avg(channels[2])[0]
                ])

    cv.ShowImage("result", img)
Example #5
def ccoeff_normed(img1, img2):
	size = cv.GetSize(img1)
	tmp1 = float_version(img1)
	tmp2 = float_version(img2)

	cv.SubS(tmp1, cv.Avg(tmp1), tmp1)
	cv.SubS(tmp2, cv.Avg(tmp2), tmp2)

	norm1 = cv.CloneImage(tmp1)
	norm2 = cv.CloneImage(tmp2)
	cv.Pow(tmp1, norm1, 2.0)
	cv.Pow(tmp2, norm2, 2.0)

	#cv.Mul(tmp1, tmp2, tmp1)

	return cv.DotProduct(tmp1, tmp2) /  (cv.Sum(norm1)[0]*cv.Sum(norm2)[0])**0.5
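
Assuming float_version yields single-channel float images of the same size, the value returned above is the normalized cross-correlation coefficient

r = \frac{\sum_i (a_i - \bar{a})(b_i - \bar{b})}{\sqrt{\sum_i (a_i - \bar{a})^2 \, \sum_i (b_i - \bar{b})^2}}

with a_i, b_i the pixel values of img1 and img2: cv.DotProduct gives the numerator, and the two cv.Sum calls over the squared, mean-subtracted images give the terms under the square root.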
Example #6
def isWhite(img, x, y, thresh=-1, size=6):
    if thresh == -1:
        #set threshold as image average intensity
        avg = cv.Avg(img)
        thresh = (avg[0] + avg[1] + avg[2]) / 3.0

    return mean(img, int(x), int(y), size) > thresh
Example #7
    def sample_frame(self, frame):
        # Get the average of the green channel on the forehead
        cv.SetImageROI(frame, self.face_tracker.get_forehead())
        sample = cv.Avg(frame)[1]
        cv.ResetImageROI(frame)

        return sample
Example #8
def getBrightness(img):
  hue = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
  sat = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
  val = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
  hsv = cv.CloneImage(img)
  cv.CvtColor(img, hsv, cv.CV_BGR2HSV)
  # split the HSV copy (not the BGR original) so val really holds the V channel
  cv.Split(hsv, hue, sat, val, None)

  return cv.Avg(val)[0]
Example #9
def cam_measurebulk(nframes=100,
                    interactive=True,
                    show=True,
                    norm=False,
                    verb=0):
    """
	Take **nframes** frames and average these. If **norm** is set, set the 
	average of the summed frame to unity, otherwise it is divided by the 
	number of frames.

	This routine is intended to measure flat and dark frames. Flat frames 
	might be normalized such that dividing by these does not affect the 
	average intensity of the input frame. Dark frames should never be 
	normalized.

	The flatfield is stored in CAM_CFG['flat'] and is used automatically 
	from then on.

	@param [in] nframes Number of frames to average
	@param [in] show Show flat field + one correct image when done
	@param [in] verb Verbosity
	@return Summed and scaled frame.
	"""

    if (verb & VERB_M > L_INFO):
        print "Measuring bulk (n=%d)..." % (nframes)

    if (interactive):
        print "Will measure bulk now, press c to continue..."
        while (True):
            cam_getimage(show=True, waitkey=0)
            if (chr(cv.WaitKey(1) & 255) == "c"):
                print "ok!"
                break

    bulkimg = cam_getimage(show=False, dfcorr=False, raw=True)

    for dummy in xrange(nframes - 1):
        cv.Add(bulkimg, cam_getimage(show=False, dfcorr=False, raw=True),
               bulkimg)

    if (norm):
        cv.ConvertScale(bulkimg, bulkimg, scale=1.0 / cv.Avg(bulkimg)[0])
    else:
        cv.ConvertScale(bulkimg, bulkimg, scale=1.0 / nframes)

    if (show):
        cv.NamedWindow("cam_bulkimg", cv.CV_WINDOW_AUTOSIZE)
        cv.ShowImage('cam_bulkimg', bulkimg)
        c = cv.WaitKey(20)

    return bulkimg
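
Given the docstring, a plausible call pattern for building the correction frames might be the following sketch (only CAM_CFG['flat'] is mentioned in the original; the 'dark' key is hypothetical):

# normalized average of 100 frames as the flat field, used automatically afterwards
CAM_CFG['flat'] = cam_measurebulk(nframes=100, interactive=False, norm=True)
# dark frames are averaged but, per the docstring, never normalized
CAM_CFG['dark'] = cam_measurebulk(nframes=100, interactive=False, norm=False)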
Example #10
def dct_hash(img):
    img = float_version(img)
    small_img = cv.CreateImage((32, 32), 32, 1)
    cv.Resize(img[20:190, 20:205], small_img)

    dct = cv.CreateMat(32, 32, cv.CV_32FC1)
    cv.DCT(small_img, dct, cv.CV_DXT_FORWARD)
    dct = dct[1:9, 1:9]

    avg = cv.Avg(dct)[0]
    dct_bit = cv.CreateImage((8, 8), 8, 1)
    cv.CmpS(dct, avg, dct_bit, cv.CV_CMP_GT)

    return [dct_bit[y, x] == 255.0 for y in xrange(8) for x in xrange(8)]
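
dct_hash returns a list of 64 booleans, so two images can be compared by the Hamming distance of their hashes; a small usage sketch (the threshold of 10 is an assumption, not from the original code):

def hamming_distance(hash1, hash2):
    # count the positions where the two 64-bit perceptual hashes differ;
    # 0 means (near-)identical images, values around 32 mean unrelated ones
    return sum(b1 != b2 for b1, b2 in zip(hash1, hash2))

# e.g. treat images as likely duplicates when
# hamming_distance(dct_hash(img_a), dct_hash(img_b)) <= 10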
Example #11
    def tryToDetect(self):
        if time.time() - self.lastTime >= 0.5:
            self.current = self.getGrayImage(cv.GetSize(self.previous))
            diffImg = cv.CloneImage(self.previous)
            cv.AbsDiff(self.previous, self.current, diffImg)
            avg = cv.Avg(diffImg)

            self.previous = cv.CloneImage(self.current)
            self.lastTime = time.time()

            self.motionFactor = self.getMotionFactor(avg[0])
            self.actualAvgDiff = avg[0]
        else:
            self.getFrame()
Example #12
def mean(img, x, y, size=10):
    oldRoi = cv.GetImageROI(img)

    #make sure roi is within image bounds
    x = min(x, img.width - 1)
    x = max(x, 0)
    y = min(y, img.height - 1)
    y = max(y, 0)

    cv.SetImageROI(img, (x - size / 2, y - size / 2, size, size))
    avg = cv.Avg(img)

    cv.SetImageROI(img, oldRoi)

    return (avg[0] + avg[1] + avg[2]) / 3.0
Example #13
 def get_predator_distance(self, bb, depth):
     self.logger.debug("Bounding Box: " + str(bb))
     if bb[0] < 0:
         bb[0] = 0
     if bb[2] >= self.res['width']:
         bb[2] = self.res['width'] - 1
     if bb[1] < 0:
         bb[1] = 0
     if bb[3] >= self.res['height']:
         bb[3] = self.res['height'] - 1
     dist_rect = cv.CreateImage((bb[2] - bb[0], bb[3] - bb[1]),
                                cv.IPL_DEPTH_8U, 1)
     dist_rect = cv.GetSubRect(depth,
                               (bb[0], bb[1], bb[2] - bb[0], bb[3] - bb[1]))
     return cv.Avg(dist_rect)[0]
Example #14
def show_depth():
    global threshold
    global current_depth

    depth, timestamp = freenect.sync_get_depth()
    viewable = frame_convert.full_depth_cv(depth)
    cv.Rectangle(viewable, sense_pt1, sense_pt2, (255, 0, 0), 1)
    cv.ShowImage('Depth', viewable)
    roi = cv.GetSubRect(frame_convert.raw_depth_cv(depth), sense_rect)
    pix = cv.Avg(roi)[0]
    (roimin, roimax, a, b) = cv.MinMaxLoc(roi)
    if roimax < 1090:
        dist = 350.0 / (1091 - pix)
        print "%f %i %i" % (dist, roimin, roimax)
    else:
        print "XX"
Example #15
def glyphRec(image):

    storage = cv.CreateMemStorage(0)

    contrast = cv.CreateImage(cv.GetSize(image), 8, 3)
    grey = cv.CreateImage(cv.GetSize(image), 8, 1)
    canny = cv.CreateImage(cv.GetSize(grey), cv.IPL_DEPTH_8U, 1)

    #increase contrast
    avg = cv.Avg(image)
    cv.AddS(image, cv.Scalar(-.5 * avg[0], -.5 * avg[1], -.5 * avg[2]),
            contrast)
    cv.Scale(contrast, contrast, 3)

    #make grayscale
    cv.CvtColor(contrast, grey, cv.CV_BGR2GRAY)

    #smooth
    cv.Smooth(grey, grey, cv.CV_GAUSSIAN, 3, 3)

    #edge detect
    cv.Canny(grey, canny, 20, 200, 3)

    #smooth again
    cv.Smooth(canny, canny, cv.CV_GAUSSIAN, 3, 3)

    #find lines
    lines = cv.HoughLines2(canny, storage, cv.CV_HOUGH_PROBABILISTIC, 3,
                           math.pi / 180, 50, 150, 40)

    #find corners
    corners = getCorners(lines)

    #find quadrilaterals
    quad = findGlpyh(contrast, corners)

    if quad is None:
        return None

    drawQuad(image, quad)

    grid = readGlyph(image, quad)
    printGrid(grid)
    print ''
    toCoords(grid)

    return grid
Example #16
    def extract_hue_channel(self, cv_hue_image, bound_range):
        size = (cv_hue_image.width, cv_hue_image.height)
        tmp = cv.CreateImage(size, 8, 1)
        mask = cv.CreateImage(size, 8, 1)

        start, end = bound_range

        cv.Zero(tmp)

        cv.InRangeS(cv_hue_image, int(start), int(end), mask)
        # copy pixels in range from hue
        cv.AddS(cv_hue_image, 1, tmp, mask)

        avg_hue = cv.Avg(tmp, mask)
        avg_hue = avg_hue[0]

        return tmp, avg_hue
Example #17
    def _get_depth(self, depth_image, debug=False):
        """Get the depth reading from the Kinect"""
        depth = None

        # Only use part of the depth span to mask out anything other than the Crazyflie
        img_th = cv.CreateImage(cv.GetSize(depth_image), 8, 1)
        cv.InRangeS(depth_image, 10, 210, img_th)

        # Calculate the mean depth
        depth = cv.Avg(depth_image, img_th)[0]

        if debug:
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 1, 1)
            s2 = "%d" % depth
            cv.PutText(img_th, s2, (0, 60), font, 200)
            cv.ShowImage('depth th', img_th)

        return depth
Example #18
def difference(il, tol):
    frames = list(il)
    if len(frames) < 2:
        return True

    i1 = frames[0]
    i2 = frames[1]
    temp = cv.CreateImage((i1.width, i1.height), i1.depth, i1.nChannels)
    cv.AbsDiff(i1, i2, temp)
    #cv.ConvertScale(temp,temp,20)
    #cv.ShowImage("difference",temp)
    mean = cv.Avg(temp)
    #cv.SetImageCOI(temp,2)
    #mmlvec = cv.MinMaxLoc(temp)
    #print mean,mmlvec

    mean = (mean[0] + mean[1] + mean[2]) / 3
    return mean > tol
Example #19
    def run(self):
        while True:
            if time.time() - self.lastTime >= 0.5:
                self.current = self.getGrayImage(cv.GetSize(self.previous))
                diffImg = cv.CloneImage(self.previous)
                cv.AbsDiff(self.previous, self.current, diffImg)
                avg = cv.Avg(diffImg)
                #print "\t" + str(avg[0] > 5)
                print "\t" + str(avg[0])

                self.previous = cv.CloneImage(self.current)
                self.lastTime = time.time()
            else:
                self.getFrame()

            if time.time() - self.startTime > 20:
                break

        print "\n>> END <<\n"
Example #20
def callback(data):
    global ledVal
    cv_image = bridge.imgmsg_to_cv(data, "bgr8")
    # cv.Avg on a BGR image returns per-channel means in (B, G, R, 0) order
    (blues, greens, reds, _) = cv.Avg(cv_image)

    (h, s, v) = colorsys.rgb_to_hsv(reds, greens, blues)
    s = 1
    (reds, greens, blues) = colorsys.hsv_to_rgb(h, s, v)

    ledVal = 0
    ledVal |= (int(reds) & 0xFF) << 16
    ledVal |= (int(greens) & 0xFF) << 8
    ledVal |= (int(blues) & 0xFF)

    print int(reds), int(greens), int(blues)

    color = cv.CV_RGB(int(reds), int(greens), int(blues))
    cv.Rectangle(cv_image, (0, 0), (100, 100), color, thickness=20)
    cv.ShowImage("image", cv_image)
    cv.WaitKey(1)
Example #21
camframe = cv.CloneImage(darkframe)
diffframe = cv.CloneImage(darkframe)

# Make real flat field
print "Taking 100 flats..."
frame = cv.GetSubRect(cv.QueryFrame(CAM_CFG['handler']), CAM_CFG['roi'])
cv.ConvertScale(frame, camframe, scale=1.0 / 256)
flatframe = cv.CloneImage(camframe)

for i in xrange(9):
    print ".",
    frame = cv.GetSubRect(cv.QueryFrame(CAM_CFG['handler']), CAM_CFG['roi'])
    cv.ConvertScale(frame, camframe, scale=1.0 / 256)
    cv.Add(flatframe, camframe, flatframe)

cv.ConvertScale(flatframe, flatframe, scale=1.0 / cv.Avg(flatframe)[0])

# flatarr = np.linspace(2.0, 0.5, darkarr.shape[0]).reshape(-1,1)
# flatarr = np.dot(flatarr, np.ones((1, darkarr.shape[1])))
# flatframe = array2cv(flatarr)

# Get new frame. QueryFrame returns a pointer to the internal data. We
# immediately clone the frame because we want to modify it.
cv.ConvertScale(frame, lastframe, scale=1.0 / 256)
cv.ShowImage("cam_live", lastframe)

# lastarr = cv2array(lastframe)
# framearr = cv2array(frame)

mask = np.zeros((2 * rad, 2 * rad), dtype=np.uint8)
mask[:] = True
Example #22
def main():
    BLACK_AND_WHITE = False
    THRESHOLD = 0.48
    BW_THRESHOLD = 0.4

    os.chdir(sys.argv[1])
    try:
        os.mkdir(OUTPUT_DIR_NAME)
    except:
        pass

    if len(sys.argv) > 2:
        if sys.argv[2] == "bw":
            BLACK_AND_WHITE = True
            THRESHOLD = BW_THRESHOLD
            print "##########"
            print " B/W MODE"
            print "##########"

    tree = et.parse("project.xml")
    movie = tree.getroot()
    file_path = movie.attrib["path"]
    cap = cv.CreateFileCapture(file_path)

    if DEBUG:
        cv.NamedWindow("win", cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow("win", 200, 200)

    hist = None
    prev_hist = None
    prev_img = None

    pixel_count = None
    frame_counter = 0

    last_frame_black = False
    black_frame_start = -1

    t = time.time()

    while 1:
        img_orig = cv.QueryFrame(cap)

        if not img_orig:  # eof
            cv.SaveImage(OUTPUT_DIR_NAME + "\\%06d.png" % (frame_counter - 1),
                         prev_img)
            """movie.set("frames", str(frame_counter))
			tree.write("project.xml")"""
            break

        img = cv.CreateImage(
            (int(img_orig.width / 4), int(img_orig.height / 4)),
            cv.IPL_DEPTH_8U, 3)
        cv.Resize(img_orig, img, cv.CV_INTER_AREA)

        if frame_counter == 0:  # first frame
            cv.SaveImage(OUTPUT_DIR_NAME + "\\%06d.png" % (0), img)
            pixel_count = img.width * img.height
            prev_img = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
            cv.Zero(prev_img)

        if DEBUG and frame_counter % 2 == 1:
            cv.ShowImage("win", img)

        img_hsv = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(img, img_hsv, cv.CV_BGR2HSV)

        # #####################
        # METHOD #1: find the number of pixels that have (significantly) changed since the last frame
        diff = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 3)
        cv.AbsDiff(img_hsv, prev_img, diff)
        cv.Threshold(diff, diff, 10, 255, cv.CV_THRESH_BINARY)
        d_color = 0
        for i in range(1, 4):
            cv.SetImageCOI(diff, i)
            d_color += float(cv.CountNonZero(diff)) / float(pixel_count)

        if not BLACK_AND_WHITE:
            d_color = float(d_color / 3.0)  # 0..1

        # #####################
        # METHOD #2: calculate the amount of change in the histograms
        h_plane = cv.CreateMat(img.height, img.width, cv.CV_8UC1)
        s_plane = cv.CreateMat(img.height, img.width, cv.CV_8UC1)
        v_plane = cv.CreateMat(img.height, img.width, cv.CV_8UC1)
        cv.Split(img_hsv, h_plane, s_plane, v_plane, None)
        planes = [h_plane, s_plane, v_plane]

        hist_size = [50, 50, 50]
        hist_range = [[0, 360], [0, 255], [0, 255]]
        if not hist:
            hist = cv.CreateHist(hist_size, cv.CV_HIST_ARRAY, hist_range, 1)
        cv.CalcHist([cv.GetImage(i) for i in planes], hist)
        cv.NormalizeHist(hist, 1.0)

        if not prev_hist:
            prev_hist = cv.CreateHist(hist_size, cv.CV_HIST_ARRAY, hist_range,
                                      1)
            # why is there no cv.CopyHist()?!
            cv.CalcHist([cv.GetImage(i) for i in planes], prev_hist)
            cv.NormalizeHist(prev_hist, 1.0)
            continue

        d_hist = cv.CompareHist(prev_hist, hist, cv.CV_COMP_INTERSECT)

        # combine both methods to make a decision
        if ((0.4 * d_color + 0.6 * (1 - d_hist))) >= THRESHOLD:
            if DEBUG:
                if frame_counter % 2 == 0:
                    cv.ShowImage("win", img)
                winsound.PlaySound(soundfile,
                                   winsound.SND_FILENAME | winsound.SND_ASYNC)
            print "%.3f" % ((0.4 * d_color + 0.6 * (1 - d_hist))), "%.3f" % (
                d_color), "%.3f" % (1 - d_hist), frame_counter
            if DEBUG and DEBUG_INTERACTIVE:
                if win32api.MessageBox(0, "cut?", "",
                                       win32con.MB_YESNO) == 6:  #yes
                    cv.SaveImage(
                        OUTPUT_DIR_NAME + "\\%06d.png" % (frame_counter), img)
            else:
                cv.SaveImage(OUTPUT_DIR_NAME + "\\%06d.png" % (frame_counter),
                             img)

        cv.CalcHist([cv.GetImage(i) for i in planes], prev_hist)
        cv.NormalizeHist(prev_hist, 1.0)

        # #####################
        # METHOD #3: detect series of (almost) black frames as an indicator for "fade to black"
        average = cv.Avg(v_plane)[0]
        if average <= 0.6:
            if not last_frame_black:  # possible the start
                print "start", frame_counter
                black_frame_start = frame_counter
            last_frame_black = True
        else:
            if last_frame_black:  # end of a series of black frames
                cut_at = black_frame_start + int(
                    (frame_counter - black_frame_start) / 2)
                print "end", frame_counter, "cut at", cut_at
                img_black = cv.CreateImage(
                    (img_orig.width / 4, img_orig.height / 4), cv.IPL_DEPTH_8U,
                    3)
                cv.Set(img_black, cv.RGB(0, 255, 0))
                cv.SaveImage(OUTPUT_DIR_NAME + "\\%06d.png" % (cut_at),
                             img_black)
            last_frame_black = False

        cv.Copy(img_hsv, prev_img)
        frame_counter += 1

        if DEBUG:
            if cv.WaitKey(1) == 27:
                break

    if DEBUG:
        cv.DestroyWindow("win")

    print "%.2f min" % ((time.time() - t) / 60)
    #raw_input("- done -")
    return
Example #23
def old_GeneratePerceptualHash(path):

    # I think what I should be doing here is going cv2.imread( path, flags = cv2.CV_LOAD_IMAGE_GRAYSCALE )
    # then efficiently resize

    thumbnail = GeneratePILImage(path)

    # convert to 32 x 32 greyscale

    if thumbnail.mode == 'P':

        thumbnail = thumbnail.convert(
            'RGBA'
        )  # problem with some P images converting to L without RGBA step in between

    if thumbnail.mode == 'RGBA':

        # this is some code i picked up somewhere
        # another great example of PIL failing; it turns all alpha to pure black on a RGBA->RGB

        thumbnail.load()

        canvas = PILImage.new('RGB', thumbnail.size, (255, 255, 255))

        canvas.paste(thumbnail, mask=thumbnail.split()[3])

        thumbnail = canvas

    thumbnail = thumbnail.convert('L')

    thumbnail = thumbnail.resize((32, 32), PILImage.ANTIALIAS)

    # convert to mat

    numpy_thumbnail_8 = cv.CreateMatHeader(32, 32, cv.CV_8UC1)

    cv.SetData(numpy_thumbnail_8, thumbnail.tostring())

    numpy_thumbnail_32 = cv.CreateMat(32, 32, cv.CV_32FC1)

    cv.Convert(numpy_thumbnail_8, numpy_thumbnail_32)

    # compute dct

    dct = cv.CreateMat(32, 32, cv.CV_32FC1)

    cv.DCT(numpy_thumbnail_32, dct, cv.CV_DXT_FORWARD)

    # take top left 8x8 of dct

    dct = cv.GetSubRect(dct, (0, 0, 8, 8))

    # get mean of dct, excluding [0,0]

    mask = cv.CreateMat(8, 8, cv.CV_8U)

    cv.Set(mask, 1)

    mask[0, 0] = 0

    channel_averages = cv.Avg(dct, mask)

    average = channel_averages[0]

    # make a monochromatic, 64-bit hash of whether the entry is above or below the mean

    bytes = []

    for i in range(8):

        byte = 0

        for j in range(8):

            byte <<= 1  # shift byte one left

            value = dct[i, j]

            if value > average: byte |= 1

        bytes.append(byte)

    answer = str(bytearray(bytes))

    # we good

    return answer
Example #24
 def getMotionFactor(self, diff):
     avg = cv.Avg(diff)
     if avg[0] >= self.threshold:
         return 100
     else:
         return round((avg[0] / self.threshold) * 100)
Example #25
def doSSIM(frame1, frame2):
    '''
    The equivalent of Zhou Wang's SSIM matlab code using OpenCV.
    from http://www.cns.nyu.edu/~zwang/files/research/ssim/index.html
    The measure is described in :
    "Image quality assessment: From error measurement to structural similarity"
    C++ code by Rabah Mehdi. http://mehdi.rabah.free.fr/SSIM

    C++ to Python translation and adaptation by Iñaki Úcar
    '''
    def array2cv(a):
        dtype2depth = {
            'uint8': cv.IPL_DEPTH_8U,
            'int8': cv.IPL_DEPTH_8S,
            'uint16': cv.IPL_DEPTH_16U,
            'int16': cv.IPL_DEPTH_16S,
            'int32': cv.IPL_DEPTH_32S,
            'float32': cv.IPL_DEPTH_32F,
            'float64': cv.IPL_DEPTH_64F,
        }
        try:
            nChannels = a.shape[2]
        except:
            nChannels = 1
        cv_im = cv.CreateImageHeader((a.shape[1], a.shape[0]),
                                     dtype2depth[str(a.dtype)], nChannels)
        cv.SetData(cv_im, a.tostring(),
                   a.dtype.itemsize * nChannels * a.shape[1])
        return cv_im

    C1 = 6.5025
    C2 = 58.5225
    img1_temp = array2cv(frame1)
    img2_temp = array2cv(frame2)
    nChan = img1_temp.nChannels
    d = cv.IPL_DEPTH_32F
    size = img1_temp.width, img1_temp.height
    img1 = cv.CreateImage(size, d, nChan)
    img2 = cv.CreateImage(size, d, nChan)
    cv.Convert(img1_temp, img1)
    cv.Convert(img2_temp, img2)
    img1_sq = cv.CreateImage(size, d, nChan)
    img2_sq = cv.CreateImage(size, d, nChan)
    img1_img2 = cv.CreateImage(size, d, nChan)
    cv.Pow(img1, img1_sq, 2)
    cv.Pow(img2, img2_sq, 2)
    cv.Mul(img1, img2, img1_img2, 1)
    mu1 = cv.CreateImage(size, d, nChan)
    mu2 = cv.CreateImage(size, d, nChan)
    mu1_sq = cv.CreateImage(size, d, nChan)
    mu2_sq = cv.CreateImage(size, d, nChan)
    mu1_mu2 = cv.CreateImage(size, d, nChan)
    sigma1_sq = cv.CreateImage(size, d, nChan)
    sigma2_sq = cv.CreateImage(size, d, nChan)
    sigma12 = cv.CreateImage(size, d, nChan)
    temp1 = cv.CreateImage(size, d, nChan)
    temp2 = cv.CreateImage(size, d, nChan)
    temp3 = cv.CreateImage(size, d, nChan)
    ssim_map = cv.CreateImage(size, d, nChan)
    #/*************************** END INITS **********************************/
    #// PRELIMINARY COMPUTING
    cv.Smooth(img1, mu1, cv.CV_GAUSSIAN, 11, 11, 1.5)
    cv.Smooth(img2, mu2, cv.CV_GAUSSIAN, 11, 11, 1.5)
    cv.Pow(mu1, mu1_sq, 2)
    cv.Pow(mu2, mu2_sq, 2)
    cv.Mul(mu1, mu2, mu1_mu2, 1)
    cv.Smooth(img1_sq, sigma1_sq, cv.CV_GAUSSIAN, 11, 11, 1.5)
    cv.AddWeighted(sigma1_sq, 1, mu1_sq, -1, 0, sigma1_sq)
    cv.Smooth(img2_sq, sigma2_sq, cv.CV_GAUSSIAN, 11, 11, 1.5)
    cv.AddWeighted(sigma2_sq, 1, mu2_sq, -1, 0, sigma2_sq)
    cv.Smooth(img1_img2, sigma12, cv.CV_GAUSSIAN, 11, 11, 1.5)
    cv.AddWeighted(sigma12, 1, mu1_mu2, -1, 0, sigma12)
    #//////////////////////////////////////////////////////////////////////////
    #// FORMULA
    #// (2*mu1_mu2 + C1)
    cv.Scale(mu1_mu2, temp1, 2)
    cv.AddS(temp1, C1, temp1)
    #// (2*sigma12 + C2)
    cv.Scale(sigma12, temp2, 2)
    cv.AddS(temp2, C2, temp2)
    #// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
    cv.Mul(temp1, temp2, temp3, 1)
    #// (mu1_sq + mu2_sq + C1)
    cv.Add(mu1_sq, mu2_sq, temp1)
    cv.AddS(temp1, C1, temp1)
    #// (sigma1_sq + sigma2_sq + C2)
    cv.Add(sigma1_sq, sigma2_sq, temp2)
    cv.AddS(temp2, C2, temp2)
    #// ((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
    cv.Mul(temp1, temp2, temp1, 1)
    #// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
    cv.Div(temp3, temp1, ssim_map, 1)
    index_scalar = cv.Avg(ssim_map)
    #// through observation, there is approximately
    #// 1% error max with the original matlab program
    return index_scalar[0]
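
For reference, the per-pixel quantity assembled in the FORMULA section above is the standard SSIM index

\mathrm{SSIM}(x, y) = \frac{(2\mu_x\mu_y + C_1)(2\sigma_{xy} + C_2)}{(\mu_x^2 + \mu_y^2 + C_1)(\sigma_x^2 + \sigma_y^2 + C_2)}

computed over Gaussian-weighted local windows and then averaged with cv.Avg; the constants correspond to C_1 = (0.01 \cdot 255)^2 = 6.5025 and C_2 = (0.03 \cdot 255)^2 = 58.5225.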
Example #26
import cv

image = cv.LoadImage('eben.jpg', cv.CV_LOAD_IMAGE_COLOR)

# print some image properties
print 'Depth:', image.depth, '# Channels:', image.nChannels
print 'Size:', image.width, image.height
print 'Pixel values average', cv.Avg(image)

# create the window
cv.NamedWindow('my window', cv.CV_WINDOW_AUTOSIZE)
cv.ShowImage('my window', image)  # show the image
cv.WaitKey()  # the window will be closed with a (any)key press
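
All of the examples on this page use the legacy cv bindings. As a rough sketch of the same average with the modern cv2 interface (assuming the same 'eben.jpg' file), cv2.mean plays the role of cv.Avg:

import cv2

image = cv2.imread('eben.jpg')  # BGR, uint8; shape is (height, width, channels)

# print some image properties
print 'Size:', image.shape[1], image.shape[0], '# Channels:', image.shape[2]

# cv2.mean returns a 4-tuple of per-channel means, just like cv.Avg
print 'Pixel values average', cv2.mean(image)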
Example #27
 def __SSIM(self, frame1, frame2):
     """
         The equivalent of Zhou Wang's SSIM matlab code using OpenCV.
         from http://www.cns.nyu.edu/~zwang/files/research/ssim/index.html
         The measure is described in :
         "Image quality assessment: From error measurement to structural similarity"
         C++ code by Rabah Mehdi. http://mehdi.rabah.free.fr/SSIM
         
         C++ to Python translation and adaptation by Iñaki Úcar
     """
     C1 = 6.5025
     C2 = 58.5225
     img1_temp = self.__array2cv(frame1)
     img2_temp = self.__array2cv(frame2)
     nChan = img1_temp.nChannels
     d = cv.IPL_DEPTH_32F
     size = img1_temp.width, img1_temp.height
     img1 = cv.CreateImage(size, d, nChan)
     img2 = cv.CreateImage(size, d, nChan)
     cv.Convert(img1_temp, img1)
     cv.Convert(img2_temp, img2)
     img1_sq = cv.CreateImage(size, d, nChan)
     img2_sq = cv.CreateImage(size, d, nChan)
     img1_img2 = cv.CreateImage(size, d, nChan)
     cv.Pow(img1, img1_sq, 2)
     cv.Pow(img2, img2_sq, 2)
     cv.Mul(img1, img2, img1_img2, 1)
     mu1 = cv.CreateImage(size, d, nChan)
     mu2 = cv.CreateImage(size, d, nChan)
     mu1_sq = cv.CreateImage(size, d, nChan)
     mu2_sq = cv.CreateImage(size, d, nChan)
     mu1_mu2 = cv.CreateImage(size, d, nChan)
     sigma1_sq = cv.CreateImage(size, d, nChan)
     sigma2_sq = cv.CreateImage(size, d, nChan)
     sigma12 = cv.CreateImage(size, d, nChan)
     temp1 = cv.CreateImage(size, d, nChan)
     temp2 = cv.CreateImage(size, d, nChan)
     temp3 = cv.CreateImage(size, d, nChan)
     ssim_map = cv.CreateImage(size, d, nChan)
     #/*************************** END INITS **********************************/
     #// PRELIMINARY COMPUTING
     cv.Smooth(img1, mu1, cv.CV_GAUSSIAN, 11, 11, 1.5)
     cv.Smooth(img2, mu2, cv.CV_GAUSSIAN, 11, 11, 1.5)
     cv.Pow(mu1, mu1_sq, 2)
     cv.Pow(mu2, mu2_sq, 2)
     cv.Mul(mu1, mu2, mu1_mu2, 1)
     cv.Smooth(img1_sq, sigma1_sq, cv.CV_GAUSSIAN, 11, 11, 1.5)
     cv.AddWeighted(sigma1_sq, 1, mu1_sq, -1, 0, sigma1_sq)
     cv.Smooth(img2_sq, sigma2_sq, cv.CV_GAUSSIAN, 11, 11, 1.5)
     cv.AddWeighted(sigma2_sq, 1, mu2_sq, -1, 0, sigma2_sq)
     cv.Smooth(img1_img2, sigma12, cv.CV_GAUSSIAN, 11, 11, 1.5)
     cv.AddWeighted(sigma12, 1, mu1_mu2, -1, 0, sigma12)
     #//////////////////////////////////////////////////////////////////////////
     #// FORMULA
     #// (2*mu1_mu2 + C1)
     cv.Scale(mu1_mu2, temp1, 2)
     cv.AddS(temp1, C1, temp1)
     #// (2*sigma12 + C2)
     cv.Scale(sigma12, temp2, 2)
     cv.AddS(temp2, C2, temp2)
     #// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
     cv.Mul(temp1, temp2, temp3, 1)
     #// (mu1_sq + mu2_sq + C1)
     cv.Add(mu1_sq, mu2_sq, temp1)
     cv.AddS(temp1, C1, temp1)
     #// (sigma1_sq + sigma2_sq + C2)
     cv.Add(sigma1_sq, sigma2_sq, temp2)
     cv.AddS(temp2, C2, temp2)
     #// ((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
     cv.Mul(temp1, temp2, temp1, 1)
     #// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
     cv.Div(temp3, temp1, ssim_map, 1)
     index_scalar = cv.Avg(ssim_map)
     #// through observation, there is approximately
     #// 1% error max with the original matlab program
     return index_scalar[0]
Example #28
    def draw_circles_and_lines(self):

        # undetectednum 'fills in' a few detections to make
        # things look smoother in case detection drops out
        # for a frame for some reason
        if not self.detected:
            self.undetectednum += 1
            self.pt = self.lastpt

        if self.detected:
            self.undetectednum = 0
            self.lastpt = self.pt

        # convert to HSV
        cv.CvtColor(self.sgc, self.hsv, cv.CV_RGB2HSV)
        cv.Split(self.hsv, self.hue, self.sat, self.val, None)

        pt_int = []
        for (foo, bar) in self.pt:
            pt_int.append((int(foo), int(bar)))

        # do the drawing. pt array should store p,p1,p2
        self.p3 = (self.pt[2][0] + self.pt[1][0] - self.pt[0][0],
                   self.pt[2][1] + self.pt[1][1] - self.pt[0][1])
        p2_int = (int(self.p2[0]), int(self.p2[1]))
        p3_int = (int(self.p3[0]), int(self.p3[1]))

        cv.Line(self.sg, pt_int[0], pt_int[1], (0, 255, 0), 2)
        cv.Line(self.sg, pt_int[1], p3_int, (0, 255, 0), 2)
        cv.Line(self.sg, p3_int, pt_int[2], (0, 255, 0), 2)
        cv.Line(self.sg, pt_int[2], pt_int[0], (0, 255, 0), 2)

        # first sort the points so that 0 is BL 1 is UL and 2 is BR
        pt = winded(self.pt[0], self.pt[1], self.pt[2], self.p3)

        # find the coordinates of the 9 places we want to extract over
        self.v1 = (pt[1][0] - pt[0][0], pt[1][1] - pt[0][1])
        self.v2 = (pt[3][0] - pt[0][0], pt[3][1] - pt[0][1])
        self.p0 = (pt[0][0], pt[0][1])

        ep = []
        i = 1
        j = 5
        for k in range(9):
            ep.append(
                (self.p0[0] + i * self.v1[0] / 6.0 + j * self.v2[0] / 6.0,
                 self.p0[1] + i * self.v1[1] / 6.0 + j * self.v2[1] / 6.0))
            i = i + 2
            if i == 7:
                i = 1
                j = j - 2

        rad = ptdst(self.v1, (0.0, 0.0)) / 6.0
        cs = []
        center_pixels = []
        hsvcs = []
        den = 2

        for i, p in enumerate(ep):
            if p[0] > rad and p[0] < self.width - rad and p[1] > rad and p[
                    1] < self.height - rad:

                # valavg=val[int(p[1]-rad/3):int(p[1]+rad/3),int(p[0]-rad/3):int(p[0]+rad/3)]
                # mask=cv.CreateImage(cv.GetDims(valavg), 8, 1 )

                col = cv.Avg(
                    self.sgc[int(p[1] - rad / den):int(p[1] + rad / den),
                             int(p[0] - rad / den):int(p[0] + rad / den)])

                p_int = (int(p[0]), int(p[1]))
                cv.Circle(self.sg, p_int, int(rad), col, -1)

                if i == 4:
                    cv.Circle(self.sg, p_int, int(rad), (0, 255, 255), 2)
                else:
                    cv.Circle(self.sg, p_int, int(rad), (255, 255, 255), 2)

                hueavg = cv.Avg(
                    self.hue[int(p[1] - rad / den):int(p[1] + rad / den),
                             int(p[0] - rad / den):int(p[0] + rad / den)])
                satavg = cv.Avg(
                    self.sat[int(p[1] - rad / den):int(p[1] + rad / den),
                             int(p[0] - rad / den):int(p[0] + rad / den)])

                cv.PutText(self.sg, repr(int(hueavg[0])),
                           (p_int[0] + 70, p_int[1]), self.ff, (255, 255, 255))
                cv.PutText(self.sg, repr(int(satavg[0])),
                           (p_int[0] + 70, p_int[1] + 10), self.ff,
                           (255, 255, 255))

                if self.extract:
                    cs.append(col)
                    center_pixels.append((p_int[0] * den, p_int[1] * den))
                    hsvcs.append((hueavg[0], satavg[0]))

        if self.extract:
            self.extract = False
            self.colors[self.selected] = cs
            self.center_pixels[self.selected] = center_pixels
            self.hsvs[self.selected] = hsvcs
            self.selected = min(self.selected + 1, 5)
Example #29
def print_mom0(filename):

    fimage = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE)
    v = cv.Avg(fimage)
    print v
Example #30
def detect_and_draw(img, cascade):

    global time_point
    global frame_no
    global input_data
    global fs
    global max_bps

    global last_f

    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    window = cv.CreateImage((cv.Round(img.width), cv.Round(img.height)), 8, 3)
    if (cascade):
        faces = local_haar_detect(small_img, cascade, cv.CreateMemStorage(0),
                                  haar_scale, min_neighbors, haar_flags,
                                  min_size)

        channels = None
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (cv.Round(
                    (x + w * .2) * image_scale), cv.Round(y * image_scale))
                pt2 = (cv.Round(
                    (x + w * .8) * image_scale), cv.Round(
                        (y + h) * image_scale))

                window = cv.CreateImage((cv.Round(w * .6) * image_scale,
                                         cv.Round(h) * image_scale), 8, 3)
                #cv.Smooth(window, window, cv.CV_GAUSSIAN, 3, 3)
                channels = [
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1),
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1),
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1)
                ]

                cv.GetRectSubPix(img, window, (cv.Round(
                    (pt1[0] + pt2[0]) / 2.0), cv.Round(
                        (pt1[1] + pt2[1]) / 2.0)))

                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                cv.Split(window, channels[0], channels[1], channels[2], None)
                input_data.append([
                    cv.Avg(channels[0])[0],
                    cv.Avg(channels[1])[0],
                    cv.Avg(channels[2])[0]
                ])

                #measure the sampling frequency
                now_point = cv.GetTickCount()

                if float(fs) / 2 < max_bps and fs != 0:
                    max_bps = float(fs) / 2

                if len(input_data) > frame_no:
                    fs = cv.GetTickFrequency() * 1000000. / (now_point -
                                                             time_point)
                    input_data.pop(0)

                    #print my_functions.calc_heart_rate(input_data)
                    final_data = my_functions.calc_heart_rate(input_data)
                    tmp_last_f = my_functions.plot_diagrams(
                        final_data, fs, last_f)
                    last_f = tmp_last_f
                    print last_f

                time_point = now_point
        else:
            print "Can not detect face"

    cv.ShowImage("result", img)