Code Example #1
def processa_frame(imagem):
    cv.SetData(imagem_cv, imagem)
    if maos:
        for id in maos:
            cv.PutText(imagem_cv, efeito, maos[id]['atual'], fonte_do_texto, cv.CV_RGB(0, 0, 150))
    cv.PutText(imagem_cv, 'Efeito: ' + efeito, (10, 20), fonte_do_texto, cv.CV_RGB(200, 0, 0))
    cv.ShowImage('Video', imagem_cv)
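All of these snippets target the legacy cv (cv2.cv) interface, which was removed in OpenCV 3. As a rough sketch only (the helper name annotate_frame is hypothetical, not from the original project), the same text overlay written against the modern cv2 API, which draws directly on NumPy arrays, might look like:

import cv2
import numpy as np

def annotate_frame(frame, label, position=(10, 20)):
    # cv2.putText draws in place on a BGR NumPy array
    cv2.putText(frame, label, position, cv2.FONT_HERSHEY_SIMPLEX,
                0.6, (0, 0, 200), 2, cv2.LINE_AA)
    return frame

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder black frame
annotate_frame(frame, 'Effect: none')
cv2.imshow('Video', frame)
cv2.waitKey(0)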
Code Example #2
    def render(self, window):
        with self.lock:
            if self.image and self.image_time + rospy.Duration(2.0) > rospy.Time.now() and self.info_time + rospy.Duration(2.0) > rospy.Time.now():
                cv2mat = self.bridge.imgmsg_to_cv2(self.image, 'rgb8')
                cvmat = cv.fromarray(cv2mat)
                cv.Resize(cvmat, window)
                interval = min(1,(self.interval / self.max_interval))
                cv.Rectangle(window,
                             (int(0.05*window.width), int(window.height*0.9)),
                             (int(interval*window.width*0.9+0.05*window.width), int(window.height*0.95)),
                             (0, interval*255, (1-interval)*255), thickness=-1)
                cv.Rectangle(window,
                             (int(0.05*window.width), int(window.height*0.9)),
                             (int(window.width*0.9+0.05*window.width), int(window.height*0.95)),
                             (0, interval*255, (1-interval)*255))
                cv.PutText(window, self.ns, (int(window.width * .05), int(window.height * 0.1)), self.font1, (0,0,255))
                if self.features and self.features.header.stamp + rospy.Duration(4.0) > self.image.header.stamp:
                    w_scaling =  float (window.width) / self.image.width
                    h_scaling =  float (window.height) / self.image.height
                    if self.features.success:
                        corner_color = (0,255,0)
                        for cur_pt in self.features.image_points:
                            cv.Circle(window, (int(cur_pt.x*w_scaling), int(cur_pt.y*h_scaling)), int(w_scaling*5), corner_color)
                    else:
                        window = add_text(cv2mat, ["Could not detect", "checkerboard"], False)
                else:
                    window = add_text(cv2mat, ["Timed out waiting", "for checkerboard"], False)

            else:
                # Generate random white noise (for fun)
                noise = numpy.random.rand(window.height, window.width)*256
                numpy.asarray(window)[:, :, 0] = noise
                numpy.asarray(window)[:, :, 1] = noise
                numpy.asarray(window)[:, :, 2] = noise
                cv.PutText(window, self.ns, (int(window.width * .05), int(window.height * .95)), self.font, (0,0,255))
Code Example #3
def processa_frame(imagem):
    cv.SetData(imagem_cv, imagem)
    if gesto == False:
        cv.PutText(imagem_cv, 'Acene para ser Rastreado!', (80, 50),
                   fonte_do_texto, cv.CV_RGB(0, 0, 0))
    cv.Circle(imagem_cv, centro, 16, cv.CV_RGB(0, 0, 255), 2, cv.CV_AA, 0)
    cv.PutText(imagem_cv, 'Real(mm): ' + coordenada_real, (80, 435),
               fonte_do_texto, cv.CV_RGB(255, 255, 255))
    cv.PutText(imagem_cv, 'Convertido(px): ' + coordenada_projecao, (80, 465),
               fonte_do_texto, cv.CV_RGB(255, 255, 255))
    cv.ShowImage('Video', imagem_cv)
Code Example #4
def processa_frame(imagem):
    cv.SetData(imagem_cv, imagem)
    if maos:
        for id in maos:
            cv.PutText(imagem_cv,
                       ', '.join(str(int(e)) for e in maos[id]['real']),
                       maos[id]['projecao'], fonte_do_texto,
                       cv.CV_RGB(0, 0, 150))
    else:
        cv.PutText(imagem_cv, 'Acene para ser Rastreado', (10, 20),
                   fonte_do_texto, cv.CV_RGB(200, 0, 0))
    cv.ShowImage('Video', imagem_cv)
Code Example #5
def DetectRedEyes(image, faceCascade):
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.1
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(
        image.width / image_scale), cv.Round(image.height / image_scale)), 8,
                                1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:

        #print faces

        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            #print "face"
            global line2
            line2 = n
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            # print pt1
            # print pt2
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 1, 8, 0)
            cv.PutText(image, "face" + str(h), pt1, font, cv.RGB(255, 0, 0))
            cv.PutText(image, "Come close.", (0, 20), font, cv.RGB(255, 0, 0))
            cv.PutText(image, "Ensure your forehead is well lit.", (0, 40),
                       font, cv.RGB(255, 0, 0))
            cv.PutText(image, "Hit escape when done.", (0, 60), font,
                       cv.RGB(255, 0, 0))

    cv.ResetImageROI(image)
    return image
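For comparison, a minimal sketch of the same detect-and-label flow with the cv2 API; it assumes the stock haarcascade_frontalface_default.xml bundled with the opencv-python package, and the helper name is illustrative:

import cv2

face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def detect_and_label_faces(image):
    # detectMultiScale scans multiple scales itself, so no manual downscaling
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1,
                                          minNeighbors=2, minSize=(20, 20))
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 1)
        cv2.putText(image, 'face ' + str(h), (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
    return image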
Code Example #6
 def draw(self):
     p1 = (0, 840)
     p2 = (int(self.w), 840)
     p3 = (0, 1040)
     p4 = (int(self.w), 1040)
     shift = int((self.w - 16 * len(self.text)) / 2)
     cv.PutText(self.src, self.text, (shift, 36), self.font,
                cv.Scalar(32, 0, 220, 0))
     cv.PutText(self.src, str(self.iframe), (880, 52), self.font,
                cv.Scalar(32, 0, 220, 0))
     cv.Line(self.src, p1, p2, cv.Scalar(0, 64, 255, 0), thickness=7)
     cv.Line(self.src, p1, p2, cv.Scalar(160, 0, 0, 0), thickness=2)
     cv.Line(self.src, p3, p4, cv.Scalar(0, 64, 255, 0), thickness=7)
     cv.Line(self.src, p3, p4, cv.Scalar(160, 0, 0, 0), thickness=2)
     self.showImg()
Code Example #7
def draw(img, players):
    bluePlayers = []
    redPlayers = []
    for player in players:
        x = player[0][0]
        y = player[0][1]
        team = player[1]

        if (team == 'b'):
            bluePlayers = addIn(bluePlayers, (x, y))
        elif (team == 'r'):
            redPlayers = addIn(redPlayers, (x, y))

    flag = True
    startPoint = 30
    if (bluePlayers == []):
        font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, 0.5, 0.8, 0, 1, 5)
        img = cv.fromarray(img)
        cv.PutText(img, 'blue players not enough, need at least 1 blue player',
                   (0, startPoint), font, (0, 0, 255))
        img = np.asarray(img)
        startPoint += 25
        flag = False
    if (redPlayers == []):
        img = cv.fromarray(img)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, 0.5, 0.8, 0, 1, 5)
        cv.PutText(img, 'red players not enough, need at least 1 red player',
                   (0, startPoint), font, (0, 0, 255))
        img = np.asarray(img)
        flag = False

    if (flag):
        if (bluePlayers[0][0] < redPlayers[1][0]):
            p1, p2 = decidePoint(bluePlayers[0])
            cv2.line(img, p1, p2, color, 2)
            ###print p1[0], ',,, ', p1[1], '///', p2[0], ',,, ', p2[1]
        if (bluePlayers[2][0] < redPlayers[3][0]):
            p1, p2 = decidePoint(redPlayers[3])
            cv2.line(img, p1, p2, color, 2)
            ###print p1[0], ',,, ', p1[1], '///', p2[0], ',,, ', p2[1]

    ###for p in bluePlayers:
    ###print p[0], ', ', p[1], '-----'
    ###print '\n'
    ###for p in redPlayers:
    ###print p[0], ', ', p[1], '-----'
    ###print '\n=======================\n'
    return img
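The cv.fromarray / np.asarray round trips above exist only because the legacy cv.PutText cannot draw on NumPy arrays. With cv2 no conversion is needed; a hypothetical helper for the warning messages could be as simple as:

import cv2

def warn_missing_players(img, messages, start_y=30, line_height=25):
    # cv2 drawing calls modify the NumPy array in place
    for i, msg in enumerate(messages):
        cv2.putText(img, msg, (0, start_y + i * line_height),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
    return img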
Code Example #8
File: motion3.py  Project: mateodurante/taller3
    def run(self):
        started = time.time()
        while True:

            curframe = cv.QueryFrame(self.capture)
            instant = time.time()  # Get the timestamp of the frame

            self.processImage(curframe) #Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant #Update the trigger_time
                    if instant > started + 5:  # Wait 5 seconds after the webcam starts (for luminosity adjustment, etc.)
                        print "Something is moving !"
                        if self.doRecord: #set isRecording=True only if we record a video
                            self.isRecording = True
            else:
                if instant >= self.trigger_time + 10:  # Record for 10 seconds
                    print "Stop recording"
                    self.isRecording = False
                else:
                    cv.PutText(curframe,datetime.now().strftime("%b %d, %H:%M:%S"), (25,30),self.font, 0) #Put date on the frame
                    cv.WriteFrame(self.writer, curframe) #Write the frame

            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)

            cv.Copy(self.frame2gray, self.frame1gray)
            c=cv.WaitKey(1)
            if c==27 or c == 1048603: #Break if user enters 'Esc'.
                break
Code Example #9
def get_image(camera, filename=None):
    im = cv.QueryFrame(camera)

    # take greyscale and compute RMS value
    im2 = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_32F, 3)
    cv.Convert(im, im2)
    gray = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_32F, 1)
    cv.CvtColor(im2, gray, cv.CV_RGB2GRAY)
    gray_mat = cv.GetMat(gray)
    img = numpy.asarray(gray_mat)

    power = numpy.sqrt(numpy.mean(img**2))

    #save file
    if filename:
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.8, 0.8, 0, 2,
                           cv.CV_AA)
        cv.PutText(im, filename, (DATE_X, DATE_Y), font, cv.RGB(255, 255, 0))
        filename = os.path.join(DIR_PREFIX, filename + '.jpg')
        print filename
        cv.SaveImage(filename, im)
        del font
    else:
        filename = ''

    #del(camera)
    del im, im2, gray, img, gray_mat

    return (power, filename)
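The grayscale RMS computation above can be written directly with cv2 and NumPy; a minimal sketch (the helper name is illustrative):

import cv2
import numpy as np

def frame_rms(frame):
    # Root-mean-square pixel value of the grayscale frame
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype(np.float32)
    return float(np.sqrt(np.mean(gray ** 2)))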
Code Example #10
    def run(self):
        started = time.time()
        while True:

            currentframe = cv.QueryFrame(self.capture)
            instant = time.time()  # Get the timestamp of the frame

            self.processImage(currentframe)  #Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant  #Update the trigger_time
                    if instant > started + 10:  # Wait 10 seconds after the webcam starts (for luminosity adjustment, etc.)
                        print("Something is moving !")
                        if self.doRecord:  #set isRecording=True only if we record a video
                            self.isRecording = True
                cv.DrawContours(currentframe, self.currentcontours,
                                (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
            else:
                if instant >= self.trigger_time + 10:  # Record for 10 seconds
                    print("Stop recording")
                    self.isRecording = False
                else:
                    cv.PutText(currentframe,
                               datetime.now().strftime("%b %d, %H:%M:%S"),
                               (25, 30), self.font, 0)  #Put date on the frame
                    cv.WriteFrame(self.writer, currentframe)  #Write the frame

            if self.show:
                cv.ShowImage("Image", currentframe)

            c = cv.WaitKey(1) % 0x100
            if c == 27 or c == 10:  # Break if the user presses Esc or Enter
                break
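The timestamp-overlay-and-record step used by these motion detectors maps onto the cv2 API roughly as below; this is a sketch only, with the output file name, codec, frame rate, and frame size chosen arbitrarily:

import cv2
from datetime import datetime

cap = cv2.VideoCapture(0)
writer = cv2.VideoWriter('motion.avi', cv2.VideoWriter_fourcc(*'XVID'),
                         15.0, (640, 480))
ok, frame = cap.read()
if ok:
    frame = cv2.resize(frame, (640, 480))
    stamp = datetime.now().strftime('%b %d, %H:%M:%S')
    # Put the date on the frame, then write it out
    cv2.putText(frame, stamp, (25, 30), cv2.FONT_HERSHEY_SIMPLEX,
                0.7, (255, 255, 255), 2)
    writer.write(frame)
writer.release()
cap.release()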
Code Example #11
File: camera2.py  Project: inu1255/rasp-camera
 def record(self, image, now):
     if self.mp4:
         t = time.localtime(now)
         s = time.strftime('%Y-%m-%d %H:%M:%S', t)
         image = cv.fromarray(image)
         cv.PutText(image, s, (30, 30), self.font, (255, 0, 0))
         self.mp4.write(np.asarray(image))
Code Example #12
	def addText(self, frame, textTop, textBottom):
		s = cv.GetSize(frame)
		offset = 8
		
		## add space for text annotations
		textSize = cv.GetTextSize(textTop, self._font)
		textframe = cv.CreateImage( (s[0], s[1] + 4*textSize[1] + 2*offset), frame.depth, frame.channels)
		cv.Set(textframe, 0)
		cv.SetImageROI(textframe, (0, 2*textSize[1] + offset, s[0], s[1]))
		cv.Copy(frame, textframe)
		cv.ResetImageROI(textframe)
				
		## write text
		cv.PutText(textframe, textTop, (5, 2*textSize[1] + offset/2), self._font, self._fontcolor)
		cv.PutText(textframe, textBottom, (5, int(s[1] + 4*textSize[1] + 1.5 * offset)), self._font, self._fontcolor)
		
		return textframe
Code Example #13
File: rompar.py  Project: fourks/rompar
def show_data():
    global Hex
    global Display_Data
    global Display_Binary
    global Search_HEX
    global Grid_Points_x
    global Grid_Points_y
    global Font
    global Data_Read
    global Radius

    if not Data_Read:
        return

    cv.Set(Hex, cv.Scalar(0, 0, 0))
    print
    dat = get_all_data()
    for row in range(Grid_Entries_y):
        out = ''
        outbin = ''
        for column in range(Grid_Entries_x / Bits):
            thisbyte = ord(dat[column * Grid_Entries_y + row])
            hexbyte = '%02X ' % thisbyte
            out += hexbyte
            outbin += to_bin(thisbyte) + ' '
            if Display_Binary:
                dispdata = to_bin(thisbyte)
            else:
                dispdata = hexbyte
            if Display_Data:
                if Search_HEX and Search_HEX.count(thisbyte):
                    cv.PutText(Hex, dispdata,
                               (Grid_Points_x[column * Bits],
                                Grid_Points_y[row] + Radius / 2 + 1), Font,
                               cv.Scalar(0x00, 0xff, 0xff))
                else:
                    cv.PutText(Hex, dispdata,
                               (Grid_Points_x[column * Bits],
                                Grid_Points_y[row] + Radius / 2 + 1), Font,
                               cv.Scalar(0xff, 0xff, 0xff))
        print outbin
        print
        print out
    print
Code Example #14
File: lrf.py  Project: Mnemonic7/lrf
def result_image(img, posX, posY, distance):
    hScale = 0.5
    vScale = 0.5
    lineWidth = 1
    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX | cv.CV_FONT_ITALIC, hScale,
                       vScale, 0, lineWidth)
    cv.PutText(img, ("%0.2fcm" % distance), (10, 20), font,
               cv.Scalar(0, 255, 255))
    cv.Circle(img, (posX, posY), 15, cv.Scalar(0, 255, 0), 2)
    cv.Circle(img, (posX, posY), 2, cv.Scalar(0, 255, 0), 2)
    return img
Code Example #15
def add_text(image, text, good = True):
    if good:
        color = (0, 255, 0)
    else:
        color = (0, 0, 255)
    (w, h, depth) = image.shape
    cvmat = cv.fromarray(image)
    for i in range(len(text)):
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.30, float(w)/350, thickness = 1)
        ((text_w, text_h), _) = cv.GetTextSize(text[i], font)
        cv.PutText(cvmat, text[i], (w/2-text_w/2, h/2-text_h/2 + i*text_h*2), font, color)
    return image
Code Example #16
    def _put_text(self, face, dis):
        x = face[0]
        y = face[1]
        h = face[3]
        if len(self.remember_age_gender_result) > 0:
            text_to_video_dis_and_age_and_gender = str(dis) + "m "
            cv.PutText(cv.fromarray(self.frame),
                       text_to_video_dis_and_age_and_gender, (x - 2, y - 2),
                       self.font, (255, 0, 0))

            for i in range(0, len(self.face_center_x)):
                if abs(self.cv_center_x-self.face_center_x[i]) < 20 and \
                                abs(self.cv_center_y-self.face_center_y[i]) < 20:

                    text_to_video_dis_and_age_and_gender = str(
                        dis) + "m " + self.remember_age_gender_result[i]
                    text_to_video_name_or_warn = self.remember_person_name_warn_result[
                        i]
                    # deal with voice match on people
                    # self._voice_match_people(text_to_video_name_or_warn)
                    cv.PutText(cv.fromarray(self.frame),
                               text_to_video_dis_and_age_and_gender,
                               (x - 2, y - 2), self.font, (255, 0, 0))
                    y = y + h + 20
                    cv.PutText(cv.fromarray(self.frame),
                               text_to_video_name_or_warn, (x, y), self.font,
                               (255, 0, 0))
                    self.distances[self.file_names[self.count - 1]] = dis
                    break
        else:
            text_to_video_dis_and_age_and_gender = str(dis) + "m "
            text_to_video_name_or_warn = "waiting..."
            cv.PutText(cv.fromarray(self.frame),
                       text_to_video_dis_and_age_and_gender, (x - 2, y - 2),
                       self.font, (255, 0, 0))
            y = y + h + 20
            cv.PutText(cv.fromarray(self.frame), text_to_video_name_or_warn,
                       (x, y), self.font, (255, 0, 0))
            self.distances[self.file_names[self.count - 1]] = dis
Code Example #17
def Color_callibration(capture):
    vals = []
    bgr = []
    mini = [255, 255, 255]
    maxi = [0, 0, 0]
    cv.NamedWindow("BGR", 0)
    print 'Please Put Your color in the circular area.Press ESC to start Callibration:'
    while 1:
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Starting Callibration...Analyzing the Object...'
    for i in range(0, 100):
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Smooth(image, image, cv.CV_MEDIAN, 3, 0)
        imagehsv = cv.CreateImage(cv.GetSize(image), 8, 3)
        cv.CvtColor(image, imagehsv, cv.CV_BGR2YCrCb)
        vals = cv.Get2D(imagehsv, 300, 200)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
        cv.PutText(
            image,
            "  " + str(vals[0]) + "," + str(vals[1]) + "," + str(vals[2]),
            (200, 300), font, (55, 25, 255))
        for j in range(0, 3):
            if (vals[j] < mini[j]): mini[j] = vals[j]
            if (vals[j] > maxi[j]): maxi[j] = vals[j]
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Analyzation Completed'
    mini[0] -= 35
    mini[1] -= 15
    mini[2] -= 15
    maxi[0] += 35
    maxi[1] += 15
    maxi[2] += 15
    for i in range(0, 3):
        if (mini[i] < 0):
            mini[i] = 0
        if (maxi[i] > 255):
            maxi[i] = 255
    cv.DestroyWindow("BGR")
    bgr = (mini, maxi)
    return bgr
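In cv2 the per-pixel read done with cv.Get2D is plain NumPy indexing; a rough sketch of sampling the calibration pixel in YCrCb (coordinates follow the example above; note that NumPy indexes rows first):

import cv2

def sample_ycrcb(frame, x=200, y=300):
    blurred = cv2.medianBlur(frame, 3)
    ycrcb = cv2.cvtColor(blurred, cv2.COLOR_BGR2YCrCb)
    return ycrcb[y, x]  # NumPy indexing is [row, column]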
Code Example #18
    def draw_fft(self, frame, fft_data, min_bpm, max_bpm):
        w = frame.width
        h = int(frame.height * Annotator.FFT_HEIGHT)
        x = 0
        y = frame.height

        max_magnitude = max(d[1][0] for d in fft_data)

        def get_position(i):
            point_x = int(
                w *
                (float(fft_data[i][0] - min_bpm) / float(max_bpm - min_bpm)))
            point_y = int(y - ((h * fft_data[i][1][0]) / max_magnitude))
            return point_x, point_y

        line = [get_position(i) for i in range(len(fft_data))]

        cv.PolyLine(frame, [line], False, self.get_colour()[0], 3)

        # Label the largest bin
        max_bin = max(range(len(fft_data)), key=(lambda i: fft_data[i][1][0]))

        x, y = get_position(max_bin)
        c = self.get_colour()
        text = "%0.1f" % fft_data[max_bin][0]

        cv.PutText(frame, text, (x, y), self.small_font_outline, c[1])
        cv.PutText(frame, text, (x, y), self.small_font, c[0])

        # Pulse ring
        r = Annotator.SMALL_PULSE_SIZE
        phase = int(
            ((fft_data[max_bin][1][1] % (2 * numpy.pi)) / numpy.pi) * 180)
        cv.Ellipse(frame, (int(x - (r * 1.5)), int(y - r)), (int(r), int(r)),
                   0, 90, 90 - phase, c[1], Annotator.THIN + Annotator.BORDER)
        cv.Ellipse(frame, (int(x - (r * 1.5)), int(y - r)), (int(r), int(r)),
                   0, 90, 90 - phase, c[0], Annotator.THIN)
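The primitives used here (PolyLine, PutText, Ellipse) all have direct cv2 counterparts. A minimal sketch of the labelled peak and pulse ring, assuming frame is a NumPy image and that x, y, r, phase, and the two colours have been computed as above:

import cv2

def draw_peak_marker(frame, x, y, text, r, phase, colour, outline):
    # Label the peak: outline first, then the fill colour on top
    cv2.putText(frame, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, outline, 3)
    cv2.putText(frame, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, colour, 1)
    # Pulse ring: an arc whose sweep follows the phase
    centre = (int(x - r * 1.5), int(y - r))
    cv2.ellipse(frame, centre, (int(r), int(r)), 0, 90, 90 - phase, outline, 3)
    cv2.ellipse(frame, centre, (int(r), int(r)), 0, 90, 90 - phase, colour, 1)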
Code Example #19
File: gui.py  Project: ArgyleStoat/rompar
def show_data(self):
    if not self.data_read:
        return

    cv.Set(self.img_hex, cv.Scalar(0, 0, 0))
    print
    dat = get_all_data(self)
    for row in range(len(self.grid_points_y)):
        out = ''
        outbin = ''
        for column in range(len(self.grid_points_x) / self.group_cols):
            thisbyte = ord(dat[column * len(self.grid_points_y) + row])
            hexbyte = '%02X ' % thisbyte
            out += hexbyte
            outbin += to_bin(thisbyte) + ' '
            if self.config.img_display_binary:
                disp_data = to_bin(thisbyte)
            else:
                disp_data = hexbyte
            if self.config.img_display_data:
                if self.Search_HEX and self.Search_HEX.count(thisbyte):
                    cv.PutText(
                        self.img_hex, disp_data,
                        (self.grid_points_x[column * self.group_cols],
                         self.grid_points_y[row] + self.config.radius / 2 + 1),
                        self.font, cv.Scalar(0x00, 0xff, 0xff))
                else:
                    cv.PutText(
                        self.img_hex, disp_data,
                        (self.grid_points_x[column * self.group_cols],
                         self.grid_points_y[row] + self.config.radius / 2 + 1),
                        self.font, cv.Scalar(0xff, 0xff, 0xff))
        #print outbin
        #print
        #print out
    print
Code Example #20
def update_video_with(image):
    cv.SetData(cv_image, image)
    if hands:
        if hands[1]['drawing']:
            update_notification_with('Click to Stop Drawing')
        else:
            update_notification_with('Click to Start Drawing')

        for id in hands:
            cv.PutText(cv_image, hands[id]['color']['name'], hands[id]['current_position'], text_font, cv.CV_RGB(255, 255, 255))
    else:
        update_notification_with('Wave to Interact')
    for button in buttons:
        cv.Rectangle(cv_image, buttons[button]['start'], buttons[button]['end'], buttons[button]['color'], -1, cv.CV_AA, 0)
    cv.ShowImage('Video', cv_image)
Code Example #21
def drawrandline():
    rand = Random()
    img = cv.CreateImage((700, 1000), 8, 3)
    cv.SetZero(img)
    cv.NamedWindow("RandomViewer", 1)
    for i in range(100):
        cv.Line(img, (rand.randrange(0, 700), rand.randrange(0, 1000)),
                (300, 200), (rand.randrange(0, 256), rand.randrange(
                    0, 256), rand.randrange(0, 256)), 1, 8, 0)
        cv.ShowImage("RandomViewer", img)
        cv.WaitKey(5)
    cv.PutText(img, "Hello OpenCV", (100, 200),
               cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 5, 10, 0, 1, 8),
               (255, 255, 255))
    cv.ShowImage("RandomViewer", img)
    cv.WaitKey(0)
    cv.DestroyWindow("RandomViewer")
Code Example #22
    def run(self):
        started = time.time()
        while True:
            
            curframe = cv.QueryFrame(self.capture)
            instant = time.time()  # Get the timestamp of the frame
            
            self.processImage(curframe) #Process the image
            
            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant #Update the trigger_time
                    if instant > started + 5:  # Wait 5 seconds after the webcam starts (for luminosity adjustment, etc.)
                        print datetime.now().strftime("%b %d, %H:%M:%S"), "Something is moving !"
                        host = 'localhost'
                        port = 50000
                        size = 1024
                        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        s.connect((host, port))
                        s.send('stream')
                        while True:
                            if not cv.GrabFrame(self.capture):
                                break
                            frame = cv.RetrieveFrame(self.capture)
                            sys.stdout.write(frame.tostring())

                        if self.doRecord:  # set isRecording=True only if we record a video
                            self.isRecording = True
            else:
                if instant >= self.trigger_time + 10:  # Record for 10 seconds
                    print datetime.now().strftime("%b %d, %H:%M:%S"), "Stop recording"
                    self.isRecording = False
                else:
                    cv.PutText(curframe,datetime.now().strftime("%b %d, %H:%M:%S"), (25,30),self.font, 0) #Put date on the frame
                    cv.WriteFrame(self.writer, curframe) #Write the frame
            
            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)
                
            cv.Copy(self.frame2gray, self.frame1gray)
            c=cv.WaitKey(1) % 0x100
            if c == 27 or c == 10:  # Break if the user presses Esc or Enter
                break
Code Example #23
    def run(self):
        time.sleep(5)
        started = 0
        while True:

            curframe = cv.QueryFrame(self.capture)
            instant = time.time()  # Get the timestamp of the frame

            self.processImage(curframe) #Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant #Update the trigger_time
                    if time.time() > started + 10:
                        started = time.time()
                        print "Something is moving !"
                        cv.SaveImage('intruder' + str(self.img) + ".jpg", curframe)
                        sendImage(self.img)
                        self.img = self.img + 1
                        if self.doRecord:  # set isRecording=True only if we record a video
                            self.isRecording = True

            else:
                if instant >= self.trigger_time + 10:  # Record for 10 seconds
                    print "Stop recording"
                    self.isRecording = False
                else:
                    cv.PutText(curframe,datetime.now().strftime("%b %d, %H:%M:%S"), (25,30),self.font, 0) #Put date on the frame
                    cv.WriteFrame(self.writer, curframe) #Write the frame

            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)

            cv.Copy(self.frame2gray, self.frame1gray)
            c=cv.WaitKey(1)
            if c==27 or c == 1048603: #Break if user enters 'Esc'.
                break
Code Example #24
def DetectRedEyes(image, faceCascade, smileCascade, eyeCascade):
    min_size = (20,20)
    image_scale = 2
    haar_scale = 1.1
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(image.width / image_scale),cv.Round (image.height / image_scale)), 8 ,1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
    haar_scale, min_neighbors, haar_flags, min_size)
    global norm
    # If faces are found
    if faces:
        
        #print faces
        ratio = 1.
        for ((x, y, w, h), n) in faces:
        # the input to cv.HaarDetectObjects was resized, so scale the
        # bounding box of each face and convert it to two CvPoints
            #print "face"
            if h!=0:
                ratio = h/norm

            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            # print pt1
            # print pt2
            #cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 1, 8, 0)
            #cv.PutText(image, "face"+str(h), pt1, font, cv.RGB(255, 0, 0))
            face_region = cv.GetSubRect(image,(x,int(y + (h/4)),w,int(h/2)))

            #split face
            #cv.Rectangle(image, (pt1[0],(pt1[1] + (abs(pt1[1]-pt2[1]) / 2 ))), pt2, cv.RGB(0,255,0), 1, 8, 0)
            #cv.PutText(image, "lower", (pt1[0],(pt1[1] + (abs(pt1[1]-pt2[1]) / 2 ))), font, cv.RGB(0, 255, 0))
            cv.SetImageROI(image, (pt1[0],
                               (pt1[1] + int(abs(pt1[1]-pt2[1]) * 0.625 )),
                               pt2[0] - pt1[0],
                               int((pt2[1] - (pt1[1] + int(abs(pt1[1]-pt2[1]) * 0.625 ))))))
            
            smiles = cv.HaarDetectObjects(image, smileCascade, cv.CreateMemStorage(0), 1.1, 5, 0, (15,15))
        
            if smiles:
                #print smiles          
                for smile in smiles:
                    cv.Rectangle(image,
                    (smile[0][0],smile[0][1]),
                    (smile[0][0] + smile[0][2], smile[0][1] + smile[0][3]),
                    cv.RGB(0, 0, 255), 1, 8, 0)
                    sizer = (smile[0][2]/ratio+smile[0][3]/ratio)#+(smile[1]/ratio))
                    #sizer = math.trunc(sizer)
                    #cv.PutText(image, "smile", (smile[0][0],smile[0][1]), font, cv.RGB(0, 0, 255))

                    cv.PutText(image,str(math.trunc(sizer**2)), (smile[0][0], smile[0][1] + smile[0][3] + 10), font, cv.RGB(0, 0, 255))
                    #print ((abs(smile[0][1] - smile[0][2]) / abs(pt1[0] - pt2[0])) * 100) 
                    
                    global smileneighbour 
                    smileneighbour = sizer**2*2
            cv.ResetImageROI(image)
            #############################################################################
            #############################################################################
            cv.SetImageROI(image, (pt1[0], pt1[1], int(pt2[0]-pt1[0]), int(pt2[1] - pt1[1])) )
            eyes = cv.HaarDetectObjects(image, eyeCascade,cv.CreateMemStorage(0),haar_scale, 5,haar_flags, (15,15))
            if eyes:
                # For each eye found
                iii = 0
                #print eyes
                for eye in eyes:
                    # Draw a rectangle around the eye
                   cv.Rectangle(image,(eye[0][0],eye[0][1]),(eye[0][0] + eye[0][2],eye[0][1] + eye[0][3]), cv.RGB(0, 0, 255), 1, 8, 0)
                   a = math.trunc(float(eye[1])/ratio)
                   cv.PutText(image,str(a), (eye[0][0], eye[0][1] + eye[0][3]), font, cv.RGB(0, 0, 255))
                   global eyetot
                   eyetot += float(eye[1]*eye[1])/ratio
                   iii+=1
                   if iii==2:
                       iii = 0
                       break
            cv.ResetImageROI(image)
    cv.ResetImageROI(image)
    return image
Code Example #25
def compute(playerList, video):
    videoName = video
    capture = cv.CaptureFromFile(videoName)

    count = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
    fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
    width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))

    # store the last frame
    preFrame = cv.CreateImage((width, height), 8, 1)
    # store the current frame
    curFrame = cv.CreateImage((width, height), 8, 1)

    prePyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)
    curPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)

    numOfPlayers = len(playerList)

    # store players moving distance
    players = np.zeros(numOfPlayers)

    # store players position of last frame
    prePlayers = playerList
    # store players position of current frame
    curPlayers = []

    img = cv.CreateImage((width, height), 8, 1)

    #flag of storing player info
    flagInfo = True

    for f in xrange(count):
        frame = cv.QueryFrame(capture)

        if (flagInfo):
            cv.CvtColor(frame, img, cv.CV_BGR2GRAY)
            for i in range(numOfPlayers):
                font = cv.InitFont(cv.CV_FONT_HERSHEY_SCRIPT_SIMPLEX, 0.4, 0.4,
                                   0, 2, 3)

                cv.PutText(
                    img, str(i),
                    (int(prePlayers[i][0][0]), int(prePlayers[i][0][1])), font,
                    (255, 255, 255))
            cv.SaveImage(playerInfo, img)
            flagInfo = False

        #Convert to gray
        cv.CvtColor(frame, curFrame, cv.CV_BGR2GRAY)

        #Calculate the movement using the previous and the current frame using the previous points
        curPlayers, status, err = cv.CalcOpticalFlowPyrLK(
            preFrame, curFrame, prePyr, curPyr, prePlayers, (10, 10), 3,
            (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

        ###temp = frame
        # add new distance to list
        for i in range(numOfPlayers):
            players[i] += getDistance(prePlayers[i], curPlayers[i])
            ###cv.Line(temp, (int(prePlayers[i][0]), int(prePlayers[i][1])), (int(curPlayers[i][0]), int(curPlayers[i][1])), (255,122,122),3)

        ###cv.ShowImage("test", temp)
        ###cv2.waitKey(20)

        #Put the current frame preFrame
        cv.Copy(curFrame, preFrame)
        prePlayers = curPlayers
    ###cv2.destroyAllWindows()
    # print distance
    i = 0
    f = open(recordFile, 'w')
    for player in players:
        i += 1
        print "player", i, "running distance: ", player, "\n"
        f.write("player" + str(i) + " running distance: " + str(player) +
                "meters\n")
Code Example #26
def DetectRedEyes(image, faceCascade, smileCascade):
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(
        image.width / image_scale), cv.Round(image.height / image_scale)), 8,
                                1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:

        #print faces

        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            #print "face"
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            # print pt1
            # print pt2
            #cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 1, 8, 0)
            #cv.PutText(image, "face", pt1, font, cv.RGB(255, 0, 0))
            face_region = cv.GetSubRect(image,
                                        (x, int(y + (h / 4)), w, int(h / 2)))

            #split face
            #cv.Rectangle(image, (pt1[0],(pt1[1] + (abs(pt1[1]-pt2[1]) / 2 ))), pt2, cv.RGB(0,255,0), 1, 8, 0)
            #cv.PutText(image, "lower", (pt1[0],(pt1[1] + (abs(pt1[1]-pt2[1]) / 2 ))), font, cv.RGB(0, 255, 0))
            cv.SetImageROI(
                image, (pt1[0],
                        (pt1[1] + (abs(pt1[1] - pt2[1]) / 2)), pt2[0] - pt1[0],
                        int((pt2[1] - (pt1[1] + (abs(pt1[1] - pt2[1]) / 2))))))

            smiles = cv.HaarDetectObjects(image, smileCascade,
                                          cv.CreateMemStorage(0), 1.1, 5, 0,
                                          (15, 15))

            if smiles:
                #print smiles

                for smile in smiles:
                    cv.Rectangle(
                        image, (smile[0][0], smile[0][1]),
                        (smile[0][0] + smile[0][2], smile[0][1] + smile[0][3]),
                        cv.RGB(0, 0, 255), 1, 8, 0)

                    cv.PutText(image, "smile", (smile[0][0], smile[0][1]),
                               font, cv.RGB(0, 0, 255))

                    cv.PutText(image, str(smile[1]),
                               (smile[0][0], smile[0][1] + smile[0][3]), font,
                               cv.RGB(0, 0, 255))
                    #print ((abs(smile[0][1] - smile[0][2]) / abs(pt1[0] - pt2[0])) * 100)

                    global smileness
                    smileness = smile[1]
            cv.ResetImageROI(image)
            #if smile[1] > 90:
            #    mqttc.publish("smiles", "got smile", 1)
            #    time.sleep(5)

        #eyes = cv.HaarDetectObjects(image, eyeCascade,
        #cv.CreateMemStorage(0),
        #haar_scale, min_neighbors,
        #haar_flags, (15,15))

        #if eyes:
        # For each eye found

        #print eyes

        #for eye in eyes:
        # Draw a rectangle around the eye
        #   cv.Rectangle(image,
        #   (eye[0][0],
        #   eye[0][1]),
        #   (eye[0][0] + eye[0][2],
        #   eye[0][1] + eye[0][3]),
        #   cv.RGB(255, 0, 0), 1, 8, 0)

    cv.ResetImageROI(image)
    return image
Code Example #27
 def draw_label(self, img, pixmapper):
     pix1 = pixmapper(self.point)
     cv.PutText(img, self.label, pix1,
                cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0),
                self.colour)
Code Example #28
File: dmtx.py  Project: 2RoN4eG/Regula-Test-Task
scribble = cv.CloneMat(bg)

if 0:
    for i in range(10):
        df.find(bg)

for (sym, coords) in df.find(bg).items():
    print sym
    cv.PolyLine(scribble, [coords],
                1,
                cv.CV_RGB(255, 0, 0),
                1,
                lineType=cv.CV_AA)
    Xs = [x for (x, y) in coords]
    Ys = [y for (x, y) in coords]
    where = ((min(Xs) + max(Xs)) / 2, max(Ys) - 50)
    cv.PutText(scribble, sym, where, font, cv.RGB(0, 255, 0))

cv.ShowImage("results", scribble)
cv.WaitKey()
cv.DestroyAllWindows()

sys.exit(0)

capture = cv.CaptureFromCAM(0)
while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("capture", img)
    print df.find(img)
    cv.WaitKey(6)
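For the outline and label drawn around each detected symbol, the cv2 equivalents are cv2.polylines and cv2.putText; a rough sketch, assuming coords is a list of (x, y) tuples as in the loop above:

import cv2
import numpy as np

def mark_symbol(scribble, sym, coords):
    pts = np.array(coords, dtype=np.int32).reshape((-1, 1, 2))
    cv2.polylines(scribble, [pts], True, (0, 0, 255), 1, cv2.LINE_AA)
    xs = [x for (x, y) in coords]
    ys = [y for (x, y) in coords]
    where = ((min(xs) + max(xs)) // 2, max(ys) - 50)
    cv2.putText(scribble, sym, where, cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 255, 0), 1)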
Code Example #29
import cv2.cv as cv

image=cv.LoadImage('../img/lena.jpg', cv.CV_LOAD_IMAGE_COLOR) #Load the image

font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8) #Creates a font

y = image.height / 2 # y position of the text
x = image.width / 4 # x position of the text

cv.PutText(image,"Hello World !", (x,y),font, cv.RGB(255, 255, 255)) #Draw the text

cv.ShowImage('Hello World', image) #Show the image
cv.WaitKey(0)
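The same hello-world overlay written against the cv2 API, as a sketch (the image path is kept from the example above):

import cv2

image = cv2.imread('../img/lena.jpg', cv2.IMREAD_COLOR)  # load the image
h, w = image.shape[:2]
cv2.putText(image, 'Hello World !', (w // 4, h // 2),
            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 3)
cv2.imshow('Hello World', image)
cv2.waitKey(0)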
Code Example #30
#coding=utf-8

import cv2.cv as cv

image = cv.LoadImage('meinv.jpg', cv.CV_LOAD_IMAGE_COLOR)

font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
y = image.height / 4
x = image.width / 2

cv.PutText(image, "Hello Meinv!", (x, y), font, cv.RGB(0, 0, 0))

thumb = cv.CreateImage((image.width / 2, image.height / 2), cv.CV_8UC2, 3)
cv.Resize(image, thumb)
#cvt = cv.CreateImage(cv.GetSize(thumb), cv.CV_8UC2, 3)
#cv.CvtColor(thumb, cvt, cv.CV_RGB2BGR)
#cv.NamedWindow('Image', cv.CV_WINDOW_AUTOSIZE)

grey = cv.CreateImage(cv.GetSize(thumb), 8, 1)
cv.CvtColor(thumb, grey, cv.CV_RGB2GRAY)
cv.ShowImage('Greyed', grey)

smoothed = cv.CloneImage(thumb)
cv.Smooth(thumb, smoothed, cv.CV_MEDIAN)
cv.ShowImage('Smoothed', smoothed)

cv.EqualizeHist(grey, grey)
cv.ShowImage('Equalized', grey)

threshold1 = cv.CloneImage(grey)
cv.Threshold(threshold1, threshold1, 100, 255, cv.CV_THRESH_BINARY)