예제 #1
0
def talker():
    """Grab frames from the active capture(s) and publish them on 'chatter'.

    Relies on module globals (height, width, fps, cam, capture, capture1,
    check_fps_set) that are initialized elsewhere and mutated by the
    'config' subscriber callback. Runs until ROS shutdown.
    """
    #cv.NamedWindow("")
    global height
    global width
    global fps
    global check_fps_set
    global capture
    global capture1
    global cam
    #capture =cv.CaptureFromCAM(0)
    # Apply the currently configured resolution to the primary capture.
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, int(width))
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, int(height))
    #cv.SetCaptureProperty(capture,cv.CV_CAP_PROP_FPS,2)
    #rate=cv.GetCaptureProperty(capture,cv.CV_CAP_PROP_FPS)
    #print rate
    bridge = CvBridge()
    pub = rospy.Publisher('chatter', Image)
    rospy.init_node('talker', anonymous=True)

    rospy.Subscriber("config", String, callback)
    r = rospy.Rate(int(fps))  # publish rate comes from the shared fps setting
    time1 = 0
    frames = 0      # frames published since start_time (for fps measurement)
    start_time = 0
    check = 0       # flag: has start_time been initialized yet?
    while not rospy.is_shutdown():
        #str = "hello world %s"%rospy.get_time()
        # Drain a few buffered frames so the frame published below is fresh.
        cv.QueryFrame(capture)
        cv.QueryFrame(capture)
        cv.QueryFrame(capture)
        frame = cv.QueryFrame(capture)
        time1 = time.time()
        print "after frame capture: " + str(time1)
        if cam == str(4):
            # Dual-camera mode: grab one frame from each capture and
            # stitch them side by side into a single 8-bit, 3-channel image.
            frame = cv.QueryFrame(capture)
            frame1 = cv.QueryFrame(capture1)
            img = np.asarray(frame[:, :])
            img1 = np.asarray(frame1[:, :])
            both = np.hstack((img, img1))
            # Wrap the stitched numpy array back into an IplImage header so
            # it can go through the CvBridge publisher below.
            bitmap = cv.CreateImageHeader((both.shape[1], both.shape[0]),
                                          cv.IPL_DEPTH_8U, 3)
            cv.SetData(bitmap, both.tostring(),
                       both.dtype.itemsize * 3 * both.shape[1])
            frame = bitmap
        if check == 0:
            check = 1
            start_time = time.time()
        if check_fps_set == 1:
            # The config callback changed fps: rebuild the rate limiter and
            # restart the frame-rate measurement window.
            r = rospy.Rate(int(fps))
            print "fps: " + fps
            start_time = time.time()
            frames = 0
            check_fps_set = 0
        frames = frames + 1
        if frames % 10 == 0:
            # Report the measured (not requested) frame rate every 10 frames.
            # NOTE(review): this rebinds the global fps string to a float.
            curtime = time.time()
            diff = curtime - start_time
            fps = frames / diff
            print fps
        #ret,frame=capture.read()
        #image=np.asarray(frame[:,:])

#a=image.shape
#print a
#rospy.loginfo(st)
        pub.publish(bridge.cv_to_imgmsg(frame, "bgr8"))
        time1 = time.time()
        print "after frame publish: " + str(time1)
        r.sleep()
        time1 = time.time()
        print "after sleep: " + str(time1)
예제 #2
0
import cv

# setup webcam
capture = cv.CaptureFromCAM(0)

# set resolution (320x240 requested; the driver may adjust it)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240)

# setup file output
filenm = "output.avi"
codec = 0  # NOTE(review): codec 0 — presumably uncompressed; confirm against cv docs
fps = 15
# Read the resolution back so the writer matches what the camera actually delivers.
width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
writer = cv.CreateVideoWriter(filenm, codec, fps, (width, height), 1)


def DisplayWriteFrame(image):
    """Display *image* in 'Image_Window' and write it 15 times to disk."""
    cv.ShowImage('Image_Window', image)

    # 15 copies is roughly one second of footage at the writer's 15 fps.
    for _ in range(15):
        cv.WriteFrame(writer, image)

    cv.WaitKey(1000)


j = 0
예제 #3
0
	def atualiza_frame(self,frame):
		"""Seek the capture to frame index *frame*, grab it, blur it, reprocess.

		Uses the module globals `video` (capture) and `filtro_de_gauss`
		(Gaussian kernel size) defined elsewhere in the file.
		"""
		cv.SetCaptureProperty(video,cv.CV_CAP_PROP_POS_FRAMES,frame)
		self.imagem = cv.QueryFrame(video)
		#cv.SaveImage('fundo2.jpg',self.imagem)
		# In-place Gaussian smoothing to suppress noise before processing.
		cv.Smooth(self.imagem,self.imagem,cv.CV_GAUSSIAN,filtro_de_gauss)
		self.processa()
예제 #4
0
if __name__ == '__main__':

    # Haar cascade file for face detection; must exist next to the script.
    cascade = cv.Load("face.xml")

    capture = cv.CreateCameraCapture(0)

    cv.NamedWindow("Face detection", 1)

    width = 320  #leave None for auto-detection
    height = 240  #leave None for auto-detection

    # If a dimension is None, ask the camera what it delivers; otherwise
    # push the requested value down to the driver.
    if width is None:
        width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    else:
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)

    if height is None:
        height = int(
            cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    else:
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

    if capture:
        frame_copy = None
        while True:

            frame = cv.QueryFrame(capture)
            if not frame:
                # No frame available: wait for a keypress, then stop.
                cv.WaitKey(0)
                break
예제 #5
0
 def setResolution(self):
     """Request full-HD (1920x1080) frames from self.camera."""
     for prop, value in ((cv.CV_CAP_PROP_FRAME_WIDTH, 1920),
                         (cv.CV_CAP_PROP_FRAME_HEIGHT, 1080)):
         cv.SetCaptureProperty(self.camera, prop, value)
def run_real_time_recognition(para_path, Labels):
    """Run the real-time gesture-recognition UI loop (webcam + IR/depth feed).

    para_path -- path to a .mat file holding neural-net weights Theta1/Theta2
    Labels    -- gesture label set forwarded to the recognizer

    Loops until the user presses Esc. Depends on several project helpers
    (get_depth, get_video, iplimage_to_numpy_color, ...) defined elsewhere.
    """
    status_dictionary = {}
    # status, pos, radias, color, text, ,pos, font_color
    # states:
    #   0 -> waiting to be hovered
    #   1 -> hovered waiting to be selected(clicked)
    #   2 -> selected waiting to be unselected(clicked)
    start_time = 0
    status_dictionary['b1'] = [False, (530, 70), 60, (255, 255, 0), 'Record', (490, 70), (0,0,0), [], False]
    status_dictionary['b2'] = [False, (380, 70), 60, (0, 255, 0), 'Select', (350, 70), (0,0,0), [], False]
    status_dictionary['b3'] = [False, (240, 70), 60, (0, 255, 255), 'Billard', (210, 70),(0,0,0), [], False]
    status_dictionary['b4'] = [False, (100, 270), 90, (255, 255, 255), 'Drag Me', (70, 270),(0,0,0), [], False]

    global depth,ir, rgb
    count = 0

    # frame_size = (480,640)
    # Setting web cam config: 640x480 at 25 fps requested.
    capture=cv.CaptureFromCAM(0)
    # NOTE(review): fourcc is computed but never used — the writer below
    # builds its own FOURCC value.
    fourcc = cv.CV_FOURCC('X','V','I','D')
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FPS, 25)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

    # Neuronet Configuration
    resize_row = 20
    resize_width = 20
    weights = loadmat(para_path)
    T1 = weights['Theta1']
    T2 = weights['Theta2']
    history_prediction = [] # smoothing and other purpose
    history_gesture_pos = [] # smoothing and other purpose

    # Recording
    record_st = False
    rgb_writer = cv.CreateVideoWriter("recording.avi", cv.CV_FOURCC('X','V','I','D'), 5, (640, 480), True)

    # Capture frames IR, RGB, Depth
    while True:

        # Web cam feed (300, 400, 3)
        rgb_ipl = cv.QueryFrame(capture)

        # Depth IR feed
        (depth,_), (ir,_) = get_depth(), get_video(format=2)

        # Threshold the IR image into a binary 0/255 mask.
        ir = (ir>150).astype(float)
        ir = ir*255
        ir_ipl = resize_ir_callibrate_with_rgb(ir)

        new_rgb_ipl = cv.CreateImage(cv.GetSize(rgb_ipl), 8, 3)


        #Billard Mode: work on the green channel of the RGB frame.
        yo = rgb_ipl
        f = iplimage_to_numpy_color(yo)
        green_mono = f[:,:,1]
        #image = cv.fromarray(np.array(green_mono[:,:]))
        #cv.ShowImage('G', image)

        rgb_np, threshold_np, contour_list = billard_extract_and_draw_countour(f, 20, green_mono, 120, 0)
        image = cv.fromarray(np.array(rgb_np))

        #print contour_list
        # Pick the contour with the largest y coordinate (lowest on screen);
        # presumably the cue ball — confirm against the contour helper.
        maxx = (0,0,0,0)
        for pos in contour_list:
            if pos[1] > maxx[1]:
                maxx = pos
        #print maxx

        # Draw a line from the chosen contour's center to every other one.
        for item in contour_list:
            if maxx != item:
                cv.Line(image, (maxx[0]+maxx[2]/2, maxx[1]+maxx[3]/2), (item[0]+item[2]/2,item[1]+item[3]/2), (0,255,0), thickness=1, lineType=8, shift=0)
        #cv.ShowImage('G Threshold', image)
        new_rgb_ipl = cvMat_to_iplimage_color(image)
        #cv.ShowImage('G Threshold', new_rgb_ipl)






        # Hand Sengmentation
        rgb_np, ir_np, contour_list, history_gesture_pos = real_time_extract_and_draw_countour(ir_ipl, rgb_ipl, 20000, history_gesture_pos)

        # Gesture Recognition
        if contour_list:
            ir_ipl, rgb_ipl, history_prediction = real_time_gesture_recognition_and_labeling(ir_np, rgb_np, contour_list, T1, T2, Labels, history_prediction, False)

            # Update button status
            status_dictionary, start_time = update_button_status(contour_list, history_prediction, Labels, status_dictionary, history_gesture_pos, False, start_time)

        draw_menu_button(ir_ipl, rgb_ipl, status_dictionary, start_time)


        # resize for full screen display
        """
        rgb_np = iplimage_to_numpy_color(rgb_ipl)
        rgb_np = imresize(rgb_np, (800, 1066))
        image = cv.fromarray(np.array(rgb_np))
        cv.ShowImage('rgb', image)
        """
        # 'Billard' button active: blend the drawn overlay into the RGB frame.
        if status_dictionary['b3'][0]:
            opacity = 0.4
            cv.AddWeighted(new_rgb_ipl, opacity, rgb_ipl, 1 - opacity, 0, rgb_ipl)

        # 'Record' button active: append the frame to the output video.
        if status_dictionary['b1'][0]:
            cv.WriteFrame(rgb_writer, rgb_ipl)
        else:
            # NOTE(review): dead store — name differs from record_st above
            # and is never read; likely a typo.
            record_status=False

        cv.ShowImage('rgb', rgb_ipl)
        cv.ShowImage('ir', ir_ipl)

        c=cv.WaitKey(5)
        if c==27: #Break if user enters 'Esc'.
            break
예제 #7
0
def callback(data):
    """Handle a 'config' message and (re)open the matching video source.

    The payload is a comma-separated string: cam,fps,height,width.
    Mutates the module globals consumed by talker() and sets check_fps_set
    so the publish loop rebuilds its rate limiter.
    """
    print "data: " + data.data
    global height
    global width
    global capture
    global capture1
    global fps
    global cam
    global check_fps_set
    global sub
    string = str(data.data)
    b = string.split(',')
    # Field order: b[0]=camera id, b[1]=fps, b[2]=height, b[3]=width.
    height = str(b[2])
    width = str(b[3])
    prev_cam = cam
    cam = str(b[0])

    fps = str(b[1])
    if prev_cam == str(2):
        # Leaving compressed-topic mode: drop the extra subscription.
        #pass
        sub.unregister()
    if cam == str(0):
        capture = None
        capture1 = None
        capture = cv.CaptureFromCAM(0)
        #sub.unregister()
    if cam == str(1):
        capture = None
        capture1 = None
        capture = cv.CaptureFromCAM(1)
        #sub.unregister()
    '''if cam==str(2):
        capture=None
        capture1=None
        capture=cv.CaptureFromCAM(2)
        sub.unregister()'''
    if cam == str(3):
        capture = None
        capture1 = None
        capture = cv.CaptureFromCAM(3)
        #sub.unregister()
    if cam == str(2):
        # Camera "2" means: no local capture — subscribe to a compressed
        # image topic instead and return before touching capture props.
        capture = None
        capture1 = None
        sub = rospy.Subscriber('/image_raw/compressed',
                               CompressedImage,
                               callback1,
                               queue_size=1)
        check_fps_set = 1
        return
    if cam == str(4):
        # Dual-camera mode used by talker()'s side-by-side stitching.
        capture = None
        capture1 = None
        #sub.unregister()
        capture = cv.CaptureFromCAM(0)
        capture1 = cv.CaptureFromCAM(1)
        check_fps_set = 1
        return
    check_fps_set = 1
    #print "prev_cam: "+str(prev_cam)+" new cam: "+str(cam)
    # Single-camera modes fall through to here: apply the new resolution.
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, int(width))
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, int(height))
예제 #8
0
def onTrackbarSlide(pos):
    """Trackbar callback: seek g_capture to frame index *pos*.

    Raises the module-level downUpdateTrackbarPos guard while seeking so
    other code can distinguish a programmatic trackbar move from a user one.
    """
    # Bug fix: without this declaration the assignments below only created a
    # function-local variable, so the module-level flag never changed.
    global downUpdateTrackbarPos
    if (listenToSlider):
        downUpdateTrackbarPos = True
        cv.SetCaptureProperty(g_capture, cv.CV_CAP_PROP_POS_FRAMES, pos)
        downUpdateTrackbarPos = False
 def setResolution(self):
     """Request 720p (1280x720) frames from self.camera."""
     for prop, value in ((cv.CV_CAP_PROP_FRAME_WIDTH, 1280),
                         (cv.CV_CAP_PROP_FRAME_HEIGHT, 720)):
         cv.SetCaptureProperty(self.camera, prop, value)
예제 #10
0
 def setResolution(self, width=640, height=480):
     """Ask self.capture for the given frame size (defaults to 640x480)."""
     for prop, value in ((cv.CV_CAP_PROP_FRAME_WIDTH, width),
                         (cv.CV_CAP_PROP_FRAME_HEIGHT, height)):
         cv.SetCaptureProperty(self.capture, prop, value)
예제 #11
0
# Setup the webcam and font
# ---------------------------

# define image size
imageWidth = 320
imageHeight = 240

# create a window object
cv.NamedWindow("window1", cv.CV_WINDOW_AUTOSIZE)
camera_index = 0

# create a camera object
capture = cv.CaptureFromCAM(camera_index)

# set capture width and height
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, imageWidth)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, imageHeight)

# create a font (for drawing text onto captured images)
font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX_SMALL, 0.5, 0.5, 0, 1, cv.CV_AA)

# Configure pin 11 (BOARD numbering) as the shutter switch input.
GPIO.setmode(GPIO.BOARD)
sw = 11
GPIO.setup(11, GPIO.IN)

print "Hello, This is Capture photo program."
print "Press 'q' : to Ending program."
print "Press 'sw' : to Save the image."
print "=========================================="
print ""
time.sleep(0.5)
예제 #12
0
        # print "Sending %f " % time.time()
        # Send the current timestamp over the socket (try: opened above,
        # outside this chunk).
        s.send(('%f ' % time.time()).encode('latin-1'))
    except:
        # NOTE(review): bare except silently swallows any send failure;
        # the frame is still written below regardless.
        pass
    cv.WriteFrame(writer, im)


if __name__ == "__main__":

    fps = 25.0
    target_dur = 1.0 / fps  # target seconds per frame

    camera = cv.CreateCameraCapture(0)
    # NOTE(review): these two reads are discarded — width/height are
    # overwritten with the hard-coded 640x480 a few lines below.
    width = cv.GetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_WIDTH)
    height = cv.GetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_HEIGHT)
    cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    cv.SetCaptureProperty(camera, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
    width = 640
    height = 480
    print("WEBCAM: Initialized camera with", width, "x", height)
    # Affine matrix that rotates frames 180 degrees about the image center.
    global gMapping
    gMapping = cv.CreateMat(2, 3, cv.CV_32FC1)
    cv.GetRotationMatrix2D((width / 2.0, height / 2.0), 180.0, 1.0, gMapping)

    cv.NamedWindow("Webcam")
    cv.MoveWindow("Webcam", 1280, 0)
    im = get_image(camera, width, height)
    cv.ShowImage("Webcam", im)
    time.sleep(0.2)

    # Unix-domain socket for timestamp messages; presumably connected
    # further down — connection code not visible in this chunk.
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
예제 #13
0
 def _do_framerate(self, val):
     """Forward *val* to the camera's FPS capture property."""
     device = self.cam
     cv.SetCaptureProperty(device, cv.CV_CAP_PROP_FPS, val)
예제 #14
0
 def _do_shutter(self, val):
     """Forward *val* to the camera's exposure capture property."""
     device = self.cam
     cv.SetCaptureProperty(device, cv.CV_CAP_PROP_EXPOSURE, val)
예제 #15
0
 def _do_gain(self, val):
     """Forward *val* to the camera's gain capture property."""
     device = self.cam
     cv.SetCaptureProperty(device, cv.CV_CAP_PROP_GAIN, val)